repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
aragos/tichu-tournament | python/reportlab/pdfbase/_fontdata_widths_courierbold.py | 224 | 3664 | widths = {'A': 600,
'AE': 600,
'Aacute': 600,
'Acircumflex': 600,
'Adieresis': 600,
'Agrave': 600,
'Aring': 600,
'Atilde': 600,
'B': 600,
'C': 600,
'Ccedilla': 600,
'D': 600,
'E': 600,
'Eacute': 600,
'Ecircumflex': 600,
'Edieresis': 600,
'Egrave': 600,
'Eth': 600,
'Euro': 600,
'F': 600,
'G': 600,
'H': 600,
'I': 600,
'Iacute': 600,
'Icircumflex': 600,
'Idieresis': 600,
'Igrave': 600,
'J': 600,
'K': 600,
'L': 600,
'Lslash': 600,
'M': 600,
'N': 600,
'Ntilde': 600,
'O': 600,
'OE': 600,
'Oacute': 600,
'Ocircumflex': 600,
'Odieresis': 600,
'Ograve': 600,
'Oslash': 600,
'Otilde': 600,
'P': 600,
'Q': 600,
'R': 600,
'S': 600,
'Scaron': 600,
'T': 600,
'Thorn': 600,
'U': 600,
'Uacute': 600,
'Ucircumflex': 600,
'Udieresis': 600,
'Ugrave': 600,
'V': 600,
'W': 600,
'X': 600,
'Y': 600,
'Yacute': 600,
'Ydieresis': 600,
'Z': 600,
'Zcaron': 600,
'a': 600,
'aacute': 600,
'acircumflex': 600,
'acute': 600,
'adieresis': 600,
'ae': 600,
'agrave': 600,
'ampersand': 600,
'aring': 600,
'asciicircum': 600,
'asciitilde': 600,
'asterisk': 600,
'at': 600,
'atilde': 600,
'b': 600,
'backslash': 600,
'bar': 600,
'braceleft': 600,
'braceright': 600,
'bracketleft': 600,
'bracketright': 600,
'breve': 600,
'brokenbar': 600,
'bullet': 600,
'c': 600,
'caron': 600,
'ccedilla': 600,
'cedilla': 600,
'cent': 600,
'circumflex': 600,
'colon': 600,
'comma': 600,
'copyright': 600,
'currency': 600,
'd': 600,
'dagger': 600,
'daggerdbl': 600,
'degree': 600,
'dieresis': 600,
'divide': 600,
'dollar': 600,
'dotaccent': 600,
'dotlessi': 600,
'e': 600,
'eacute': 600,
'ecircumflex': 600,
'edieresis': 600,
'egrave': 600,
'eight': 600,
'ellipsis': 600,
'emdash': 600,
'endash': 600,
'equal': 600,
'eth': 600,
'exclam': 600,
'exclamdown': 600,
'f': 600,
'fi': 600,
'five': 600,
'fl': 600,
'florin': 600,
'four': 600,
'fraction': 600,
'g': 600,
'germandbls': 600,
'grave': 600,
'greater': 600,
'guillemotleft': 600,
'guillemotright': 600,
'guilsinglleft': 600,
'guilsinglright': 600,
'h': 600,
'hungarumlaut': 600,
'hyphen': 600,
'i': 600,
'iacute': 600,
'icircumflex': 600,
'idieresis': 600,
'igrave': 600,
'j': 600,
'k': 600,
'l': 600,
'less': 600,
'logicalnot': 600,
'lslash': 600,
'm': 600,
'macron': 600,
'minus': 600,
'mu': 600,
'multiply': 600,
'n': 600,
'nine': 600,
'ntilde': 600,
'numbersign': 600,
'o': 600,
'oacute': 600,
'ocircumflex': 600,
'odieresis': 600,
'oe': 600,
'ogonek': 600,
'ograve': 600,
'one': 600,
'onehalf': 600,
'onequarter': 600,
'onesuperior': 600,
'ordfeminine': 600,
'ordmasculine': 600,
'oslash': 600,
'otilde': 600,
'p': 600,
'paragraph': 600,
'parenleft': 600,
'parenright': 600,
'percent': 600,
'period': 600,
'periodcentered': 600,
'perthousand': 600,
'plus': 600,
'plusminus': 600,
'q': 600,
'question': 600,
'questiondown': 600,
'quotedbl': 600,
'quotedblbase': 600,
'quotedblleft': 600,
'quotedblright': 600,
'quoteleft': 600,
'quoteright': 600,
'quotesinglbase': 600,
'quotesingle': 600,
'r': 600,
'registered': 600,
'ring': 600,
's': 600,
'scaron': 600,
'section': 600,
'semicolon': 600,
'seven': 600,
'six': 600,
'slash': 600,
'space': 600,
'sterling': 600,
't': 600,
'thorn': 600,
'three': 600,
'threequarters': 600,
'threesuperior': 600,
'tilde': 600,
'trademark': 600,
'two': 600,
'twosuperior': 600,
'u': 600,
'uacute': 600,
'ucircumflex': 600,
'udieresis': 600,
'ugrave': 600,
'underscore': 600,
'v': 600,
'w': 600,
'x': 600,
'y': 600,
'yacute': 600,
'ydieresis': 600,
'yen': 600,
'z': 600,
'zcaron': 600,
'zero': 600}
| mit |
alexissmirnov/donomo | donomo_archive/deps/paypal/standard/pdt/tests/test_pdt.py | 9 | 5522 | """
run this with ./manage.py test website
see http://www.djangoproject.com/documentation/testing/ for details
"""
import os
from django.conf import settings
from django.shortcuts import render_to_response
from django.test import TestCase
from paypal.standard.pdt.forms import PayPalPDTForm
from paypal.standard.pdt.models import PayPalPDT
from paypal.standard.pdt.signals import pdt_successful, pdt_failed
class DummyPayPalPDT(object):
def __init__(self, update_context_dict={}):
self.context_dict = {'st': 'SUCCESS', 'custom':'cb736658-3aad-4694-956f-d0aeade80194',
'txn_id':'1ED550410S3402306', 'mc_gross': '225.00',
'business': settings.PAYPAL_RECEIVER_EMAIL, 'error': 'Error code: 1234'}
self.context_dict.update(update_context_dict)
self.response = ''
def update_with_get_params(self, get_params):
if get_params.has_key('tx'):
self.context_dict['txn_id'] = get_params.get('tx')
if get_params.has_key('amt'):
self.context_dict['mc_gross'] = get_params.get('amt')
if get_params.has_key('cm'):
self.context_dict['custom'] = get_params.get('cm')
def _postback(self, test=True):
"""Perform a Fake PayPal PDT Postback request."""
# @@@ would be cool if this could live in the test templates dir...
return render_to_response("pdt/test_pdt_response.html", self.context_dict).content
class PDTTest(TestCase):
urls = "paypal.standard.pdt.tests.test_urls"
template_dirs = [os.path.join(os.path.dirname(__file__), 'templates'),]
def setUp(self):
# set up some dummy PDT get parameters
self.get_params = {"tx":"4WJ86550014687441", "st":"Completed", "amt":"225.00", "cc":"EUR",
"cm":"a3e192b8-8fea-4a86-b2e8-d5bf502e36be", "item_number":"",
"sig":"blahblahblah"}
# monkey patch the PayPalPDT._postback function
self.dpppdt = DummyPayPalPDT()
self.dpppdt.update_with_get_params(self.get_params)
PayPalPDT._postback = self.dpppdt._postback
def test_verify_postback(self):
dpppdt = DummyPayPalPDT()
paypal_response = dpppdt._postback()
assert('SUCCESS' in paypal_response)
self.assertEqual(len(PayPalPDT.objects.all()), 0)
pdt_obj = PayPalPDT()
pdt_obj.ipaddress = '127.0.0.1'
pdt_obj.response = paypal_response
pdt_obj._verify_postback()
self.assertEqual(len(PayPalPDT.objects.all()), 0)
self.assertEqual(pdt_obj.txn_id, '1ED550410S3402306')
def test_pdt(self):
self.assertEqual(len(PayPalPDT.objects.all()), 0)
self.dpppdt.update_with_get_params(self.get_params)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
def test_pdt_signals(self):
self.successful_pdt_fired = False
self.failed_pdt_fired = False
def successful_pdt(sender, **kwargs):
self.successful_pdt_fired = True
pdt_successful.connect(successful_pdt)
def failed_pdt(sender, **kwargs):
self.failed_pdt_fired = True
pdt_failed.connect(failed_pdt)
self.assertEqual(len(PayPalPDT.objects.all()), 0)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
self.assertTrue(self.successful_pdt_fired)
self.assertFalse(self.failed_pdt_fired)
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.flag, False)
def test_double_pdt_get(self):
self.assertEqual(len(PayPalPDT.objects.all()), 0)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.flag, False)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1) # we don't create a new pdt
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.flag, False)
def test_no_txn_id_in_pdt(self):
self.dpppdt.context_dict.pop('txn_id')
self.get_params={}
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction Failed', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 0)
def test_custom_passthrough(self):
self.assertEqual(len(PayPalPDT.objects.all()), 0)
self.dpppdt.update_with_get_params(self.get_params)
paypal_response = self.client.get("/pdt/", self.get_params)
self.assertContains(paypal_response, 'Transaction complete', status_code=200)
self.assertEqual(len(PayPalPDT.objects.all()), 1)
pdt_obj = PayPalPDT.objects.all()[0]
self.assertEqual(pdt_obj.custom, self.get_params['cm'] ) | bsd-3-clause |
jayofdoom/cloud-init-debian-pkg | cloudinit/config/cc_byobu.py | 7 | 2886 | # vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit import distros as ds
from cloudinit import util
distros = ['ubuntu', 'debian']
def handle(name, cfg, cloud, log, args):
if len(args) != 0:
value = args[0]
else:
value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
if not value:
log.debug("Skipping module named %s, no 'byobu' values found", name)
return
if value == "user" or value == "system":
value = "enable-%s" % value
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
if not value in valid:
log.warn("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
if value.startswith("enable"):
bl_inst = "install"
dc_val = "byobu byobu/launch-by-default boolean true"
mod_sys = True
else:
if value == "disable":
mod_user = True
mod_sys = True
bl_inst = "uninstall"
dc_val = "byobu byobu/launch-by-default boolean false"
shcmd = ""
if mod_user:
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ds.extract_default(users)
if not user:
log.warn(("No default byobu user provided, "
"can not launch %s for the default user"), bl_inst)
else:
shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
if mod_sys:
shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("Setting byobu to %s", value)
util.subp(cmd, capture=False)
| gpl-3.0 |
gregcaporaso/qiime | scripts/conditional_uncovered_probability.py | 15 | 4838 | #!/usr/bin/env python
# File created on 1 April 2012
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Jose Antonio Navas Molina", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jens Reeder"
__email__ = "[email protected]"
from qiime.util import make_option, parse_command_line_parameters
from qiime.alpha_diversity import (single_file_cup,
list_known_cup_metrics)
import os
# conditional_uncovered_probability.py
script_info = {}
script_info['version'] = __version__
script_info['script_description'] = "Calculate the conditional uncovered\
probability."
script_info['brief_description'] = """Calculate the conditional uncovered\
probability on each sample in an otu table."""
script_info['script_description'] = \
"""This script calculates the conditional uncovered probability for each sample
in an OTU table. It uses the methods introduced in Lladser, Gouet, and Reeder,
"Extrapolation of Urn Models via Poissonization: Accurate Measurements of the
Microbial Unknown" PLoS 2011.
Specifically, it computes a point estimate and a confidence interval using two
different methods. Thus it can happen that the PE is actually outside of the
CI.
We only provide the ability to generate 95% (alpha=0.95) CIs. The CIs are ULCL
CIs; they provide an upper and lower bound, where the lower bound is
conservative. The CIs are constructed using an upper-to-lower bound ratio of
10.
The CI method requires precomputed constants that depend on the lookahead. We
only provide constants for r=3..25,30,40,50.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(
("Default case:",
"To calculate the cond. uncovered probability with the default values, "
"you can use the following command:",
"%prog -i otu_table.biom -o cup.txt"))
script_info['script_usage'].append(
("Change lookahead:",
"To change the accuracy of the prediction change the lookahead value. "
"Larger values of r lead to more precise predictions, but might be "
"unfeasable for small samples. For deeply sequenced samples, try "
"increasing r to 50:",
"%prog -i otu_table.biom -o cup_r50.txt -r 50"))
script_info['output_description'] = \
"""The resulting file(s) is a tab-delimited text file, where the columns
correspond to estimates of the cond. uncovered probability and the rows
correspond to samples. The output file is compatible with the alpha_diversity
output files and thus could be tied into the rarefaction workflow.
Example Output:
====== ======= ============= ================
\ PE Lower Bound Upper Bound
====== ======= ============= ================
PC.354 0.111 0.0245 0.245
PC.124 0.001 0.000564 0.00564
====== ======= ============= ================
"""
script_info['required_options'] = []
script_info['optional_options'] = [
make_option('-i', '--input_path',
help='Input OTU table filepath. [default: %default]',
type='existing_path'),
make_option('-o', '--output_path',
help='Output filepath to store the predictions. [default: %default]',
type='new_path'),
make_option('-r', '--look_ahead',
help='Number of unobserved, new colors necessary for prediction.'
' [default: %default]', default=25,
type='int'),
make_option('-m', '--metrics', default='lladser_pe,lladser_ci',
type='multiple_choice', mchoices=list_known_cup_metrics(),
help='CUP metric(s) to use. A comma-separated list should' +
' be provided when multiple metrics are specified. [default: %default]'),
make_option('-s', '--show_metrics', action='store_true',
dest="show_metrics",
help='Show the available CUP metrics and exit.')
]
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if opts.show_metrics:
print("Known metrics are: %s\n"
% (', '.join(list_known_cup_metrics()),))
exit(0)
almost_required_options = ['input_path', 'output_path']
for option in almost_required_options:
if getattr(opts, option) is None:
option_parser.error('Required option --%s omitted.' % option)
if os.path.isfile(opts.input_path):
try:
f = open(opts.output_path, 'w')
except IOError:
exit("ioerror, couldn't create output file")
f.close()
single_file_cup(opts.input_path, opts.metrics, opts.output_path,
opts.look_ahead)
else:
exit("io error, input path not valid. does it exist?")
if __name__ == "__main__":
main()
| gpl-2.0 |
chirilo/mozillians | vendor-local/lib/python/unidecode/x0c5.py | 253 | 4581 | data = (
'sseum', # 0x00
'sseub', # 0x01
'sseubs', # 0x02
'sseus', # 0x03
'sseuss', # 0x04
'sseung', # 0x05
'sseuj', # 0x06
'sseuc', # 0x07
'sseuk', # 0x08
'sseut', # 0x09
'sseup', # 0x0a
'sseuh', # 0x0b
'ssyi', # 0x0c
'ssyig', # 0x0d
'ssyigg', # 0x0e
'ssyigs', # 0x0f
'ssyin', # 0x10
'ssyinj', # 0x11
'ssyinh', # 0x12
'ssyid', # 0x13
'ssyil', # 0x14
'ssyilg', # 0x15
'ssyilm', # 0x16
'ssyilb', # 0x17
'ssyils', # 0x18
'ssyilt', # 0x19
'ssyilp', # 0x1a
'ssyilh', # 0x1b
'ssyim', # 0x1c
'ssyib', # 0x1d
'ssyibs', # 0x1e
'ssyis', # 0x1f
'ssyiss', # 0x20
'ssying', # 0x21
'ssyij', # 0x22
'ssyic', # 0x23
'ssyik', # 0x24
'ssyit', # 0x25
'ssyip', # 0x26
'ssyih', # 0x27
'ssi', # 0x28
'ssig', # 0x29
'ssigg', # 0x2a
'ssigs', # 0x2b
'ssin', # 0x2c
'ssinj', # 0x2d
'ssinh', # 0x2e
'ssid', # 0x2f
'ssil', # 0x30
'ssilg', # 0x31
'ssilm', # 0x32
'ssilb', # 0x33
'ssils', # 0x34
'ssilt', # 0x35
'ssilp', # 0x36
'ssilh', # 0x37
'ssim', # 0x38
'ssib', # 0x39
'ssibs', # 0x3a
'ssis', # 0x3b
'ssiss', # 0x3c
'ssing', # 0x3d
'ssij', # 0x3e
'ssic', # 0x3f
'ssik', # 0x40
'ssit', # 0x41
'ssip', # 0x42
'ssih', # 0x43
'a', # 0x44
'ag', # 0x45
'agg', # 0x46
'ags', # 0x47
'an', # 0x48
'anj', # 0x49
'anh', # 0x4a
'ad', # 0x4b
'al', # 0x4c
'alg', # 0x4d
'alm', # 0x4e
'alb', # 0x4f
'als', # 0x50
'alt', # 0x51
'alp', # 0x52
'alh', # 0x53
'am', # 0x54
'ab', # 0x55
'abs', # 0x56
'as', # 0x57
'ass', # 0x58
'ang', # 0x59
'aj', # 0x5a
'ac', # 0x5b
'ak', # 0x5c
'at', # 0x5d
'ap', # 0x5e
'ah', # 0x5f
'ae', # 0x60
'aeg', # 0x61
'aegg', # 0x62
'aegs', # 0x63
'aen', # 0x64
'aenj', # 0x65
'aenh', # 0x66
'aed', # 0x67
'ael', # 0x68
'aelg', # 0x69
'aelm', # 0x6a
'aelb', # 0x6b
'aels', # 0x6c
'aelt', # 0x6d
'aelp', # 0x6e
'aelh', # 0x6f
'aem', # 0x70
'aeb', # 0x71
'aebs', # 0x72
'aes', # 0x73
'aess', # 0x74
'aeng', # 0x75
'aej', # 0x76
'aec', # 0x77
'aek', # 0x78
'aet', # 0x79
'aep', # 0x7a
'aeh', # 0x7b
'ya', # 0x7c
'yag', # 0x7d
'yagg', # 0x7e
'yags', # 0x7f
'yan', # 0x80
'yanj', # 0x81
'yanh', # 0x82
'yad', # 0x83
'yal', # 0x84
'yalg', # 0x85
'yalm', # 0x86
'yalb', # 0x87
'yals', # 0x88
'yalt', # 0x89
'yalp', # 0x8a
'yalh', # 0x8b
'yam', # 0x8c
'yab', # 0x8d
'yabs', # 0x8e
'yas', # 0x8f
'yass', # 0x90
'yang', # 0x91
'yaj', # 0x92
'yac', # 0x93
'yak', # 0x94
'yat', # 0x95
'yap', # 0x96
'yah', # 0x97
'yae', # 0x98
'yaeg', # 0x99
'yaegg', # 0x9a
'yaegs', # 0x9b
'yaen', # 0x9c
'yaenj', # 0x9d
'yaenh', # 0x9e
'yaed', # 0x9f
'yael', # 0xa0
'yaelg', # 0xa1
'yaelm', # 0xa2
'yaelb', # 0xa3
'yaels', # 0xa4
'yaelt', # 0xa5
'yaelp', # 0xa6
'yaelh', # 0xa7
'yaem', # 0xa8
'yaeb', # 0xa9
'yaebs', # 0xaa
'yaes', # 0xab
'yaess', # 0xac
'yaeng', # 0xad
'yaej', # 0xae
'yaec', # 0xaf
'yaek', # 0xb0
'yaet', # 0xb1
'yaep', # 0xb2
'yaeh', # 0xb3
'eo', # 0xb4
'eog', # 0xb5
'eogg', # 0xb6
'eogs', # 0xb7
'eon', # 0xb8
'eonj', # 0xb9
'eonh', # 0xba
'eod', # 0xbb
'eol', # 0xbc
'eolg', # 0xbd
'eolm', # 0xbe
'eolb', # 0xbf
'eols', # 0xc0
'eolt', # 0xc1
'eolp', # 0xc2
'eolh', # 0xc3
'eom', # 0xc4
'eob', # 0xc5
'eobs', # 0xc6
'eos', # 0xc7
'eoss', # 0xc8
'eong', # 0xc9
'eoj', # 0xca
'eoc', # 0xcb
'eok', # 0xcc
'eot', # 0xcd
'eop', # 0xce
'eoh', # 0xcf
'e', # 0xd0
'eg', # 0xd1
'egg', # 0xd2
'egs', # 0xd3
'en', # 0xd4
'enj', # 0xd5
'enh', # 0xd6
'ed', # 0xd7
'el', # 0xd8
'elg', # 0xd9
'elm', # 0xda
'elb', # 0xdb
'els', # 0xdc
'elt', # 0xdd
'elp', # 0xde
'elh', # 0xdf
'em', # 0xe0
'eb', # 0xe1
'ebs', # 0xe2
'es', # 0xe3
'ess', # 0xe4
'eng', # 0xe5
'ej', # 0xe6
'ec', # 0xe7
'ek', # 0xe8
'et', # 0xe9
'ep', # 0xea
'eh', # 0xeb
'yeo', # 0xec
'yeog', # 0xed
'yeogg', # 0xee
'yeogs', # 0xef
'yeon', # 0xf0
'yeonj', # 0xf1
'yeonh', # 0xf2
'yeod', # 0xf3
'yeol', # 0xf4
'yeolg', # 0xf5
'yeolm', # 0xf6
'yeolb', # 0xf7
'yeols', # 0xf8
'yeolt', # 0xf9
'yeolp', # 0xfa
'yeolh', # 0xfb
'yeom', # 0xfc
'yeob', # 0xfd
'yeobs', # 0xfe
'yeos', # 0xff
)
| bsd-3-clause |
topazproject/topaz | tests/test_celldict.py | 3 | 1141 | from topaz.celldict import CellDict, Cell, GlobalsDict
from .base import BaseTopazTest
class TestCellDict(BaseTopazTest):
def test_single_set(self, space):
c = CellDict()
v = c.version
c.set(space, "a", 2)
assert c.version is not v
assert c._get_cell("a", c.version) == 2
def test_multi_set(self, space):
c = CellDict()
c.set(space, "a", 2)
v = c.version
c.set(space, "a", 3)
assert isinstance(c._get_cell("a", c.version), Cell)
assert c.version is not v
v = c.version
c.set(space, "a", 4)
assert isinstance(c._get_cell("a", c.version), Cell)
assert c.version is v
def test_globals(self, space):
space.stuff = 4
g = GlobalsDict()
g.define_virtual("x", lambda s: s.stuff)
assert g.get(space, "x") == 4
with self.raises(space, "NameError"):
g.set(space, "x", 5)
g.define_virtual("y", lambda s: s.stuff, lambda s, v: setattr(s, "stuff", v))
assert g.get(space, "y") == 4
g.set(space, "y", 5)
assert g.get(space, "y") == 5
| bsd-3-clause |
MonicaHsu/truvaluation | venv/lib/python2.7/site-packages/simplejson/tests/test_item_sort_key.py | 140 | 1127 | from unittest import TestCase
import simplejson as json
from operator import itemgetter
class TestItemSortKey(TestCase):
def test_simple_first(self):
a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
json.dumps(a, item_sort_key=json.simple_first))
def test_case(self):
a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=itemgetter(0)))
self.assertEqual(
'{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
| mit |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/web/test/test_util.py | 16 | 12590 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.util}.
"""
from __future__ import absolute_import, division
import gc
from twisted.python.failure import Failure
from twisted.trial.unittest import SynchronousTestCase, TestCase
from twisted.internet import defer
from twisted.python.compat import _PY3, intToBytes, networkString
from twisted.web import resource, util
from twisted.web.error import FlattenerError
from twisted.web.http import FOUND
from twisted.web.server import Request
from twisted.web.template import TagLoader, flattenString, tags
from twisted.web.test.requesthelper import DummyChannel, DummyRequest
from twisted.web.util import DeferredResource
from twisted.web.util import _SourceFragmentElement, _FrameElement
from twisted.web.util import _StackElement, FailureElement, formatFailure
from twisted.web.util import redirectTo, _SourceLineElement
class RedirectToTests(TestCase):
"""
Tests for L{redirectTo}.
"""
def test_headersAndCode(self):
"""
L{redirectTo} will set the C{Location} and C{Content-Type} headers on
its request, and set the response code to C{FOUND}, so the browser will
be redirected.
"""
request = Request(DummyChannel(), True)
request.method = b'GET'
targetURL = b"http://target.example.com/4321"
redirectTo(targetURL, request)
self.assertEqual(request.code, FOUND)
self.assertEqual(
request.responseHeaders.getRawHeaders(b'location'), [targetURL])
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-type'),
[b'text/html; charset=utf-8'])
def test_redirectToUnicodeURL(self) :
"""
L{redirectTo} will raise TypeError if unicode object is passed in URL
"""
request = Request(DummyChannel(), True)
request.method = b'GET'
targetURL = u'http://target.example.com/4321'
self.assertRaises(TypeError, redirectTo, targetURL, request)
class FailureElementTests(TestCase):
"""
Tests for L{FailureElement} and related helpers which can render a
L{Failure} as an HTML string.
"""
def setUp(self):
"""
Create a L{Failure} which can be used by the rendering tests.
"""
def lineNumberProbeAlsoBroken():
message = "This is a problem"
raise Exception(message)
# Figure out the line number from which the exception will be raised.
self.base = lineNumberProbeAlsoBroken.__code__.co_firstlineno + 1
try:
lineNumberProbeAlsoBroken()
except:
self.failure = Failure(captureVars=True)
self.frame = self.failure.frames[-1]
def test_sourceLineElement(self):
"""
L{_SourceLineElement} renders a source line and line number.
"""
element = _SourceLineElement(
TagLoader(tags.div(
tags.span(render="lineNumber"),
tags.span(render="sourceLine"))),
50, " print 'hello'")
d = flattenString(None, element)
expected = (
u"<div><span>50</span><span>"
u" \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}print 'hello'</span></div>")
d.addCallback(
self.assertEqual, expected.encode('utf-8'))
return d
def test_sourceFragmentElement(self):
"""
L{_SourceFragmentElement} renders source lines at and around the line
number indicated by a frame object.
"""
element = _SourceFragmentElement(
TagLoader(tags.div(
tags.span(render="lineNumber"),
tags.span(render="sourceLine"),
render="sourceLines")),
self.frame)
source = [
u' \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}message = '
u'"This is a problem"',
u' \N{NO-BREAK SPACE} \N{NO-BREAK SPACE}raise Exception(message)',
u'# Figure out the line number from which the exception will be '
u'raised.',
]
d = flattenString(None, element)
if _PY3:
stringToCheckFor = ''.join([
'<div class="snippet%sLine"><span>%d</span><span>%s</span>'
'</div>' % (
["", "Highlight"][lineNumber == 1],
self.base + lineNumber,
(u" \N{NO-BREAK SPACE}" * 4 + sourceLine))
for (lineNumber, sourceLine)
in enumerate(source)]).encode("utf8")
else:
stringToCheckFor = ''.join([
'<div class="snippet%sLine"><span>%d</span><span>%s</span>'
'</div>' % (
["", "Highlight"][lineNumber == 1],
self.base + lineNumber,
(u" \N{NO-BREAK SPACE}" * 4 + sourceLine).encode('utf8'))
for (lineNumber, sourceLine)
in enumerate(source)])
d.addCallback(self.assertEqual, stringToCheckFor)
return d
def test_frameElementFilename(self):
"""
The I{filename} renderer of L{_FrameElement} renders the filename
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="filename")),
self.frame)
d = flattenString(None, element)
d.addCallback(
# __file__ differs depending on whether an up-to-date .pyc file
# already existed.
self.assertEqual,
b"<span>" + networkString(__file__.rstrip('c')) + b"</span>")
return d
def test_frameElementLineNumber(self):
"""
The I{lineNumber} renderer of L{_FrameElement} renders the line number
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="lineNumber")),
self.frame)
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b"<span>" + intToBytes(self.base + 1) + b"</span>")
return d
def test_frameElementFunction(self):
"""
The I{function} renderer of L{_FrameElement} renders the line number
associated with the frame object used to initialize the
L{_FrameElement}.
"""
element = _FrameElement(
TagLoader(tags.span(render="function")),
self.frame)
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b"<span>lineNumberProbeAlsoBroken</span>")
return d
def test_frameElementSource(self):
"""
The I{source} renderer of L{_FrameElement} renders the source code near
the source filename/line number associated with the frame object used to
initialize the L{_FrameElement}.
"""
element = _FrameElement(None, self.frame)
renderer = element.lookupRenderMethod("source")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, _SourceFragmentElement)
self.assertIdentical(result.frame, self.frame)
self.assertEqual([tag], result.loader.load())
def test_stackElement(self):
"""
The I{frames} renderer of L{_StackElement} renders each stack frame in
the list of frames used to initialize the L{_StackElement}.
"""
element = _StackElement(None, self.failure.frames[:2])
renderer = element.lookupRenderMethod("frames")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], _FrameElement)
self.assertIdentical(result[0].frame, self.failure.frames[0])
self.assertIsInstance(result[1], _FrameElement)
self.assertIdentical(result[1].frame, self.failure.frames[1])
# They must not share the same tag object.
self.assertNotEqual(result[0].loader.load(), result[1].loader.load())
self.assertEqual(2, len(result))
def test_failureElementTraceback(self):
"""
The I{traceback} renderer of L{FailureElement} renders the failure's
stack frames using L{_StackElement}.
"""
element = FailureElement(self.failure)
renderer = element.lookupRenderMethod("traceback")
tag = tags.div()
result = renderer(None, tag)
self.assertIsInstance(result, _StackElement)
self.assertIdentical(result.stackFrames, self.failure.frames)
self.assertEqual([tag], result.loader.load())
def test_failureElementType(self):
"""
The I{type} renderer of L{FailureElement} renders the failure's
exception type.
"""
element = FailureElement(
self.failure, TagLoader(tags.span(render="type")))
d = flattenString(None, element)
if _PY3:
exc = b"builtins.Exception"
else:
exc = b"exceptions.Exception"
d.addCallback(
self.assertEqual, b"<span>" + exc + b"</span>")
return d
def test_failureElementValue(self):
"""
The I{value} renderer of L{FailureElement} renders the value's exception
value.
"""
element = FailureElement(
self.failure, TagLoader(tags.span(render="value")))
d = flattenString(None, element)
d.addCallback(
self.assertEqual, b'<span>This is a problem</span>')
return d
class FormatFailureTests(TestCase):
"""
Tests for L{twisted.web.util.formatFailure} which returns an HTML string
representing the L{Failure} instance passed to it.
"""
def test_flattenerError(self):
"""
If there is an error flattening the L{Failure} instance,
L{formatFailure} raises L{FlattenerError}.
"""
self.assertRaises(FlattenerError, formatFailure, object())
def test_returnsBytes(self):
"""
The return value of L{formatFailure} is a C{str} instance (not a
C{unicode} instance) with numeric character references for any non-ASCII
characters meant to appear in the output.
"""
try:
raise Exception("Fake bug")
except:
result = formatFailure(Failure())
self.assertIsInstance(result, bytes)
if _PY3:
self.assertTrue(all(ch < 128 for ch in result))
else:
self.assertTrue(all(ord(ch) < 128 for ch in result))
# Indentation happens to rely on NO-BREAK SPACE
self.assertIn(b" ", result)
class SDResource(resource.Resource):
def __init__(self,default):
self.default = default
def getChildWithDefault(self, name, request):
d = defer.succeed(self.default)
resource = util.DeferredResource(d)
return resource.getChildWithDefault(name, request)
class DeferredResourceTests(SynchronousTestCase):
"""
Tests for L{DeferredResource}.
"""
def testDeferredResource(self):
r = resource.Resource()
r.isLeaf = 1
s = SDResource(r)
d = DummyRequest(['foo', 'bar', 'baz'])
resource.getChildForRequest(s, d)
self.assertEqual(d.postpath, ['bar', 'baz'])
def test_render(self):
"""
L{DeferredResource} uses the request object's C{render} method to
render the resource which is the result of the L{Deferred} being
handled.
"""
rendered = []
request = DummyRequest([])
request.render = rendered.append
result = resource.Resource()
deferredResource = DeferredResource(defer.succeed(result))
deferredResource.render(request)
self.assertEqual(rendered, [result])
def test_renderNoFailure(self):
"""
If the L{Deferred} fails, L{DeferredResource} reports the failure via
C{processingFailed}, and does not cause an unhandled error to be
logged.
"""
request = DummyRequest([])
d = request.notifyFinish()
failure = Failure(RuntimeError())
deferredResource = DeferredResource(defer.fail(failure))
deferredResource.render(request)
self.assertEqual(self.failureResultOf(d), failure)
del deferredResource
gc.collect()
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(errors, [])
| mit |
USGSDenverPychron/pychron | pychron/pipeline/tasks/tree_node.py | 1 | 3345 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.qt.QtCore import Qt
from pyface.qt.QtGui import QColor
from traitsui.tree_node import TreeNode
from pychron.envisage.resources import icon
from pychron.pipeline.engine import Pipeline
from pychron.pipeline.nodes import ReviewNode
class PipelineGroupTreeNode(TreeNode):
icon_name = ''
label = 'name'
class PipelineTreeNode(TreeNode):
icon_name = ''
label = 'name'
def get_background(self, obj):
if isinstance(obj, Pipeline):
c = QColor(Qt.white)
else:
if isinstance(obj, ReviewNode):
if not obj.enabled:
c = QColor('#ff8080') # light red
else:
c = QColor(Qt.cyan)
elif obj.skip_configure:
c = QColor('#D05BFF')
elif not obj.enabled:
c = QColor('#ff8080') # light red
else:
c = super(PipelineTreeNode, self).get_background(obj)
return c
def get_status_color(self, obj):
c = QColor(Qt.white)
if not isinstance(obj, Pipeline):
c = QColor(Qt.lightGray)
if obj.visited:
c = QColor(Qt.green)
elif obj.active:
c = QColor('orange')
# if obj.status == 'ran':
# c = QColor('green')
# elif obj.status == 'paused':
# c = QColor('orange')
return c
def get_icon(self, obj, is_expanded):
name = self.icon_name
if not isinstance(obj, Pipeline):
            if not obj.enabled:
name = 'cancel'
return icon(name)
# def get_background(self, obj):
# # print 'get', obj, obj.visited
# return 'green' if obj.visited else 'white'
class DataTreeNode(PipelineTreeNode):
icon_name = 'table'
class FilterTreeNode(PipelineTreeNode):
icon_name = 'table_filter'
class IdeogramTreeNode(PipelineTreeNode):
icon_name = 'histogram'
class SpectrumTreeNode(PipelineTreeNode):
icon_name = ''
class SeriesTreeNode(PipelineTreeNode):
icon_name = ''
class PDFTreeNode(PipelineTreeNode):
icon_name = 'file_pdf'
class GroupingTreeNode(PipelineTreeNode):
pass
class DBSaveTreeNode(PipelineTreeNode):
icon_name = 'database_save'
class FindTreeNode(PipelineTreeNode):
icon_name = 'find'
class FitTreeNode(PipelineTreeNode):
icon_name = 'lightning'
class ReviewTreeNode(PipelineTreeNode):
pass
# ============= EOF =============================================
| apache-2.0 |
vigilv/scikit-learn | sklearn/gaussian_process/correlation_models.py | 230 | 7630 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
"""
Absolute exponential autocorrelation model.
(Ornstein-Uhlenbeck stochastic process)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i| )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.abs(np.asarray(d, dtype=np.float))
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
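# Worked example (illustrative values, not part of the original module): with
# theta = [0.5, 2.0] and a single distance vector d = [1.0, 0.5], the model
# above gives r = exp(-(0.5*|1.0| + 2.0*|0.5|)) = exp(-1.5), roughly 0.223.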
def squared_exponential(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
"""
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p )
i = 1
Parameters
----------
theta : array_like
An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
autocorrelation parameter(s) (theta, p).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if n_features > 1 and lth == 2:
theta = np.hstack([np.repeat(theta[0], n_features), theta[1]])
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
r = np.exp(- np.sum(td, 1))
return r
def pure_nugget(theta, d):
"""
Spatial independence correlation model (pure nugget).
(Useful when one wants to solve an ordinary least squares problem!)::
n
theta, d --> r(theta, d) = 1 if sum |d_i| == 0
i = 1
0 otherwise
Parameters
----------
theta : array_like
None.
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
def cubic(theta, d):
"""
Cubic correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
r = np.prod(ss, 1)
return r
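# Worked example (illustrative values, not part of the original module): for an
# isotropic theta = 0.5 and d = 1.0, td = 0.5 and the cubic model above gives
# r = 1 - 3*(0.5)**2 + 2*(0.5)**3 = 0.5; any td >= 1 is clipped to 1, so that
# feature contributes a factor of 0 and the whole product r becomes 0.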
def linear(theta, d):
"""
Linear correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - theta_j*d_ij) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
r = np.prod(ss, 1)
return r
| bsd-3-clause |
alexrao/YouCompleteMe | third_party/ycmd/third_party/jedi/test/test_evaluate/test_buildout_detection.py | 13 | 2751 | import os
from textwrap import dedent
from jedi._compatibility import u
from jedi.evaluate.sys_path import (_get_parent_dir_with_file,
_get_buildout_scripts,
sys_path_with_modifications,
_check_module)
from jedi.evaluate import Evaluator
from jedi.parser import Parser, load_grammar
from ..helpers import cwd_at
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_parent_dir_with_file():
parent = _get_parent_dir_with_file(
os.path.abspath(os.curdir), 'buildout.cfg')
assert parent is not None
assert parent.endswith(os.path.join('test', 'test_evaluate', 'buildout_project'))
@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
def test_buildout_detection():
scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
assert len(scripts) == 1
curdir = os.path.abspath(os.curdir)
appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
assert scripts[0] == appdir_path
def test_append_on_non_sys_path():
SRC = dedent(u("""
class Dummy(object):
path = []
d = Dummy()
d.path.append('foo')"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert len(paths) > 0
assert 'foo' not in paths
def test_path_from_invalid_sys_path_assignment():
SRC = dedent(u("""
import sys
sys.path = 'invalid'"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert len(paths) > 0
assert 'invalid' not in paths
@cwd_at('test/test_evaluate/buildout_project/src/proj_name/')
def test_sys_path_with_modifications():
SRC = dedent(u("""
import os
"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
p.module.path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
paths = sys_path_with_modifications(Evaluator(grammar), p.module)
assert '/tmp/.buildout/eggs/important_package.egg' in paths
def test_path_from_sys_path_assignment():
SRC = dedent(u("""
#!/usr/bin/python
import sys
sys.path[0:0] = [
'/usr/lib/python3.4/site-packages',
'/home/test/.buildout/eggs/important_package.egg'
]
path[0:0] = [1]
import important_package
if __name__ == '__main__':
sys.exit(important_package.main())"""))
grammar = load_grammar()
p = Parser(grammar, SRC)
paths = _check_module(Evaluator(grammar), p.module)
assert 1 not in paths
assert '/home/test/.buildout/eggs/important_package.egg' in paths
| gpl-3.0 |
tensorflow/lucid | lucid/scratch/web/observable.py | 1 | 2465 | import json
from lucid.misc.io.showing import _display_html
def renderObservable(url, cells=None, data=None):
"""Display observable notebook cells in iPython.
Args:
url: url fragment to observable notebook. ex: '@observablehq/downloading-and-embedding-notebooks'
cells: an array of strings for the names of cells you want to render. ex: ['viewof stage', 'viewof x']
data: a dictionary of variables that you'd like to overwrite. ex: {'x': 200, 'width': 500}
"""
head = """
<div id="output"></div>
<div>
<a target="_blank" href='https://observablehq.com/{}'>source</a>
</div>
<script type="module">
""".format(url)
runtimeImport = "import {Runtime} from 'https://unpkg.com/@observablehq/notebook-runtime?module';"
notebookImport = "import notebook from 'https://api.observablehq.com/{0}.js';".format(url)
cellsSerialized = "let cells = {};".format(json.dumps(cells))
dataSerialized = "let data = {};".format(json.dumps(data))
code = """
const outputEl = document.getElementById("output");
// Converts data into a map
let dataMap = new Map();
if (data) {
Object.keys(data).forEach(key => {
dataMap.set(key, data[key]);
});
}
// Converts cells into a map
let cellsMap = new Map();
if (cells) {
cells.forEach((key, i) => {
const element = document.createElement("div");
outputEl.appendChild(element)
cellsMap.set(key, element)
});
}
function render(_node, value) {
if (!(value instanceof Element)) {
const el = document.createElement("span");
el.innerHTML = value;
value = el;
}
if (_node.firstChild !== value) {
if (_node.firstChild) {
while (_node.lastChild !== _node.firstChild) _node.removeChild(_node.lastChild);
_node.replaceChild(value, _node.firstChild);
} else {
_node.appendChild(value);
}
}
}
Runtime.load(notebook, (variable) => {
// Override a variable with a passed value
if (dataMap.has(variable.name)) {
variable.value = dataMap.get(variable.name)
}
// Render the output to the corrent element
if (cellsMap.has(variable.name)) {
return { fulfilled: (value) => render(cellsMap.get(variable.name), value) };
} else {
return true;
}
});
"""
foot = "</script>"
_display_html(
head + runtimeImport + notebookImport + cellsSerialized + dataSerialized + code + foot
)
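# Usage sketch (arguments taken from the docstring above; values are illustrative):
# renderObservable('@observablehq/downloading-and-embedding-notebooks',
#                  cells=['viewof stage', 'viewof x'],
#                  data={'x': 200, 'width': 500})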
| apache-2.0 |
quixoten/ansible | lib/ansible/cli/doc.py | 17 | 11990 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
""" Vault command line class """
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"]
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.print_exc())
display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
all_keys = []
for (k,v) in iteritems(doc['options']):
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
self.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in self.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in self.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
columns = display.columns
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
pad = 31
subdent = ''.join([" " for a in xrange(pad)])
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
if opt.get('required', False):
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def get_man_text(self, doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
if isinstance(doc['description'], list):
desc = " ".join(doc['description'])
else:
desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" "))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n%s\n" % doc['deprecated'])
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
if opt.get('required', False):
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
desc = " ".join(opt['description'])
else:
desc = opt['description']
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt:
default = str(opt['default'])
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), limit-6, initial_indent=" ", subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
| gpl-3.0 |
dataxu/ansible | lib/ansible/module_utils/facts/other/facter.py | 232 | 2985 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter'])
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
# Note that this mirrors previous facter behavior, where there isnt
# a 'ansible_facter' key in the main fact dict, but instead, 'facter_whatever'
# items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
# TODO: if we fail, should we add an empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
# FIXME: maybe raise a FactCollectorError with some info attrs?
pass
return facter_dict
| gpl-3.0 |
ict-felix/stack | expedient/src/python/expedient/clearinghouse/commands/management/commands/setup_media.py | 2 | 1035 | '''Command to link static content to settings.STATIC_DOC_ROOT
Created on Aug 24, 2010
@author: jnaous
'''
from django.core.management.base import NoArgsCommand
from django.conf import settings
import pkg_resources
import os
class Command(NoArgsCommand):
help = "Link static content from package to %s" % settings.STATIC_DOC_ROOT
def handle_noargs(self, **options):
pkg_resources.ensure_directory(settings.MEDIA_ROOT)
pkg_resources.ensure_directory(
os.path.join(settings.MEDIA_ROOT, settings.AGGREGATE_LOGOS_DIR))
media_dir = os.path.join(
settings.SRC_DIR, "static", "expedient", "clearinghouse", "media")
for d in "css", "img", "js":
path = os.path.join(media_dir, d)
target = os.path.join(settings.MEDIA_ROOT, d)
if not os.access(target, os.F_OK):
os.symlink(path, target)
print "Created media directory and symlinks in %s" \
% settings.MEDIA_ROOT
| apache-2.0 |
robovm/robovm-studio | plugins/hg4idea/testData/bin/mercurial/pvec.py | 94 | 5989 | # pvec.py - probabilistic vector clocks for Mercurial
#
# Copyright 2012 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''
A "pvec" is a changeset property based on the theory of vector clocks
that can be compared to discover relatedness without consulting a
graph. This can be useful for tasks like determining how a
disconnected patch relates to a repository.
Currently a pvec consists of 448 bits, of which 24 are 'depth' and the
remainder are a bit vector. It is represented as a 70-character base85
string.
Construction:
- a root changeset has a depth of 0 and a bit vector based on its hash
- a normal commit has a changeset where depth is increased by one and
one bit vector bit is flipped based on its hash
- a merge changeset pvec is constructed by copying changes from one pvec into
the other to balance its depth
Properties:
- for linear changes, difference in depth is always <= hamming distance
- otherwise, changes are probably divergent
- when hamming distance is < 200, we can reliably detect when pvecs are near
Issues:
- hamming distance ceases to work over distances of ~ 200
- detecting divergence is less accurate when the common ancestor is very close
to either revision or total distance is high
- this could probably be improved by modeling the relation between
delta and hdist
Uses:
- a patch pvec can be used to locate the nearest available common ancestor for
resolving conflicts
- ordering of patches can be established without a DAG
- two head pvecs can be compared to determine whether push/pull/merge is needed
and approximately how many changesets are involved
- can be used to find a heuristic divergence measure between changesets on
different branches
'''
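# Illustrative sketch (not part of the original module): given two changectx
# objects 'a' and 'b' from the same repository, their pvecs can be compared
# without walking the revision graph. The names below are hypothetical.
#
#   pa, pb = ctxpvec(a), ctxpvec(b)
#   if pa | pb:        # probably divergent: neither is an ancestor of the other
#       pass
#   elif pa < pb:      # 'a' is likely an ancestor of 'b'
#       gap = pb - pa  # approximate number of changesets separating them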
import base85, util
from node import nullrev
_size = 448 # 70 chars b85-encoded
_bytes = _size / 8
_depthbits = 24
_depthbytes = _depthbits / 8
_vecbytes = _bytes - _depthbytes
_vecbits = _vecbytes * 8
_radius = (_vecbits - 30) / 2 # high probability vectors are related
def _bin(bs):
'''convert a bytestring to a long'''
v = 0
for b in bs:
v = v * 256 + ord(b)
return v
def _str(v, l):
bs = ""
for p in xrange(l):
bs = chr(v & 255) + bs
v >>= 8
return bs
def _split(b):
'''depth and bitvec'''
return _bin(b[:_depthbytes]), _bin(b[_depthbytes:])
def _join(depth, bitvec):
return _str(depth, _depthbytes) + _str(bitvec, _vecbytes)
def _hweight(x):
c = 0
while x:
if x & 1:
c += 1
x >>= 1
return c
_htab = [_hweight(x) for x in xrange(256)]
def _hamming(a, b):
'''find the hamming distance between two longs'''
d = a ^ b
c = 0
while d:
c += _htab[d & 0xff]
d >>= 8
return c
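# Worked example: _hamming(0b1100, 0b1010) == 2, since the xor of the two
# values is 0b0110, which has exactly two bits set.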
def _mergevec(x, y, c):
# Ideally, this function would be x ^ y ^ ancestor, but finding
# ancestors is a nuisance. So instead we find the minimal number
# of changes to balance the depth and hamming distance
d1, v1 = x
d2, v2 = y
if d1 < d2:
d1, d2, v1, v2 = d2, d1, v2, v1
hdist = _hamming(v1, v2)
ddist = d1 - d2
v = v1
m = v1 ^ v2 # mask of different bits
i = 1
if hdist > ddist:
# if delta = 10 and hdist = 100, then we need to go up 55 steps
# to the ancestor and down 45
changes = (hdist - ddist + 1) / 2
else:
# must make at least one change
changes = 1
depth = d1 + changes
# copy changes from v2
if m:
while changes:
if m & i:
v ^= i
changes -= 1
i <<= 1
else:
v = _flipbit(v, c)
return depth, v
def _flipbit(v, node):
# converting bit strings to longs is slow
bit = (hash(node) & 0xffffffff) % _vecbits
return v ^ (1<<bit)
def ctxpvec(ctx):
'''construct a pvec for ctx while filling in the cache'''
r = ctx._repo
if not util.safehasattr(r, "_pveccache"):
r._pveccache = {}
pvc = r._pveccache
if ctx.rev() not in pvc:
cl = r.changelog
for n in xrange(ctx.rev() + 1):
if n not in pvc:
node = cl.node(n)
p1, p2 = cl.parentrevs(n)
if p1 == nullrev:
# start with a 'random' vector at root
pvc[n] = (0, _bin((node * 3)[:_vecbytes]))
elif p2 == nullrev:
d, v = pvc[p1]
pvc[n] = (d + 1, _flipbit(v, node))
else:
pvc[n] = _mergevec(pvc[p1], pvc[p2], node)
bs = _join(*pvc[ctx.rev()])
return pvec(base85.b85encode(bs))
class pvec(object):
def __init__(self, hashorctx):
if isinstance(hashorctx, str):
self._bs = hashorctx
self._depth, self._vec = _split(base85.b85decode(hashorctx))
else:
self._vec = ctxpvec(hashorctx)
def __str__(self):
return self._bs
def __eq__(self, b):
return self._vec == b._vec and self._depth == b._depth
def __lt__(self, b):
delta = b._depth - self._depth
if delta < 0:
return False # always correct
if _hamming(self._vec, b._vec) > delta:
return False
return True
def __gt__(self, b):
return b < self
def __or__(self, b):
delta = abs(b._depth - self._depth)
if _hamming(self._vec, b._vec) <= delta:
return False
return True
def __sub__(self, b):
if self | b:
raise ValueError("concurrent pvecs")
return self._depth - b._depth
def distance(self, b):
d = abs(b._depth - self._depth)
h = _hamming(self._vec, b._vec)
return max(d, h)
def near(self, b):
dist = abs(b._depth - self._depth)
if dist > _radius or _hamming(self._vec, b._vec) > _radius:
return False
| apache-2.0 |
imply/chuu | ppapi/generators/idl_parser.py | 25 | 37745 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Parser for PPAPI IDL """
#
# IDL Parser
#
# The parser uses the PLY yacc library to build a set of parsing rules based
# on WebIDL.
#
# WebIDL, and WebIDL regular expressions can be found at:
# http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
import getopt
import glob
import os.path
import re
import sys
import time
from idl_ast import IDLAst
from idl_log import ErrOut, InfoOut, WarnOut
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLFile, IDLNode
from idl_option import GetOption, Option, ParseOptions
from idl_lint import Lint
from idl_visitor import IDLVisitor
from ply import lex
from ply import yacc
Option('build_debug', 'Debug tree building.')
Option('parse_debug', 'Debug parse reduction steps.')
Option('token_debug', 'Debug token generation.')
Option('dump_tree', 'Dump the tree.')
Option('srcroot', 'Working directory.', default=os.path.join('..', 'api'))
Option('include_private', 'Include private IDL directory in default API paths.')
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
ERROR_REMAP = {
'Unexpected ")" after "(".' : 'Empty argument list.',
'Unexpected ")" after ",".' : 'Missing argument.',
'Unexpected "}" after ",".' : 'Trailing comma in block.',
'Unexpected "}" after "{".' : 'Unexpected empty block.',
'Unexpected comment after "}".' : 'Unexpected trailing comment.',
'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
# DumpReduction
#
# Prints out the set of items which matched a particular pattern and the
# new item or set it was reduced to.
def DumpReduction(cls, p):
if p[0] is None:
InfoOut.Log("OBJ: %s(%d) - None\n" % (cls, len(p)))
InfoOut.Log(" [%s]\n" % [str(x) for x in p[1:]])
else:
out = ""
for index in range(len(p) - 1):
out += " >%s< " % str(p[index + 1])
InfoOut.Log("OBJ: %s(%d) - %s : %s\n" % (cls, len(p), str(p[0]), out))
# CopyToList
#
# Takes an input item, list, or None, and returns a new list of that set.
def CopyToList(item):
# If the item is 'Empty' make it an empty list
if not item: item = []
# If the item is not a list
if type(item) is not type([]): item = [item]
# Make a copy we can modify
return list(item)
# ListFromConcat
#
# Generate a new List by joining of two sets of inputs which can be an
# individual item, a list of items, or None.
def ListFromConcat(*items):
itemsout = []
for item in items:
itemlist = CopyToList(item)
itemsout.extend(itemlist)
return itemsout
# TokenTypeName
#
# Generate a string which has the type and value of the token.
def TokenTypeName(t):
if t.type == 'SYMBOL': return 'symbol %s' % t.value
if t.type in ['HEX', 'INT', 'OCT', 'FLOAT']:
return 'value %s' % t.value
if t.type == 'STRING' : return 'string "%s"' % t.value
if t.type == 'COMMENT' : return 'comment'
if t.type == t.value: return '"%s"' % t.value
return 'keyword "%s"' % t.value
#
# IDL Parser
#
# The Parser inherits from the Lexer to provide PLY with the tokenizing
# definitions. Parsing patterns are encoded as functions where p_<name> is
# called any time a pattern matching the function documentation is found.
# Patterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
#
# The parser uses the following conventions:
# a <type>_block defines a block of <type> definitions in the form of:
# [comment] [ext_attr_block] <type> <name> '{' <type>_list '}' ';'
# A block is reduced by returning an object of <type> with a name of <name>
# which in turn has <type>_list as children.
#
# A [comment] is an optional C style comment block enclosed in /* ... */ which
# is appended to the adjacent node as a child.
#
# A [ext_attr_block] is an optional list of Extended Attributes which is
# appended to the adjacent node as a child.
#
# a <type>_list defines a list of <type> items which will be passed as a
# list of children to the parent pattern. A list is in the form of:
# [comment] [ext_attr_block] <...DEF...> ';' <type>_list | (empty)
# or
# [comment] [ext_attr_block] <...DEF...> <type>_cont
#
# In the first form, the list is reduced recursively, where the right side
# <type>_list is first reduced then joined with pattern currently being
# matched. The list is terminated when the (empty) pattern is matched.
#
# In the second form the list is reduced recursively, where the right side
# <type>_cont is first reduced then joined with the pattern currently being
# matched. The type_<cont> is in the form of:
# ',' <type>_list | (empty)
# The <type>_cont form is used to consume the ',' which only occurs when
# there is more than one object in the list. The <type>_cont also provides
# the terminating (empty) definition.
#
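#
# As an illustrative sketch (not an actual rule of this grammar), a list rule
# of the first form above would be written as:
#
#   def p_member_list(self, p):
#     """member_list : member ';' member_list
#                    | """
#     if len(p) > 1:
#       p[0] = ListFromConcat(p[1], p[3])
#
# where p[1] is the newly reduced member, p[3] is the already-reduced tail of
# the list, and p[0] receives the joined result handed up to the parent rule.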
class IDLParser(IDLLexer):
# TOP
#
# This pattern defines the top of the parse tree. The parse tree is in the
# the form of:
#
# top
# *modifiers
# *comments
# *ext_attr_block
# ext_attr_list
# attr_arg_list
# *integer, value
# *param_list
# *typeref
#
# top_list
# describe_block
# describe_list
# enum_block
# enum_item
# interface_block
# member
# label_block
# label_item
# struct_block
# member
# typedef_decl
# typedef_data
# typedef_func
#
# (* sub matches found at multiple levels and are not truly children of top)
#
# We force all input files to start with two comments. The first comment is a
# Copyright notice followed by a set of file wide Extended Attributes, followed
# by the file comment and finally by file level patterns.
#
# Find the Copyright, File comment, and optional file wide attributes. We
# use a match with COMMENT instead of comments to force the token to be
# present. The extended attributes and the top_list become siblings which
# in turn are children of the file object created from the results of top.
def p_top(self, p):
"""top : COMMENT COMMENT ext_attr_block top_list"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = self.BuildComment('Comment', p, 2)
p[0] = ListFromConcat(Copyright, Filedoc, p[3], p[4])
if self.parse_debug: DumpReduction('top', p)
def p_top_short(self, p):
"""top : COMMENT ext_attr_block top_list"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = IDLNode('Comment', self.lexobj.filename, p.lineno(2)-1,
p.lexpos(2)-1, [self.BuildAttribute('NAME', ''),
self.BuildAttribute('FORM', 'cc')])
p[0] = ListFromConcat(Copyright, Filedoc, p[2], p[3])
if self.parse_debug: DumpReduction('top', p)
# Build a list of top level items.
def p_top_list(self, p):
"""top_list : callback_decl top_list
| describe_block top_list
| dictionary_block top_list
| enum_block top_list
| inline top_list
| interface_block top_list
| label_block top_list
| namespace top_list
| struct_block top_list
| typedef_decl top_list
| bad_decl top_list
| """
if len(p) > 2:
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('top_list', p)
# Recover from error and continue parsing at the next top match.
def p_top_error(self, p):
"""top_list : error top_list"""
p[0] = p[2]
# Recover from error and continue parsing at the next top match.
def p_bad_decl(self, p):
"""bad_decl : modifiers SYMBOL error '}' ';'"""
p[0] = []
#
# Modifier List
#
#
def p_modifiers(self, p):
"""modifiers : comments ext_attr_block"""
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('modifiers', p)
#
# Comments
#
# Comments are optional list of C style comment objects. Comments are returned
# as a list or None.
#
def p_comments(self, p):
"""comments : COMMENT comments
| """
if len(p) > 1:
child = self.BuildComment('Comment', p, 1)
p[0] = ListFromConcat(child, p[2])
if self.parse_debug: DumpReduction('comments', p)
else:
if self.parse_debug: DumpReduction('no comments', p)
#
# Namespace
#
# A namespace provides a named scope to an enclosed top_list.
#
def p_namespace(self, p):
"""namespace : modifiers NAMESPACE namespace_name '{' top_list '}' ';'"""
children = ListFromConcat(p[1], p[5])
p[0] = self.BuildNamed('Namespace', p, 3, children)
# We allow namespace names of the form foo.bar.baz.
def p_namespace_name(self, p):
"""namespace_name : SYMBOL
| SYMBOL '.' namespace_name"""
p[0] = "".join(p[1:])
#
# Dictionary
#
# A dictionary is a named list of optional and required members.
#
def p_dictionary_block(self, p):
"""dictionary_block : modifiers DICTIONARY SYMBOL '{' struct_list '}' ';'"""
p[0] = self.BuildNamed('Dictionary', p, 3, ListFromConcat(p[1], p[5]))
#
# Callback
#
# A callback is essentially a single function declaration (outside of an
# Interface).
#
def p_callback_decl(self, p):
"""callback_decl : modifiers CALLBACK SYMBOL '=' SYMBOL param_list ';'"""
children = ListFromConcat(p[1], p[6])
p[0] = self.BuildNamed('Callback', p, 3, children)
#
# Inline
#
# Inline blocks define optional code to be emitted based on the language tag,
# in the form of:
# #inline <LANGUAGE>
# <CODE>
# #endinl
#
def p_inline(self, p):
"""inline : modifiers INLINE"""
words = p[2].split()
name = self.BuildAttribute('NAME', words[1])
lines = p[2].split('\n')
value = self.BuildAttribute('VALUE', '\n'.join(lines[1:-1]) + '\n')
children = ListFromConcat(name, value, p[1])
p[0] = self.BuildProduction('Inline', p, 2, children)
if self.parse_debug: DumpReduction('inline', p)
# Extended Attributes
#
# Extended Attributes denote properties which will be applied to a node in the
# AST. A list of extended attributes are denoted by a brackets '[' ... ']'
# enclosing a comma separated list of extended attributes in the form of:
#
# Name
# Name=HEX | INT | OCT | FLOAT
# Name="STRING"
# Name=Function(arg ...)
# TODO(noelallen) -Not currently supported:
# ** Name(arg ...) ...
# ** Name=Scope::Value
#
# Extended Attributes are returned as a list or None.
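#
# For example (illustrative only, not taken from a real .idl file), a block
# such as:
#   [version=1.0, macro="FOO_INTERFACE"]
# reduces to a list of IDLAttribute key/value pairs that are attached as
# children of the node that follows it.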
def p_ext_attr_block(self, p):
"""ext_attr_block : '[' ext_attr_list ']'
| """
if len(p) > 1:
p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attr_block', p)
else:
if self.parse_debug: DumpReduction('no ext_attr_block', p)
def p_ext_attr_list(self, p):
"""ext_attr_list : SYMBOL '=' SYMBOL ext_attr_cont
| SYMBOL '=' value ext_attr_cont
| SYMBOL '=' SYMBOL param_list ext_attr_cont
| SYMBOL ext_attr_cont"""
# If there are 4 tokens plus a return slot, this must be in the form
# SYMBOL = SYMBOL|value ext_attr_cont
if len(p) == 5:
p[0] = ListFromConcat(self.BuildAttribute(p[1], p[3]), p[4])
# If there are 5 tokens plus a return slot, this must be in the form
# SYMBOL = SYMBOL (param_list) ext_attr_cont
elif len(p) == 6:
member = self.BuildNamed('Member', p, 3, [p[4]])
p[0] = ListFromConcat(self.BuildAttribute(p[1], member), p[5])
# Otherwise, this must be: SYMBOL ext_attr_cont
else:
p[0] = ListFromConcat(self.BuildAttribute(p[1], 'True'), p[2])
if self.parse_debug: DumpReduction('ext_attribute_list', p)
def p_ext_attr_list_values(self, p):
"""ext_attr_list : SYMBOL '=' '(' values ')' ext_attr_cont
| SYMBOL '=' '(' symbols ')' ext_attr_cont"""
p[0] = ListFromConcat(self.BuildAttribute(p[1], p[4]), p[6])
def p_values(self, p):
"""values : value values_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_symbols(self, p):
"""symbols : SYMBOL symbols_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_symbols_cont(self, p):
"""symbols_cont : ',' SYMBOL symbols_cont
| """
if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_values_cont(self, p):
"""values_cont : ',' value values_cont
| """
if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_ext_attr_cont(self, p):
"""ext_attr_cont : ',' ext_attr_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attribute_cont', p)
def p_ext_attr_func(self, p):
"""ext_attr_list : SYMBOL '(' attr_arg_list ')' ext_attr_cont"""
p[0] = ListFromConcat(self.BuildAttribute(p[1] + '()', p[3]), p[5])
if self.parse_debug: DumpReduction('attr_arg_func', p)
def p_ext_attr_arg_list(self, p):
"""attr_arg_list : SYMBOL attr_arg_cont
| value attr_arg_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_attr_arg_cont(self, p):
"""attr_arg_cont : ',' attr_arg_list
| """
if self.parse_debug: DumpReduction('attr_arg_cont', p)
if len(p) > 1: p[0] = p[2]
def p_attr_arg_error(self, p):
"""attr_arg_cont : error attr_arg_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('attr_arg_error', p)
#
# Describe
#
# A describe block is defined at the top level. It provides a mechanism for
# attributing a group of ext_attr to a describe_list. Members of the
# describe list are language specific 'Type' declarations
#
def p_describe_block(self, p):
"""describe_block : modifiers DESCRIBE '{' describe_list '}' ';'"""
children = ListFromConcat(p[1], p[4])
p[0] = self.BuildProduction('Describe', p, 2, children)
if self.parse_debug: DumpReduction('describe_block', p)
# Recover from describe error and continue parsing at the next top match.
def p_describe_error(self, p):
"""describe_list : error describe_list"""
p[0] = []
def p_describe_list(self, p):
"""describe_list : modifiers SYMBOL ';' describe_list
| modifiers ENUM ';' describe_list
| modifiers STRUCT ';' describe_list
| modifiers TYPEDEF ';' describe_list
| """
if len(p) > 1:
Type = self.BuildNamed('Type', p, 2, p[1])
p[0] = ListFromConcat(Type, p[4])
#
# Constant Values (integer, value)
#
# Constant values can be found at various levels. A constant value is returned
# as the string value after being validated against a FLOAT, HEX, INT, OCT or
# STRING pattern as appropriate.
#
def p_value(self, p):
"""value : FLOAT
| HEX
| INT
| OCT
| STRING"""
p[0] = p[1]
if self.parse_debug: DumpReduction('value', p)
def p_value_lshift(self, p):
"""value : integer LSHIFT INT"""
p[0] = "%s << %s" % (p[1], p[3])
if self.parse_debug: DumpReduction('value', p)
# Integers are numbers which may not be floats used in cases like array sizes.
def p_integer(self, p):
"""integer : HEX
| INT
| OCT"""
p[0] = p[1]
if self.parse_debug: DumpReduction('integer', p)
#
# Expression
#
# A simple arithmetic expression.
#
precedence = (
('left','|','&','^'),
('left','LSHIFT','RSHIFT'),
('left','+','-'),
('left','*','/'),
('right','UMINUS','~'),
)
def p_expression_binop(self, p):
"""expression : expression LSHIFT expression
| expression RSHIFT expression
| expression '|' expression
| expression '&' expression
| expression '^' expression
| expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression"""
p[0] = "%s %s %s" % (str(p[1]), str(p[2]), str(p[3]))
if self.parse_debug: DumpReduction('expression_binop', p)
def p_expression_unop(self, p):
"""expression : '-' expression %prec UMINUS
| '~' expression %prec '~'"""
p[0] = "%s%s" % (str(p[1]), str(p[2]))
if self.parse_debug: DumpReduction('expression_unop', p)
def p_expression_term(self, p):
"expression : '(' expression ')'"
p[0] = "%s%s%s" % (str(p[1]), str(p[2]), str(p[3]))
if self.parse_debug: DumpReduction('expression_term', p)
def p_expression_symbol(self, p):
"expression : SYMBOL"
p[0] = p[1]
if self.parse_debug: DumpReduction('expression_symbol', p)
def p_expression_integer(self, p):
"expression : integer"
p[0] = p[1]
if self.parse_debug: DumpReduction('expression_integer', p)
#
# Array List
#
# Defined a list of array sizes (if any).
#
def p_arrays(self, p):
"""arrays : '[' ']' arrays
| '[' integer ']' arrays
| """
# If there are 3 tokens plus a return slot it is an unsized array
if len(p) == 4:
array = self.BuildProduction('Array', p, 1)
p[0] = ListFromConcat(array, p[3])
# If there are 4 tokens plus a return slot it is a fixed array
elif len(p) == 5:
count = self.BuildAttribute('FIXED', p[2])
array = self.BuildProduction('Array', p, 2, [count])
p[0] = ListFromConcat(array, p[4])
# If there is only a return slot, do not fill it for this terminator.
elif len(p) == 1: return
if self.parse_debug: DumpReduction('arrays', p)
# An identifier is a legal value for a parameter or attribute name. Lots of
# existing IDL files use "callback" as a parameter/attribute name, so we allow
# a SYMBOL or the CALLBACK keyword.
def p_identifier(self, p):
"""identifier : SYMBOL
| CALLBACK"""
p[0] = p[1]
# Save the line number of the underlying token (otherwise it gets
# discarded), since we use it in the productions with an identifier in
# them.
p.set_lineno(0, p.lineno(1))
#
# Parameter List
#
# A parameter list is a collection of arguments which are passed to a
# function.
#
def p_param_list(self, p):
"""param_list : '(' param_item param_cont ')'
| '(' ')' """
if len(p) > 3:
args = ListFromConcat(p[2], p[3])
else:
args = []
p[0] = self.BuildProduction('Callspec', p, 1, args)
if self.parse_debug: DumpReduction('param_list', p)
def p_param_item(self, p):
"""param_item : modifiers optional SYMBOL arrays identifier"""
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], p[2], typeref, p[4])
p[0] = self.BuildNamed('Param', p, 5, children)
if self.parse_debug: DumpReduction('param_item', p)
def p_optional(self, p):
"""optional : OPTIONAL
| """
if len(p) == 2:
p[0] = self.BuildAttribute('OPTIONAL', True)
def p_param_cont(self, p):
"""param_cont : ',' param_item param_cont
| """
if len(p) > 1:
p[0] = ListFromConcat(p[2], p[3])
if self.parse_debug: DumpReduction('param_cont', p)
def p_param_error(self, p):
"""param_cont : error param_cont"""
p[0] = p[2]
#
# Typedef
#
# A typedef creates a new referencable type. The typedef can specify an array
# definition as well as a function declaration.
#
def p_typedef_data(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref)
p[0] = self.BuildNamed('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_data', p)
def p_typedef_array(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL arrays SYMBOL ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref, p[4])
p[0] = self.BuildNamed('Typedef', p, 5, children)
if self.parse_debug: DumpReduction('typedef_array', p)
def p_typedef_func(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL param_list ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref, p[5])
p[0] = self.BuildNamed('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_func', p)
#
# Enumeration
#
# An enumeration is a set of named integer constants. An enumeration
# is valid type which can be referenced in other definitions.
#
def p_enum_block(self, p):
"""enum_block : modifiers ENUM SYMBOL '{' enum_list '}' ';'"""
p[0] = self.BuildNamed('Enum', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('enum_block', p)
# Recover from enum error and continue parsing at the next top match.
def p_enum_errorA(self, p):
"""enum_block : modifiers ENUM error '{' enum_list '}' ';'"""
p[0] = []
def p_enum_errorB(self, p):
"""enum_block : modifiers ENUM error ';'"""
p[0] = []
def p_enum_list(self, p):
"""enum_list : modifiers SYMBOL '=' expression enum_cont
| modifiers SYMBOL enum_cont"""
if len(p) > 4:
val = self.BuildAttribute('VALUE', p[4])
enum = self.BuildNamed('EnumItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(enum, p[5])
else:
enum = self.BuildNamed('EnumItem', p, 2, p[1])
p[0] = ListFromConcat(enum, p[3])
if self.parse_debug: DumpReduction('enum_list', p)
def p_enum_cont(self, p):
"""enum_cont : ',' enum_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('enum_cont', p)
def p_enum_cont_error(self, p):
"""enum_cont : error enum_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('enum_error', p)
#
# Label
#
# A label is a special kind of enumeration which allows us to go from a
# set of labels
#
def p_label_block(self, p):
"""label_block : modifiers LABEL SYMBOL '{' label_list '}' ';'"""
p[0] = self.BuildNamed('Label', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('label_block', p)
def p_label_list(self, p):
"""label_list : modifiers SYMBOL '=' FLOAT label_cont"""
val = self.BuildAttribute('VALUE', p[4])
label = self.BuildNamed('LabelItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(label, p[5])
if self.parse_debug: DumpReduction('label_list', p)
def p_label_cont(self, p):
"""label_cont : ',' label_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('label_cont', p)
def p_label_cont_error(self, p):
"""label_cont : error label_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('label_error', p)
#
# Members
#
# A member attribute or function of a struct or interface.
#
def p_member_attribute(self, p):
"""member_attribute : modifiers SYMBOL arrays questionmark identifier"""
typeref = self.BuildAttribute('TYPEREF', p[2])
children = ListFromConcat(p[1], typeref, p[3], p[4])
p[0] = self.BuildNamed('Member', p, 5, children)
if self.parse_debug: DumpReduction('attribute', p)
def p_member_function(self, p):
"""member_function : modifiers static SYMBOL SYMBOL param_list"""
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], p[2], typeref, p[5])
p[0] = self.BuildNamed('Member', p, 4, children)
if self.parse_debug: DumpReduction('function', p)
def p_static(self, p):
"""static : STATIC
| """
if len(p) == 2:
p[0] = self.BuildAttribute('STATIC', True)
def p_questionmark(self, p):
"""questionmark : '?'
| """
if len(p) == 2:
p[0] = self.BuildAttribute('OPTIONAL', True)
#
# Interface
#
# An interface is a named collection of functions.
#
def p_interface_block(self, p):
"""interface_block : modifiers INTERFACE SYMBOL '{' interface_list '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('interface_block', p)
def p_interface_error(self, p):
"""interface_block : modifiers INTERFACE error '{' interface_list '}' ';'"""
p[0] = []
def p_interface_list(self, p):
"""interface_list : member_function ';' interface_list
| """
if len(p) > 1 :
p[0] = ListFromConcat(p[1], p[3])
if self.parse_debug: DumpReduction('interface_list', p)
#
# Struct
#
# A struct is a named collection of members which in turn reference other
# types. The struct is a referencable type.
#
def p_struct_block(self, p):
"""struct_block : modifiers STRUCT SYMBOL '{' struct_list '}' ';'"""
children = ListFromConcat(p[1], p[5])
p[0] = self.BuildNamed('Struct', p, 3, children)
if self.parse_debug: DumpReduction('struct_block', p)
# Recover from struct error and continue parsing at the next top match.
def p_struct_error(self, p):
"""enum_block : modifiers STRUCT error '{' struct_list '}' ';'"""
p[0] = []
def p_struct_list(self, p):
"""struct_list : member_attribute ';' struct_list
| member_function ';' struct_list
|"""
if len(p) > 1: p[0] = ListFromConcat(p[1], p[3])
#
# Parser Errors
#
# p_error is called whenever the parser can not find a pattern match for
# a set of items from the current state. The p_error function defined here
# is triggered, logging an error, and parsing recovery happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
filename = self.lexobj.filename
self.parse_errors += 1
if t:
lineno = t.lineno
pos = t.lexpos
prev = self.yaccobj.symstack[-1]
if type(prev) == lex.LexToken:
msg = "Unexpected %s after %s." % (
TokenTypeName(t), TokenTypeName(prev))
else:
msg = "Unexpected %s." % (t.value)
else:
lineno = self.last.lineno
pos = self.last.lexpos
msg = "Unexpected end of file after %s." % TokenTypeName(self.last)
self.yaccobj.restart()
# Attempt to remap the error to a friendlier form
if msg in ERROR_REMAP:
msg = ERROR_REMAP[msg]
# Log the error
ErrOut.LogLine(filename, lineno, pos, msg)
def Warn(self, node, msg):
WarnOut.LogLine(node.filename, node.lineno, node.pos, msg)
self.parse_warnings += 1
def __init__(self):
IDLLexer.__init__(self)
self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=False,
optimize=0, write_tables=0)
self.build_debug = GetOption('build_debug')
self.parse_debug = GetOption('parse_debug')
self.token_debug = GetOption('token_debug')
self.verbose = GetOption('verbose')
self.parse_errors = 0
#
# Tokenizer
#
# The token function returns the next token provided by IDLLexer for matching
# against the leaf patterns.
#
def token(self):
tok = self.lexobj.token()
if tok:
self.last = tok
if self.token_debug:
InfoOut.Log("TOKEN %s(%s)" % (tok.type, tok.value))
return tok
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
# cls - The type of item being produced
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist=None):
if not childlist: childlist = []
filename = self.lexobj.filename
lineno = p.lineno(index)
pos = p.lexpos(index)
out = IDLNode(cls, filename, lineno, pos, childlist)
if self.build_debug:
InfoOut.Log("Building %s" % out)
return out
def BuildNamed(self, cls, p, index, childlist=None):
if not childlist: childlist = []
childlist.append(self.BuildAttribute('NAME', p[index]))
return self.BuildProduction(cls, p, index, childlist)
def BuildComment(self, cls, p, index):
name = p[index]
# Remove comment markers
lines = []
if name[:2] == '//':
# For C++ style, remove any leading whitespace and the '//' marker from
# each line.
form = 'cc'
for line in name.split('\n'):
start = line.find('//')
lines.append(line[start+2:])
else:
# For C style, remove the ending '*/'
form = 'c'
for line in name[:-2].split('\n'):
# Remove characters until start marker for this line '*' if found
# otherwise it should be blank.
offs = line.find('*')
if offs >= 0:
line = line[offs + 1:].rstrip()
else:
line = ''
lines.append(line)
name = '\n'.join(lines)
childlist = [self.BuildAttribute('NAME', name),
self.BuildAttribute('FORM', form)]
return self.BuildProduction(cls, p, index, childlist)
#
# BuildAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildAttribute(self, key, val):
return IDLAttribute(key, val)
#
# ParseData
#
# Attempts to parse the current data loaded in the lexer.
#
def ParseData(self, data, filename='<Internal>'):
self.SetData(filename, data)
try:
self.parse_errors = 0
self.parse_warnings = 0
return self.yaccobj.parse(lexer=self)
except lex.LexError as le:
ErrOut.Log(str(le))
return []
#
# ParseFile
#
# Loads a new file into the lexer and attempts to parse it.
#
def ParseFile(self, filename):
date = time.ctime(os.path.getmtime(filename))
data = open(filename).read()
if self.verbose:
InfoOut.Log("Parsing %s" % filename)
try:
out = self.ParseData(data, filename)
# If we have a src root specified, remove it from the path
srcroot = GetOption('srcroot')
if srcroot and filename.find(srcroot) == 0:
filename = filename[len(srcroot) + 1:]
filenode = IDLFile(filename, out, self.parse_errors + self.lex_errors)
filenode.SetProperty('DATETIME', date)
return filenode
except Exception as e:
ErrOut.LogLine(filename, self.last.lineno, self.last.lexpos,
'Internal parsing error - %s.' % str(e))
raise
#
# Flatten Tree
#
# Flattens the tree of IDLNodes for use in testing.
#
def FlattenTree(node):
add_self = False
out = []
for child in node.children:
if child.IsA('Comment'):
add_self = True
else:
out.extend(FlattenTree(child))
if add_self:
out = [str(node)] + out
return out
def TestErrors(filename, filenode):
nodelist = filenode.GetChildren()
lexer = IDLLexer()
data = open(filename).read()
lexer.SetData(filename, data)
pass_comments = []
fail_comments = []
while True:
tok = lexer.lexobj.token()
if tok == None: break
if tok.type == 'COMMENT':
args = tok.value[3:-3].split()
if args[0] == 'OK':
pass_comments.append((tok.lineno, ' '.join(args[1:])))
else:
if args[0] == 'FAIL':
fail_comments.append((tok.lineno, ' '.join(args[1:])))
obj_list = []
for node in nodelist:
obj_list.extend(FlattenTree(node))
errors = 0
#
# Check for expected successes
#
obj_cnt = len(obj_list)
pass_cnt = len(pass_comments)
if obj_cnt != pass_cnt:
InfoOut.Log("Mismatched pass (%d) vs. nodes built (%d)."
% (pass_cnt, obj_cnt))
InfoOut.Log("PASS: %s" % [x[1] for x in pass_comments])
InfoOut.Log("OBJS: %s" % obj_list)
errors += 1
if pass_cnt > obj_cnt: pass_cnt = obj_cnt
for i in range(pass_cnt):
line, comment = pass_comments[i]
if obj_list[i] != comment:
ErrOut.LogLine(filename, line, None, "OBJ %s : EXPECTED %s\n" %
(obj_list[i], comment))
errors += 1
#
# Check for expected errors
#
err_list = ErrOut.DrainLog()
err_cnt = len(err_list)
fail_cnt = len(fail_comments)
if err_cnt != fail_cnt:
InfoOut.Log("Mismatched fail (%d) vs. errors seen (%d)."
% (fail_cnt, err_cnt))
InfoOut.Log("FAIL: %s" % [x[1] for x in fail_comments])
InfoOut.Log("ERRS: %s" % err_list)
errors += 1
if fail_cnt > err_cnt: fail_cnt = err_cnt
for i in range(fail_cnt):
line, comment = fail_comments[i]
err = err_list[i].strip()
if err_list[i] != comment:
ErrOut.Log("%s(%d) Error\n\tERROR : %s\n\tEXPECT: %s" % (
filename, line, err_list[i], comment))
errors += 1
# Clear the error list for the next run
err_list = []
return errors
def TestFile(parser, filename):
# Capture errors instead of reporting them so we can compare them
# with the expected errors.
ErrOut.SetConsole(False)
ErrOut.SetCapture(True)
filenode = parser.ParseFile(filename)
# Re-enable output
ErrOut.SetConsole(True)
ErrOut.SetCapture(False)
# Compare captured errors
return TestErrors(filename, filenode)
def TestErrorFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_parser', '*.idl')
filenames = glob.glob(idldir)
parser = IDLParser()
total_errs = 0
for filename in filenames:
if filter and filename not in filter: continue
errs = TestFile(parser, filename)
if errs:
ErrOut.Log("%s test failed with %d error(s)." % (filename, errs))
total_errs += errs
if total_errs:
ErrOut.Log("Failed parsing test.")
else:
InfoOut.Log("Passed parsing test.")
return total_errs
def TestNamespaceFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_namespace', '*.idl')
filenames = glob.glob(idldir)
testnames = []
for filename in filenames:
if filter and filename not in filter: continue
testnames.append(filename)
# If we have no files to test, then skip this test
if not testnames:
InfoOut.Log('No files to test for namespace.')
return 0
InfoOut.SetConsole(False)
ast = ParseFiles(testnames)
InfoOut.SetConsole(True)
errs = ast.GetProperty('ERRORS')
if errs:
ErrOut.Log("Failed namespace test.")
else:
InfoOut.Log("Passed namespace test.")
return errs
def FindVersionError(releases, node):
err_cnt = 0
if node.IsA('Interface', 'Struct'):
comment_list = []
comment = node.GetOneOf('Comment')
if comment and comment.GetName()[:4] == 'REL:':
comment_list = comment.GetName()[5:].strip().split(' ')
first_list = [node.first_release[rel] for rel in releases]
first_list = sorted(set(first_list))
if first_list != comment_list:
node.Error("Mismatch in releases: %s vs %s." % (
comment_list, first_list))
err_cnt += 1
for child in node.GetChildren():
err_cnt += FindVersionError(releases, child)
return err_cnt
def TestVersionFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_version', '*.idl')
filenames = glob.glob(idldir)
testnames = []
for filename in filenames:
if filter and filename not in filter: continue
testnames.append(filename)
# If we have no files to test, then skip this test
if not testnames:
InfoOut.Log('No files to test for version.')
return 0
ast = ParseFiles(testnames)
errs = FindVersionError(ast.releases, ast)
errs += ast.errors
if errs:
ErrOut.Log("Failed version test.")
else:
InfoOut.Log("Passed version test.")
return errs
default_dirs = ['.', 'trusted', 'dev', 'private', 'extensions',
'extensions/dev']
def ParseFiles(filenames):
parser = IDLParser()
filenodes = []
if not filenames:
filenames = []
srcroot = GetOption('srcroot')
dirs = default_dirs
if GetOption('include_private'):
dirs += ['private']
for dirname in dirs:
srcdir = os.path.join(srcroot, dirname, '*.idl')
srcdir = os.path.normpath(srcdir)
filenames += sorted(glob.glob(srcdir))
if not filenames:
ErrOut.Log('No sources provided.')
for filename in filenames:
filenode = parser.ParseFile(filename)
filenodes.append(filenode)
ast = IDLAst(filenodes)
if GetOption('dump_tree'): ast.Dump(0)
Lint(ast)
return ast
def Main(args):
filenames = ParseOptions(args)
# If testing...
if GetOption('test'):
errs = TestErrorFiles(filenames)
errs = TestNamespaceFiles(filenames)
errs = TestVersionFiles(filenames)
if errs:
ErrOut.Log("Parser failed with %d errors." % errs)
return -1
return 0
# Otherwise, build the AST
ast = ParseFiles(filenames)
errs = ast.GetProperty('ERRORS')
if errs:
ErrOut.Log('Found %d error(s).' % errs)
InfoOut.Log("%d files processed." % len(filenames))
return errs
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
RichardLitt/wyrd-django-dev | tests/modeltests/custom_methods/tests.py | 150 | 1196 | from __future__ import absolute_import
from datetime import date
from django.test import TestCase
from .models import Article
class MethodsTests(TestCase):
def test_custom_methods(self):
a = Article.objects.create(
headline="Area man programs in Python", pub_date=date(2005, 7, 27)
)
b = Article.objects.create(
headline="Beatles reunite", pub_date=date(2005, 7, 27)
)
self.assertFalse(a.was_published_today())
self.assertQuerysetEqual(
a.articles_from_same_day_1(), [
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
a.articles_from_same_day_2(), [
"Beatles reunite",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
b.articles_from_same_day_1(), [
"Area man programs in Python",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
b.articles_from_same_day_2(), [
"Area man programs in Python",
],
lambda a: a.headline
)
| bsd-3-clause |
kevintaw/django | django/contrib/gis/db/models/query.py | 16 | 36639 | import warnings
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name'))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
warnings.warn(
"The collect GeoQuerySet method is deprecated. Use the Collect() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
warnings.warn(
"The extent GeoQuerySet method is deprecated. Use the Extent() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
warnings.warn(
"The extent3d GeoQuerySet method is deprecated. Use the Extent3D() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
The `precision` keyword may be used to customize the number of
_characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
Creates a linestring from all of the PointField geometries in
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
warnings.warn(
"The make_line GeoQuerySet method is deprecated. Use the MakeLine() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name')
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
warnings.warn(
"The unionagg GeoQuerySet method is deprecated. Use the Union() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Union, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
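# Sketch of how the call above resolves (added note, hypothetical field name):
#   self._spatial_aggregate(aggregates.Union, field_name='point')
# reduces to self.aggregate(geoagg=aggregates.Union('point'))['geoagg'];
# on Oracle backends an extra tolerance=0.05 keyword is forwarded as well.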
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
warnings.warn(
"The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
"documentation to find the expression-based replacement." % att,
RemovedInDjango21Warning, stacklevel=2
)
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(
att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type'))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, None, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field'):
select_field = settings['select_field']
if connection.ops.oracle:
select_field.empty_strings_allowed = False
else:
select_field = Field()
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
self.query.add_annotation(
RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
model_att)
return self
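# Expansion sketch (added note, hypothetical values): with procedure_args
# {'function': 'ST_Centroid', 'geo_col': '"geoapp_city"."point"'} and the
# default procedure_fmt '%(geo_col)s', the annotated raw SQL becomes
# ST_Centroid("geoapp_city"."point").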
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name'))
# If geodetic, default the distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of a geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
srid = self.query.get_context('transformed_srid')
if srid:
u, unit_name, s = get_srid_info(srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if geodetic and not connection.features.supports_distance_geodetic:
raise ValueError(
'This database does not support linear distance '
'calculations on geodetic coordinate systems.'
)
if distance:
if srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
if geom.srid is None or geom.srid == srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
backend.transform, backend.from_text,
geom.srid, srid))
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
# procedures may only do queries from point columns to point geometries,
# so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError(
'Spherical distance calculation only supported with '
'Point Geometry parameters'
)
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and connection.features.supports_3d_functions:
# Use 3D variants of perimeter and length routines on supported backends.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
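# Sketch (added note): a call such as qs.intersection(geom) funnels through
# here and, on PostGIS, expands to SQL of the form
# ST_Intersection("app_model"."geom", %s); Oracle backends append an extra
# tolerance argument. Table and column names here are hypothetical.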
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
compiler = self.query.get_compiler(self.db)
opts = self.model._meta
if geo_field not in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
# Note: the operation really is defined as "must add select related!"
self.query.add_select_related([field_name])
# Call pre_sql_setup() so that compiler.select gets populated.
compiler.pre_sql_setup()
for col, _, _ in compiler.select:
if col.output_field == geo_field:
return col.as_sql(compiler, compiler.connection)[0]
raise ValueError("%r not in compiler's related_select_cols" % geo_field)
elif geo_field not in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
parent_model = geo_field.model._meta.concrete_model
return self._field_column(compiler, geo_field, parent_model._meta.db_table)
else:
return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered or the one specified via
the `field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuerySet's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Iterating until the first geographic field is found.
for field in self.model._meta.fields:
if isinstance(field, GeometryField):
return field
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GISLookup._check_geo_field(self.model._meta, field_name)
def _field_column(self, compiler, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuerySet` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None:
table_alias = compiler.query.get_meta().db_table
return "%s.%s" % (compiler.quote_name_unless_alias(table_alias),
compiler.connection.ops.quote_name(column or field.column))
| bsd-3-clause |
paolodedios/tensorflow | tensorflow/python/kernel_tests/embedding_ops_test.py | 6 | 48038 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import compat
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(test.TestCase):
def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
"""Run a random test case with the given shape and indices.
Args:
shape: Shape of the parameters array.
indices: One-dimensional array of ints, the indices of the last dimension
of the parameters to update.
scatter_op: ScatterAdd or ScatterSub.
"""
super(ScatterAddSubTest, self).setUp()
with self.cached_session(use_gpu=False):
# Create a random parameter array of given shape
p_init = np.random.rand(*shape).astype("f")
# Create the shape of the update array. All dimensions except the last
# match the parameter array, the last dimension equals the # of indices.
vals_shape = [len(indices)] + shape[1:]
vals_init = np.random.rand(*vals_shape).astype("f")
v_i = [float(x) for x in vals_init.ravel()]
p = variables.Variable(p_init)
vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
ind = constant_op.constant(indices, dtype=dtypes.int32)
p2 = scatter_op(p, ind, vals, name="updated_p")
# p = init
self.evaluate(variables.global_variables_initializer())
# p += vals
result = self.evaluate(p2)
# Compute the expected 'p' using numpy operations.
for i, ind in enumerate(indices):
if scatter_op == state_ops.scatter_add:
p_init.reshape(shape[0], -1)[ind, :] += (vals_init.reshape(
vals_shape[0], -1)[i, :])
else:
p_init.reshape(shape[0], -1)[ind, :] -= (vals_init.reshape(
vals_shape[0], -1)[i, :])
self.assertTrue(all((p_init == result).ravel()))
@test_util.run_deprecated_v1
def testNoRepetitions(self):
self._TestCase([2, 2], [1])
self._TestCase([4, 4, 4], [2, 0])
self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
@test_util.run_deprecated_v1
def testWithRepetitions(self):
self._TestCase([2, 2], [1, 1])
self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
self._TestCase([32, 4, 4], [31] * 8)
@test_util.run_deprecated_v1
def testRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices))
@test_util.run_deprecated_v1
def testSubRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
@test_util.run_deprecated_v1
def testWrongShape(self):
# Indices and values mismatch.
var = variables.Variable(
array_ops.zeros(shape=[1024, 64, 64], dtype=dtypes.float32))
indices = array_ops.placeholder(dtypes.int32, shape=[32])
values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
# Var and values mismatch.
values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
if not shape:
shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess rows go to the first shards
shard_shape[0] += 1
param_name = _PName(i)
if use_shapeless_placeholder:
param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
param = constant_op.constant(
1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
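# Sharding illustration (added note): with vocab_size=13 and num_shards=5,
# shards 0-2 each hold 3 rows and shards 3-4 hold 2 rows, because the excess
# (13 % 5 = 3) goes to the first shards, matching the comment above.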
def _EmbeddingParamsAsPartitionedVariable(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_resource=False):
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, dtype=dtype, shape=shape)
shape = shape or [10]
partitioned_variable = variable_scope.get_variable(
"p",
shape=[vocab_size] + shape,
initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=num_shards, min_slice_size=1),
use_resource=use_resource)
return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params,
id_vals,
num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.copy(
params[_PName(partition) + ":0"][offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
assert weight_aggregation is None
assert squared_weight_aggregation is None
value_aggregation = val
weight_aggregation = weight_value
squared_weight_aggregation = weight_value * weight_value
else:
assert weight_aggregation is not None
assert squared_weight_aggregation is not None
value_aggregation += val
weight_aggregation += weight_value
squared_weight_aggregation += weight_value * weight_value
values.append(value_aggregation)
weights.append(weight_aggregation)
weights_squared.append(squared_weight_aggregation)
values = np.array(values).astype(np.float32)
weights = np.array(weights).astype(np.float32)
weights_squared = np.array(weights_squared).astype(np.float32)
return values, weights, weights_squared
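# Partition-strategy illustration (added note): with vocab_size=5 and
# num_shards=2, "mod" assigns ids {0, 2, 4} to shard 0 and {1, 3} to shard 1,
# while "div" assigns the contiguous ranges {0, 1, 2} to shard 0 and {3, 4}
# to shard 1, as computed by the branches above.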
class EmbeddingLookupTest(test.TestCase):
# This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
# both ids are in the first shard, one of the resulting lookup
# vectors is going to be empty. The subsequent DivOp fails because of that.
# TODO(keveman): Disabling the test until the underlying problem is fixed.
@test_util.run_deprecated_v1
def testSimpleSharded(self):
with self.cached_session():
num_shards = 2
vocab_size = 4
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testMaxNorm(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0]])
ids = constant_op.constant([0], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[1.0]])
@test_util.run_deprecated_v1
def testMaxNormNontrivial(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
ids = constant_op.constant([0, 1], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=2.0)
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllClose(embedding, 2 * self.evaluate(normalized))
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedResourceVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, _ = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size, use_resource=True)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
print(ops.get_default_graph().as_graph_def())
tf_result = self.evaluate(embedding)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimension is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimension is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimension is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimension is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
_, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimension is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningUnknownParamShape(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimension is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
# We clear parameter shapes, to test when shape is not statically known.
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, use_shapeless_placeholder=True)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookup(self):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(
id_vals, shape=ids_shape, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
y = embedding_ops.embedding_lookup(x, ids)
y_shape = ids_shape + tuple(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(id_vals, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
x_squared = [math_ops.square(elem) for elem in x]
y = embedding_ops.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
with ops.Graph().as_default():
p = variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
embedding_ops.embedding_lookup([p], ids)
def testConstructionSharded(self):
with ops.Graph().as_default():
p = []
for _ in range(2):
p += [
variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
]
ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
embedding_ops.embedding_lookup(p, ids)
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids)
self.assertAllEqual(simple, array_ops.gather(params, ids))
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops.embedding_lookup(split_params, ids)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testHigherRankMaxNorm(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3), (6, 2, 3):
# Test embedding rank 0, 1, 2.
# Note: the first dimension must be a common multiple of procs below.
params = 2 * np.ones(params_shape)
params_norm = params / np.sqrt(
np.sum(
params * params, tuple(range(params.ndim)[1:]), keepdims=True))
for ids_shape in (), (3), (4, 3), (2, 3, 4):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids, max_norm=1.0)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops.embedding_lookup(
split_params, ids, max_norm=1.0)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testTransform(self):
# This tests all combinations of:
# - ids rank 0, 1, >1
# - params sharded/unsharded
# It always applies max_norm.
np.random.seed(8)
l2_norm = 2.
with self.cached_session():
# Param values are in [l2_norm, l2_norm+1) so it will always clip.
params = np.random.rand(6, 3) + l2_norm
params_norm = l2_norm * params / np.sqrt(
np.sum(params * params, axis=1, keepdims=True))
# Compute the norm of each embedding. This will change the embedding
# rank to 0.
params_norm = np.linalg.norm(params_norm, axis=1)
transform = lambda x: linalg_ops.norm(x, axis=1)
for ids_shape in (), (3), (4, 3), (2, 3, 4):
# Test ids rank 0, 1, 2, 3.
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather.
simple = embedding_ops._embedding_lookup_and_transform(
params, ids, max_norm=l2_norm, transform_fn=transform)
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in xrange(procs)
]
sharded = embedding_ops._embedding_lookup_and_transform(
split_params, ids, max_norm=l2_norm, transform_fn=transform)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, sharded)
def testRaggedMaxNorm(self):
embeddings = constant_op.constant([[2.0]])
ids = ragged_factory_ops.constant([[0, 0], [0]], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup([embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[[1.0], [1.0]], [[1.0]]])
class EmbeddingLookupSparseTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
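  # For example (added note): _GroupByBatchEntry([1, 2, 3, 4, 5], [2, 3])
  # returns [[1, 2], [3, 4, 5]], regrouping the flat id/weight arrays produced
  # by _RandomIdsAndWeights back into per-batch-entry lists.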
@test_util.run_deprecated_v1
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"],
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64],
[True, False]):
with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = embedding_ops.embedding_lookup_sparse(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
expected_lookup_result_shape)
self.assertEqual(embedding_sum.dtype, dtype)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights else grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
rtol = 1e-6
if dtype == dtypes.bfloat16:
rtol = 1e-2
elif dtype == dtypes.float16:
rtol = 1e-3
atol = rtol
self.assertAllClose(np_embedding_sum, tf_embedding_sum, rtol, atol)
def testMissingInSparseIds(self):
# GitHub issue 36359
with self.test_session():
x = array_ops.ones((4, 5))
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[1, 0], [3, 0]], dtypes.int64),
constant_op.constant([0, 2], dtypes.int32),
constant_op.constant([4, 1], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[1, 0], [3, 0]], dtypes.int64),
constant_op.constant([1, 1], dtypes.float32),
constant_op.constant([4, 1], dtypes.int64))
for combiner in ["sum", "mean", "sqrtn"]:
embedding_sum = embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner=combiner)
tf_embedding_sum = ops.convert_to_tensor(embedding_sum)
self.assertAllClose(tf_embedding_sum[0], np.zeros(5))
self.assertAllClose(tf_embedding_sum[1], np.ones(5))
self.assertAllClose(tf_embedding_sum[2], np.zeros(5))
self.assertAllClose(tf_embedding_sum[3], np.ones(5))
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
@test_util.run_deprecated_v1
def testIncompatibleShapes(self):
with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner="mean")
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32)
embedding_weights = list(variable_scope.get_variable(
name="embedding_weights",
shape=[vocab_size, embed_dim],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=initializer))
for w in embedding_weights:
self.evaluate(w.initializer)
embedding_weights = [self.evaluate(w) for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights))
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights, default_id=3))
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2], (
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights))
self.assertAllClose(embedding_lookup_result, [[
(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
[0] * 4, [0] * 4
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights, default_id=3))
self.assertAllClose(
embedding_lookup_result,
[[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3]], [
embedding_weights[0][2], embedding_weights[0][3],
embedding_weights[0][3]
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
self.assertAllClose(embedding_lookup_result, [[(
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
0
] * 4], [
embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result, [[
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
], [
embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
class DynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testCint32Gpu(self):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Gpu(self):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testSumGradArgs(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2, 3]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([2, 3, 5, 7]),
ops.convert_to_tensor([1, 1])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [2, 3, 1, 1])
# We expect that the values are merged in order.
@test_util.run_deprecated_v1
def testStitchOrder(self):
with self.cached_session():
indices = []
np_values = []
values = []
for _ in range(10):
indices.extend([ops.convert_to_tensor(np.arange(100).astype(np.int32))])
np_values.extend([np.random.uniform(size=100)])
values.extend([ops.convert_to_tensor(np_values[-1])])
stitched = data_flow_ops.dynamic_stitch(indices, values)
self.assertAllEqual(np_values[-1], stitched)
class ParallelDynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 4, 6]),
ops.convert_to_tensor([2, 3, 5])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45]),
ops.convert_to_tensor([1, 2, 3])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values),
[12, 23, 1, 2, 34, 3, 45])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 5, 6, 7]),
ops.convert_to_tensor([2, 4, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45, 56]),
ops.convert_to_tensor([1, 3, 2])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values),
[12, 23, 1, 2, 3, 34, 45, 56])
@test_util.run_deprecated_v1
def testSimple(self):
with self.session(use_gpu=False):
indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])]
values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values), [2, 3, 1, 1])
if __name__ == "__main__":
test.main()
| apache-2.0 |
sergio-incaser/bank-payment | __unported__/account_banking/parsers/__init__.py | 14 | 1053 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
trachelr/mne-python | mne/stats/parametric.py | 5 | 12542 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import numpy as np
from functools import reduce
from string import ascii_uppercase
from ..externals.six import string_types
from ..utils import deprecated
from ..fixes import matrix_rank
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def _f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test
p-value : float
The associated p-value from the F-distribution
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`stats.kruskal`_) although with
some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See scipy.stats.f_oneway, which should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
from scipy import stats
sf = stats.f.sf
n_classes = len(args)
n_samples_per_class = np.array([len(a) for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = reduce(lambda x, y: x + y,
[np.sum(a ** 2, axis=0) for a in args])
sums_args = [np.sum(a, axis=0) for a in args]
square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
    prob = sf(f, dfbn, dfwn)
return f, prob
def f_oneway(*args):
"""Call scipy.stats.f_oneway, but return only f-value"""
return _f_oneway(*args)[0]
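# Illustrative usage sketch (editor's addition, not part of the original MNE
# module); the three groups below are made-up measurements:
def _example_f_oneway():  # pragma: no cover - illustration only
    group_a = np.array([2.1, 2.5, 2.3, 2.8])
    group_b = np.array([3.0, 3.4, 2.9, 3.6])
    group_c = np.array([2.0, 2.2, 2.4, 2.1])
    return _f_oneway(group_a, group_b, group_c)  # -> (F-value, p-value)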
def _map_effects(n_factors, effects):
"""Map effects to indices"""
if n_factors > len(ascii_uppercase):
raise ValueError('Maximum number of factors supported is 26')
factor_names = list(ascii_uppercase[:n_factors])
if isinstance(effects, string_types):
if '*' in effects and ':' in effects:
            raise ValueError('Using both "*" and ":" in effects is not '
                             'permitted')
elif '+' in effects and ':' in effects:
            raise ValueError('Using both "+" and ":" in effects is not '
                             'permitted')
elif effects == 'all':
effects = None
elif len(effects) == 1 or ':' in effects:
effects = [effects]
elif '+' in effects:
# all main effects
effects = effects.split('+')
elif '*' in effects:
pass # handle later
else:
raise ValueError('"{0}" is not a valid option for "effects"'
.format(effects))
if isinstance(effects, list):
bad_names = [e for e in effects if e not in factor_names]
        if len(bad_names) > 0:
            raise ValueError('Effect names: {0} are not valid. They should be '
                             'the first `n_factors` ({1}) characters from the '
                             'alphabet'.format(bad_names, n_factors))
indices = list(np.arange(2 ** n_factors - 1))
names = list()
for this_effect in indices:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
this_name = [factor_names[e] for e in this_code]
this_name.sort()
names.append(':'.join(this_name))
if effects is None or isinstance(effects, string_types):
effects_ = names
else:
effects_ = effects
selection = [names.index(sel) for sel in effects_]
names = [names[sel] for sel in selection]
if isinstance(effects, string_types):
if '*' in effects:
# hierarchical order of effects
# the * based effect can be used as stop index
sel_ind = names.index(effects.replace('*', ':')) + 1
names = names[:sel_ind]
selection = selection[:sel_ind]
return selection, names
def _get_contrast_indices(effect_idx, n_factors):
"""Henson's factor coding, see num2binvec"""
binrepr = np.binary_repr(effect_idx, n_factors)
return np.array([int(i) for i in binrepr], dtype=int)
def _iter_contrasts(n_subjects, factor_levels, effect_picks):
""" Aux Function: Setup contrasts """
from scipy.signal import detrend
sc = []
n_factors = len(factor_levels)
# prepare computation of Kronecker products
for n_levels in factor_levels:
# for each factor append
# 1) column vector of length == number of levels,
# 2) square matrix with diagonal == number of levels
# main + interaction effects for contrasts
sc.append([np.ones([n_levels, 1]),
detrend(np.eye(n_levels), type='constant')])
for this_effect in effect_picks:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
c_ = sc[0][contrast_idx[n_factors - 1]]
for i_contrast in range(1, n_factors):
this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
c_ = np.kron(c_, sc[i_contrast][this_contrast])
df1 = matrix_rank(c_)
df2 = df1 * (n_subjects - 1)
yield c_, df1, df2
@deprecated('"f_threshold_twoway_rm" is deprecated and will be removed in '
'MNE-0.11. Please use f_threshold_mway_rm instead')
def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
return f_threshold_mway_rm(
n_subjects=n_subjects, factor_levels=factor_levels,
effects=effects, pvalue=pvalue)
def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
                        pvalue=0.05):
    """ Compute f-value thresholds for a two-way ANOVA
Parameters
----------
n_subjects : int
The number of subjects to be analyzed.
factor_levels : list-like
The number of levels per factor.
effects : str
A string denoting the effect to be returned. The following
mapping is currently supported:
'A': main effect of A
'B': main effect of B
'A:B': interaction effect
'A+B': both main effects
'A*B': all three effects
pvalue : float
The p-value to be thresholded.
Returns
-------
f_threshold : list | float
list of f-values for each effect if the number of effects
        requested > 1, else float.
See Also
--------
f_oneway
f_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
effect_picks, _ = _map_effects(len(factor_levels), effects)
f_threshold = []
for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
effect_picks):
f_threshold.append(f(df1, df2).isf(pvalue))
return f_threshold if len(f_threshold) > 1 else f_threshold[0]
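# Illustrative usage sketch (editor's addition); a hypothetical balanced
# 2 x 3 within-subject design with 20 subjects, thresholding the interaction:
def _example_f_threshold_mway_rm():  # pragma: no cover - illustration only
    return f_threshold_mway_rm(n_subjects=20, factor_levels=[2, 3],
                               effects='A:B', pvalue=0.05)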
# The following functions based on MATLAB code by Rik Henson
# and Python code from the pvttble toolbox by Roger Lew.
@deprecated('"f_twoway_rm" is deprecated and will be removed in MNE 0.11.'
" Please use f_mway_rm instead")
def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
correction=False, return_pvals=True):
"""This function is deprecated, use `f_mway_rm` instead"""
return f_mway_rm(data=data, factor_levels=factor_levels, effects=effects,
alpha=alpha, correction=correction,
return_pvals=return_pvals)
def f_mway_rm(data, factor_levels, effects='all', alpha=0.05,
correction=False, return_pvals=True):
"""M-way repeated measures ANOVA for fully balanced designs
Parameters
----------
data : ndarray
3D array where the first two dimensions are compliant
with a subjects X conditions scheme where the first
factor repeats slowest::
            A1B1 A1B2 A2B1 A2B2
subject 1 1.34 2.53 0.97 1.74
subject ... .... .... .... ....
subject k 2.45 7.90 3.09 4.76
        The last dimension is thought to carry the observations
for mass univariate analysis.
factor_levels : list-like
The number of levels per factor.
effects : str | list
A string denoting the effect to be returned. The following
mapping is currently supported (example with 2 factors):
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
* ``'all'``: all effects (equals 'A*B' in a 2 way design)
If list, effect names are used: ``['A', 'B', 'A:B']``.
alpha : float
The significance threshold.
correction : bool
The correction method to be employed if one factor has more than two
levels. If True, sphericity correction using the Greenhouse-Geisser
method will be applied.
return_pvals : bool
If True, return p values corresponding to f values.
Returns
-------
f_vals : ndarray
An array of f values with length corresponding to the number
of effects estimated. The shape depends on the number of effects
estimated.
p_vals : ndarray
If not requested via return_pvals, defaults to an empty array.
See Also
--------
f_oneway
f_threshold_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
if data.ndim == 2: # general purpose support, e.g. behavioural data
data = data[:, :, np.newaxis]
elif data.ndim > 3: # let's allow for some magic here.
data = data.reshape(
data.shape[0], data.shape[1], np.prod(data.shape[2:]))
effect_picks, _ = _map_effects(len(factor_levels), effects)
n_obs = data.shape[2]
n_replications = data.shape[0]
    # put the last axis in front to 'iterate' over mass univariate instances.
data = np.rollaxis(data, 2)
fvalues, pvalues = [], []
for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
effect_picks):
y = np.dot(data, c_)
b = np.mean(y, axis=1)[:, np.newaxis, :]
ss = np.sum(np.sum(y * b, axis=2), axis=1)
mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
fvals = ss / mse
fvalues.append(fvals)
if correction:
# sample covariances, leave off "/ (y.shape[1] - 1)" norm because
# it falls out.
v = np.array([np.dot(y_.T, y_) for y_ in y])
v = (np.array([np.trace(vv) for vv in v]) ** 2 /
(df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
eps = v
df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
if correction:
df1, df2 = [d[None, :] * eps for d in (df1, df2)]
if return_pvals:
pvals = f(df1, df2).sf(fvals)
else:
pvals = np.empty(0)
pvalues.append(pvals)
# handle single effect returns
return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
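# Illustrative usage sketch (editor's addition); `data` is random and purely
# hypothetical: 20 subjects, a 2 x 2 condition grid (factor A repeating
# slowest) and 10 mass-univariate observations per cell:
def _example_f_mway_rm():  # pragma: no cover - illustration only
    rng = np.random.RandomState(42)
    data = rng.randn(20, 4, 10)
    return f_mway_rm(data, factor_levels=[2, 2], effects='A*B')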
| bsd-3-clause |
stainsteelcrown/nonsense-story-generator | venv/lib/python2.7/site-packages/werkzeug/urls.py | 146 | 33150 | # -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
This module implements various URL related functions.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from werkzeug._compat import text_type, PY2, to_unicode, \
to_native, implements_to_string, try_coerce_native, \
normalize_string_tuple, make_literal_wrapper, \
fix_tuple_repr
from werkzeug._internal import _encode_idna, _decode_idna
from werkzeug.datastructures import MultiDict, iter_multi_items
from collections import namedtuple
# A regular expression for what a valid schema looks like
_scheme_re = re.compile(r'^[a-zA-Z0-9+-.]+$')
# Characters that are safe in any part of an URL.
_always_safe = (b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-+')
_hexdigits = '0123456789ABCDEFabcdef'
_hextobyte = dict(
((a + b).encode(), int(a + b, 16))
for a in _hexdigits for b in _hexdigits
)
_URLTuple = fix_tuple_repr(namedtuple('_URLTuple',
['scheme', 'netloc', 'path', 'query', 'fragment']))
class _URLMixin(object):
__slots__ = ()
def replace(self, **kwargs):
"""Return an URL with the same values, except for those parameters
given new values by whichever keyword arguments are specified."""
return self._replace(**kwargs)
@property
def host(self):
"""The host part of the URL if available, otherwise `None`. The
host is either the hostname or the IP address mentioned in the
URL. It will not contain the port.
"""
return self._split_host()[0]
@property
def ascii_host(self):
"""Works exactly like :attr:`host` but will return a result that
is restricted to ASCII. If it finds a netloc that is not ASCII
it will attempt to idna decode it. This is useful for socket
operations when the URL might include internationalized characters.
"""
rv = self.host
if rv is not None and isinstance(rv, text_type):
rv = _encode_idna(rv)
return to_native(rv, 'ascii', 'ignore')
@property
def port(self):
"""The port in the URL as an integer if it was present, `None`
otherwise. This does not fill in default ports.
"""
try:
rv = int(to_native(self._split_host()[1]))
if 0 <= rv <= 65535:
return rv
except (ValueError, TypeError):
pass
@property
def auth(self):
"""The authentication part in the URL if available, `None`
otherwise.
"""
return self._split_netloc()[0]
@property
def username(self):
"""The username if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[0]
if rv is not None:
return _url_unquote_legacy(rv)
@property
def raw_username(self):
"""The username if it was part of the URL, `None` otherwise.
Unlike :attr:`username` this one is not being decoded.
"""
return self._split_auth()[0]
@property
def password(self):
"""The password if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[1]
if rv is not None:
return _url_unquote_legacy(rv)
@property
def raw_password(self):
"""The password if it was part of the URL, `None` otherwise.
Unlike :attr:`password` this one is not being decoded.
"""
return self._split_auth()[1]
    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL. This is a shortcut for
calling :func:`url_decode` on the query argument. The arguments and
keyword arguments are forwarded to :func:`url_decode` unchanged.
"""
return url_decode(self.query, *args, **kwargs)
def join(self, *args, **kwargs):
"""Joins this URL with another one. This is just a convenience
function for calling into :meth:`url_join` and then parsing the
return value again.
"""
return url_parse(url_join(self, *args, **kwargs))
def to_url(self):
"""Returns a URL string or bytes depending on the type of the
information stored. This is just a convenience function
for calling :meth:`url_unparse` for this URL.
"""
return url_unparse(self)
def decode_netloc(self):
"""Decodes the netloc part into a string."""
rv = _decode_idna(self.host or '')
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
_url_unquote_legacy(self.raw_username or '', '/:%@'),
_url_unquote_legacy(self.raw_password or '', '/:%@'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv
def to_uri_tuple(self):
"""Returns a :class:`BytesURL` tuple that holds a URI. This will
encode all the information in the URL properly to ASCII using the
rules a web browser would follow.
It's usually more interesting to directly call :meth:`iri_to_uri` which
will return a string.
"""
return url_parse(iri_to_uri(self).encode('ascii'))
def to_iri_tuple(self):
"""Returns a :class:`URL` tuple that holds a IRI. This will try
to decode as much information as possible in the URL without
losing information similar to how a web browser does it for the
URL bar.
It's usually more interesting to directly call :meth:`uri_to_iri` which
will return a string.
"""
return url_parse(uri_to_iri(self))
def _split_netloc(self):
if self._at in self.netloc:
return self.netloc.split(self._at, 1)
return None, self.netloc
def _split_auth(self):
auth = self._split_netloc()[0]
if not auth:
return None, None
if self._colon not in auth:
return auth, None
return auth.split(self._colon, 1)
def _split_host(self):
rv = self._split_netloc()[1]
if not rv:
return None, None
if not rv.startswith(self._lbracket):
if self._colon in rv:
return rv.split(self._colon, 1)
return rv, None
idx = rv.find(self._rbracket)
if idx < 0:
return rv, None
host = rv[1:idx]
rest = rv[idx + 1:]
if rest.startswith(self._colon):
return host, rest[1:]
return host, None
@implements_to_string
class URL(_URLTuple, _URLMixin):
"""Represents a parsed URL. This behaves like a regular tuple but
also has some extra attributes that give further insight into the
URL.
"""
__slots__ = ()
_at = '@'
_colon = ':'
_lbracket = '['
_rbracket = ']'
def __str__(self):
return self.to_url()
def encode_netloc(self):
"""Encodes the netloc part to an ASCII safe URL as bytes."""
rv = self.ascii_host or ''
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'),
url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv.encode('ascii')
def encode(self, charset='utf-8', errors='replace'):
"""Encodes the URL to a tuple made out of bytes. The charset is
only being used for the path, query and fragment.
"""
return BytesURL(
self.scheme.encode('ascii'),
self.encode_netloc(),
self.path.encode(charset, errors),
self.query.encode(charset, errors),
self.fragment.encode(charset, errors)
)
class BytesURL(_URLTuple, _URLMixin):
"""Represents a parsed URL in bytes."""
__slots__ = ()
_at = b'@'
_colon = b':'
_lbracket = b'['
_rbracket = b']'
def __str__(self):
return self.to_url().decode('utf-8', 'replace')
def encode_netloc(self):
"""Returns the netloc unchanged as bytes."""
return self.netloc
def decode(self, charset='utf-8', errors='replace'):
"""Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
"""
return URL(
self.scheme.decode('ascii'),
self.decode_netloc(),
self.path.decode(charset, errors),
self.query.decode(charset, errors),
self.fragment.decode(charset, errors)
)
def _unquote_to_bytes(string, unsafe=''):
if isinstance(string, text_type):
string = string.encode('utf-8')
if isinstance(unsafe, text_type):
unsafe = unsafe.encode('utf-8')
unsafe = frozenset(bytearray(unsafe))
bits = iter(string.split(b'%'))
result = bytearray(next(bits, b''))
for item in bits:
try:
char = _hextobyte[item[:2]]
if char in unsafe:
raise KeyError()
result.append(char)
result.extend(item[2:])
except KeyError:
result.extend(b'%')
result.extend(item)
return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
iterable = iter_multi_items(obj)
if sort:
iterable = sorted(iterable, key=key)
for key, value in iterable:
if value is None:
continue
if not isinstance(key, bytes):
key = text_type(key).encode(charset)
if not isinstance(value, bytes):
value = text_type(value).encode(charset)
yield url_quote_plus(key) + '=' + url_quote_plus(value)
def _url_unquote_legacy(value, unsafe=''):
try:
return url_unquote(value, charset='utf-8',
errors='strict', unsafe=unsafe)
except UnicodeError:
return url_unquote(value, charset='latin1', unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
"""Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`.
The inverse of this function is :func:`url_unparse`.
:param url: the URL to parse.
:param scheme: the default schema to use if the URL is schemaless.
:param allow_fragments: if set to `False` a fragment will be removed
from the URL.
"""
s = make_literal_wrapper(url)
is_text_based = isinstance(url, text_type)
if scheme is None:
scheme = s('')
netloc = query = fragment = s('')
i = url.find(s(':'))
if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')):
# make sure "iri" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i + 1:]
if not rest or any(c not in s('0123456789') for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == s('//'):
delim = len(url)
for c in s('/?#'):
wdelim = url.find(c, 2)
if wdelim >= 0:
delim = min(delim, wdelim)
netloc, url = url[2:delim], url[delim:]
if (s('[') in netloc and s(']') not in netloc) or \
(s(']') in netloc and s('[') not in netloc):
raise ValueError('Invalid IPv6 URL')
if allow_fragments and s('#') in url:
url, fragment = url.split(s('#'), 1)
if s('?') in url:
url, query = url.split(s('?'), 1)
result_type = is_text_based and URL or BytesURL
return result_type(scheme, netloc, url, query, fragment)
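# Illustrative usage sketch (editor's addition, mirroring the doctest style of
# this module); the URL is hypothetical:
#
#   >>> url = url_parse('http://example.com:8080/path?q=1#frag')
#   >>> (url.scheme, url.netloc, url.path, url.query, url.fragment)
#   ('http', 'example.com:8080', '/path', 'q=1', 'frag')
#   >>> url.host, url.port
#   ('example.com', 8080)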
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
"""URL encode a single string with a given encoding.
:param s: the string to quote.
:param charset: the charset to be used.
:param safe: an optional sequence of safe characters.
:param unsafe: an optional sequence of unsafe characters.
.. versionadded:: 0.9.2
The `unsafe` parameter was added.
"""
if not isinstance(string, (text_type, bytes, bytearray)):
string = text_type(string)
if isinstance(string, text_type):
string = string.encode(charset, errors)
if isinstance(safe, text_type):
safe = safe.encode(charset, errors)
if isinstance(unsafe, text_type):
unsafe = unsafe.encode(charset, errors)
safe = frozenset(bytearray(safe) + _always_safe) - frozenset(bytearray(unsafe))
rv = bytearray()
for char in bytearray(string):
if char in safe:
rv.append(char)
else:
rv.extend(('%%%02X' % char).encode('ascii'))
return to_native(bytes(rv))
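# Illustrative usage sketch (editor's addition); hypothetical input showing the
# default safe characters '/' and ':' being preserved:
#
#   >>> url_quote(u'hello wörld/x')
#   'hello%20w%C3%B6rld/x'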
def url_quote_plus(string, charset='utf-8', errors='strict', safe=''):
"""URL encode a single string with the given encoding and convert
whitespace to "+".
:param s: The string to quote.
:param charset: The charset to be used.
:param safe: An optional sequence of safe characters.
"""
return url_quote(string, charset, errors, safe + ' ', '+').replace(' ', '+')
def url_unparse(components):
"""The reverse operation to :meth:`url_parse`. This accepts arbitrary
as well as :class:`URL` tuples and returns a URL as a string.
:param components: the parsed URL as tuple which should be converted
into a URL string.
"""
scheme, netloc, path, query, fragment = \
normalize_string_tuple(components)
s = make_literal_wrapper(scheme)
url = s('')
# We generally treat file:///x and file:/x the same which is also
# what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
# empty and missing netloc.
if netloc or (scheme and path.startswith(s('/'))):
if path and path[:1] != s('/'):
path = s('/') + path
url = s('//') + (netloc or s('')) + path
elif path:
url += path
if scheme:
url = scheme + s(':') + url
if query:
url = url + s('?') + query
if fragment:
url = url + s('#') + fragment
return url
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
"""URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
:param s: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
"""
rv = _unquote_to_bytes(string, unsafe)
if charset is not None:
rv = rv.decode(charset, errors)
return rv
def url_unquote_plus(s, charset='utf-8', errors='replace'):
"""URL decode a single string with the given `charset` and decode "+" to
whitespace.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
:param s: The string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: The error handling for the `charset` decoding.
"""
if isinstance(s, text_type):
s = s.replace(u'+', u' ')
else:
s = s.replace(b'+', b' ')
return url_unquote(s, charset, errors)
def url_fix(s, charset='utf-8'):
r"""Sometimes you get an URL by a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
scheme, netloc, path, qs, anchor = url_parse(to_unicode(s, charset, 'replace'))
path = url_quote(path, charset, safe='/%+$!*\'(),')
qs = url_quote_plus(qs, charset, safe=':&%=+$!*\'(),')
return to_native(url_unparse((scheme, netloc, path, qs, anchor)))
def uri_to_iri(uri, charset='utf-8', errors='replace'):
r"""
Converts a URI in a given charset to a IRI.
Examples for URI versus IRI:
>>> uri_to_iri(b'http://xn--n3h.net/')
u'http://\u2603.net/'
    >>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'
Query strings are left unchanged:
>>> uri_to_iri('/?foo=24&x=%26%2f')
u'/?foo=24&x=%26%2f'
.. versionadded:: 0.6
:param uri: The URI to convert.
:param charset: The charset of the URI.
:param errors: The error handling on decode.
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, '%/;?')
query = url_unquote(uri.query, charset, errors, '%;/?:@&=+,$')
fragment = url_unquote(uri.fragment, charset, errors, '%;/?:@&=+,$')
return url_unparse((uri.scheme, uri.decode_netloc(),
path, query, fragment))
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
r"""
Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always
uses utf-8 URLs internally because this is what browsers and HTTP do as
well. In some places where it accepts an URL it also accepts a unicode IRI
and converts it into a URI.
Examples for IRI versus URI:
>>> iri_to_uri(u'http://☃.net/')
'http://xn--n3h.net/'
>>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
There is a general problem with IRI and URI conversion with some
protocols that appear in the wild that are in violation of the URI
specification. In places where Werkzeug goes through a forced IRI to
URI conversion it will set the `safe_conversion` flag which will
not perform a conversion if the end result is already ASCII. This
can mean that the return value is not an entirely correct URI but
it will not destroy such invalid URLs in the process.
As an example consider the following two IRIs::
magnet:?xt=uri:whatever
itms-services://?action=download-manifest
The internal representation after parsing of those URLs is the same
and there is no way to reconstruct the original one. If safe
conversion is enabled however this function becomes a noop for both of
those strings as they both can be considered URIs.
.. versionadded:: 0.6
.. versionchanged:: 0.9.6
The `safe_conversion` parameter was added.
:param iri: The IRI to convert.
:param charset: The charset for the URI.
:param safe_conversion: indicates if a safe conversion should take place.
For more information see the explanation above.
"""
if isinstance(iri, tuple):
iri = url_unparse(iri)
if safe_conversion:
try:
native_iri = to_native(iri)
ascii_iri = to_native(iri).encode('ascii')
if ascii_iri.split() == [ascii_iri]:
return native_iri
except UnicodeError:
pass
iri = url_parse(to_unicode(iri, charset, errors))
netloc = iri.encode_netloc().decode('ascii')
path = url_quote(iri.path, charset, errors, '/:~+%')
query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')
return to_native(url_unparse((iri.scheme, netloc,
path, query, fragment)))
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
errors='replace', separator='&', cls=None):
"""
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`.
If the charset is set to `None` no unicode decoding will happen and
raw bytes will be returned.
Per default a missing value for a key will default to an empty key. If
you don't want that behavior you can set `include_empty` to `False`.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`
then keys will be unicode in all cases. Otherwise,
they remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
cls = MultiDict
if isinstance(s, text_type) and not isinstance(separator, text_type):
separator = separator.decode(charset or 'ascii')
elif isinstance(s, bytes) and not isinstance(separator, bytes):
separator = separator.encode(charset or 'ascii')
return cls(_url_decode_impl(s.split(separator), charset, decode_keys,
include_empty, errors))
def url_decode_stream(stream, charset='utf-8', decode_keys=False,
include_empty=True, errors='replace', separator='&',
cls=None, limit=None, return_iterator=False):
"""Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed.
.. versionadded:: 0.8
:param stream: a stream with the encoded querystring
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`,
keys will be unicode in all cases. Otherwise, they
remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param limit: the content length of the URL data. Not necessary if
a limited stream is provided.
:param return_iterator: if set to `True` the `cls` argument is ignored
and an iterator over all decoded pairs is
returned
"""
from werkzeug.wsgi import make_chunk_iter
if return_iterator:
cls = lambda x: x
elif cls is None:
cls = MultiDict
pair_iter = make_chunk_iter(stream, separator, limit)
return cls(_url_decode_impl(pair_iter, charset, decode_keys,
include_empty, errors))
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
for pair in pair_iter:
if not pair:
continue
s = make_literal_wrapper(pair)
equal = s('=')
if equal in pair:
key, value = pair.split(equal, 1)
else:
if not include_empty:
continue
key = pair
value = s('')
key = url_unquote_plus(key, charset, errors)
if charset is not None and PY2 and not decode_keys:
key = try_coerce_native(key)
yield key, url_unquote_plus(value, charset, errors)
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
separator=b'&'):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. Per default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, 'ascii')
return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False,
sort=False, key=None, separator=b'&'):
"""Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned.
.. versionadded:: 0.8
:param obj: the object to encode into a query string.
:param stream: a stream to write the encoded object into or `None` if
an iterator over the encoded pairs should be returned. In
that case the separator argument is ignored.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, 'ascii')
gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
if stream is None:
return gen
for idx, chunk in enumerate(gen):
if idx:
stream.write(separator)
stream.write(chunk)
def url_join(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
"""
if isinstance(base, tuple):
base = url_unparse(base)
if isinstance(url, tuple):
url = url_unparse(url)
base, url = normalize_string_tuple((base, url))
s = make_literal_wrapper(base)
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bquery, bfragment = \
url_parse(base, allow_fragments=allow_fragments)
scheme, netloc, path, query, fragment = \
url_parse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return url_unparse((scheme, netloc, path, query, fragment))
netloc = bnetloc
if path[:1] == s('/'):
segments = path.split(s('/'))
elif not path:
segments = bpath.split(s('/'))
if not query:
query = bquery
else:
segments = bpath.split(s('/'))[:-1] + path.split(s('/'))
# If the rightmost part is "./" we want to keep the slash but
# remove the dot.
if segments[-1] == s('.'):
segments[-1] = s('')
# Resolve ".." and "."
segments = [segment for segment in segments if segment != s('.')]
while 1:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == s('..') and \
segments[i - 1] not in (s(''), s('..')):
del segments[i - 1:i + 1]
break
i += 1
else:
break
# Remove trailing ".." if the URL is absolute
unwanted_marker = [s(''), s('..')]
while segments[:2] == unwanted_marker:
del segments[1]
path = s('/').join(segments)
return url_unparse((scheme, netloc, path, query, fragment))
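# Illustrative usage sketch (editor's addition); the URLs are hypothetical:
#
#   >>> url_join('http://example.com/a/b.html', 'c.html')
#   'http://example.com/a/c.html'
#   >>> url_join('http://example.com/a/b/', '../d')
#   'http://example.com/a/d'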
class Href(object):
"""Implements a callable that constructs URLs with the given base. The
function can be called with any number of positional and keyword
    arguments which are then used to assemble the URL. Works with URLs
and posix paths.
Positional arguments are appended as individual segments to
the path of the URL:
>>> href = Href('/foo')
>>> href('bar', 23)
'/foo/bar/23'
>>> href('foo', bar=23)
'/foo/foo?bar=23'
If any of the arguments (positional or keyword) evaluates to `None` it
will be skipped. If no keyword arguments are given the last argument
can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
otherwise the keyword arguments are used for the query parameters, cutting
off the first trailing underscore of the parameter name:
>>> href(is_=42)
'/foo?is=42'
>>> href({'foo': 'bar'})
'/foo?foo=bar'
Combining of both methods is not allowed:
>>> href({'foo': 'bar'}, bar=42)
Traceback (most recent call last):
...
TypeError: keyword arguments and query-dicts can't be combined
Accessing attributes on the href object creates a new href object with
the attribute name as prefix:
>>> bar_href = href.bar
>>> bar_href("blub")
'/foo/bar/blub'
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm:
>>> href = Href("/", sort=True)
>>> href(a=1, b=2, c=3)
'/?a=1&b=2&c=3'
.. versionadded:: 0.5
`sort` and `key` were added.
"""
def __init__(self, base='./', charset='utf-8', sort=False, key=None):
if not base:
base = './'
self.base = base
self.charset = charset
self.sort = sort
self.key = key
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
base = self.base
if base[-1:] != '/':
base += '/'
return Href(url_join(base, name), self.charset, self.sort, self.key)
def __call__(self, *path, **query):
if path and isinstance(path[-1], dict):
if query:
raise TypeError('keyword arguments and query-dicts '
'can\'t be combined')
query, path = path[-1], path[:-1]
elif query:
query = dict([(k.endswith('_') and k[:-1] or k, v)
for k, v in query.items()])
path = '/'.join([to_unicode(url_quote(x, self.charset), 'ascii')
for x in path if x is not None]).lstrip('/')
rv = self.base
if path:
if not rv.endswith('/'):
rv += '/'
rv = url_join(rv, './' + path)
if query:
rv += '?' + to_unicode(url_encode(query, self.charset, sort=self.sort,
key=self.key), 'ascii')
return to_native(rv)
| mit |
techvoltage/capstone | bindings/python/test.py | 33 | 5021 | #!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynnh <[email protected]>
from __future__ import print_function
from capstone import *
import binascii
import sys
from xprint import to_hex, to_x, to_x_32
_python3 = sys.version_info.major == 3
X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"
ARM_CODE = b"\xED\xFF\xFF\xEB\x04\xe0\x2d\xe5\x00\x00\x00\x00\xe0\x83\x22\xe5\xf1\x02\x03\x0e\x00\x00\xa0\xe3\x02\x30\xc1\xe7\x00\x00\x53\xe3"
ARM_CODE2 = b"\x10\xf1\x10\xe7\x11\xf2\x31\xe7\xdc\xa1\x2e\xf3\xe8\x4e\x62\xf3"
THUMB_CODE = b"\x70\x47\xeb\x46\x83\xb0\xc9\x68"
THUMB_CODE2 = b"\x4f\xf0\x00\x01\xbd\xe8\x00\x88\xd1\xe8\x00\xf0"
THUMB_MCLASS = b"\xef\xf3\x02\x80"
ARMV8 = b"\xe0\x3b\xb2\xee\x42\x00\x01\xe1\x51\xf0\x7f\xf5"
MIPS_CODE = b"\x0C\x10\x00\x97\x00\x00\x00\x00\x24\x02\x00\x0c\x8f\xa2\x00\x00\x34\x21\x34\x56"
MIPS_CODE2 = b"\x56\x34\x21\x34\xc2\x17\x01\x00"
MIPS_32R6M = b"\x00\x07\x00\x07\x00\x11\x93\x7c\x01\x8c\x8b\x7c\x00\xc7\x48\xd0"
MIPS_32R6 = b"\xec\x80\x00\x19\x7c\x43\x22\xa0"
ARM64_CODE = b"\x21\x7c\x02\x9b\x21\x7c\x00\x53\x00\x40\x21\x4b\xe1\x0b\x40\xb9"
PPC_CODE = b"\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21"
SPARC_CODE = b"\x80\xa0\x40\x02\x85\xc2\x60\x08\x85\xe8\x20\x01\x81\xe8\x00\x00\x90\x10\x20\x01\xd5\xf6\x10\x16\x21\x00\x00\x0a\x86\x00\x40\x02\x01\x00\x00\x00\x12\xbf\xff\xff\x10\xbf\xff\xff\xa0\x02\x00\x09\x0d\xbf\xff\xff\xd4\x20\x60\x00\xd4\x4e\x00\x16\x2a\xc2\x80\x03"
SPARCV9_CODE = b"\x81\xa8\x0a\x24\x89\xa0\x10\x20\x89\xa0\x1a\x60\x89\xa0\x00\xe0"
SYSZ_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
XCORE_CODE = b"\xfe\x0f\xfe\x17\x13\x17\xc6\xfe\xec\x17\x97\xf8\xec\x4f\x1f\xfd\xec\x37\x07\xf2\x45\x5b\xf9\xfa\x02\x06\x1b\x10"
all_tests = (
(CS_ARCH_X86, CS_MODE_16, X86_CODE16, "X86 16bit (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32bit (ATT syntax)", CS_OPT_SYNTAX_ATT),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_64, X86_CODE64, "X86 64 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE, "ARM", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE2, "THUMB-2", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE2, "ARM: Cortex-A15 + NEON", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE, "THUMB", 0),
(CS_ARCH_ARM, CS_MODE_THUMB + CS_MODE_MCLASS, THUMB_MCLASS, "Thumb-MClass", 0),
(CS_ARCH_ARM, CS_MODE_ARM + CS_MODE_V8, ARMV8, "Arm-V8", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN, MIPS_CODE, "MIPS-32 (Big-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_LITTLE_ENDIAN, MIPS_CODE2, "MIPS-64-EL (Little-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS32R6 + CS_MODE_MICRO + CS_MODE_BIG_ENDIAN, MIPS_32R6M, "MIPS-32R6 | Micro (Big-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_MIPS32R6 + CS_MODE_BIG_ENDIAN, MIPS_32R6, "MIPS-32R6 (Big-endian)", 0),
(CS_ARCH_ARM64, CS_MODE_ARM, ARM64_CODE, "ARM-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64, print register with number only", CS_OPT_SYNTAX_NOREGNAME),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN, SPARC_CODE, "Sparc", 0),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN + CS_MODE_V9, SPARCV9_CODE, "SparcV9", 0),
(CS_ARCH_SYSZ, 0, SYSZ_CODE, "SystemZ", 0),
(CS_ARCH_XCORE, 0, XCORE_CODE, "XCore", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print()
# ## Test class Cs
def test_class():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 16)
print("Platform: %s" % comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
for insn in md.disasm(code, 0x1000):
# bytes = binascii.hexlify(insn.bytes)
# print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print()
except CsError as e:
print("ERROR: %s" % e)
# test_cs_disasm_quick()
# print ("*" * 40)
if __name__ == '__main__':
test_class()
| bsd-3-clause |
transferwise/bootstrap | test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
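# Editor's note: the lookups in __main__ below expect S3Cachefile.json to map a
# cache name to the keys "key", "generate" and "cache". A hypothetical entry
# (values are illustrative, not the real Bootstrap configuration):
#
# {
#     "npm-modules": {
#         "key": "$PWD/npm-shrinkwrap.json",
#         "generate": "npm install",
#         "cache": "$HOME/.npm"
#     }
# }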
@contextmanager
def timer():
start = datetime.utcnow()
yield
end = datetime.utcnow()
elapsed = end - start
print("\tDone. Took", int(elapsed.total_seconds()), "second(s).")
@contextmanager
def todo_file(writeback=True):
try:
with open(UPLOAD_TODO_FILE, 'rt') as json_file:
todo = load(json_file)
except (IOError, OSError, ValueError):
todo = {}
yield todo
if writeback:
try:
with open(UPLOAD_TODO_FILE, 'wt') as json_file:
save(todo, json_file)
except (OSError, IOError) as save_err:
print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def mark_needs_uploading(cache_name):
with todo_file() as todo:
todo[cache_name] = True
def mark_uploaded(cache_name):
with todo_file() as todo:
todo.pop(cache_name, None)
def need_to_upload(cache_name):
with todo_file(writeback=False) as todo:
return todo.get(cache_name, False)
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
with timer():
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
with timer():
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
mark_uploaded(cache_name) # reset
try:
print("Downloading {} tarball from S3...".format(cache_name))
with timer():
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
mark_needs_uploading(cache_name)
raise SystemExit("Cached {} download failed!".format(cache_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
with timer():
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(cache_name))
mark_uploaded(cache_name)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 2:
raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
mode, cache_name = argv
script_dir = dirname(realpath(__file__))
chdir(script_dir)
try:
with open(CONFIG_FILE, 'rt') as config_file:
config = load(config_file)
except (IOError, OSError, ValueError) as config_err:
print(config_err)
raise SystemExit("Error when trying to load config from JSON file!")
try:
cache_info = config[cache_name]
key_file = expandvars(cache_info["key"])
fallback_cmd = cache_info["generate"]
directory = expandvars(cache_info["cache"])
except (TypeError, KeyError) as load_err:
print(load_err)
raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
try:
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME)
if bucket is None:
raise SystemExit("Could not access bucket!")
key_file_hash = _sha256_of_file(key_file)
key = Key(bucket, key_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if need_to_upload(cache_name):
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
except BaseException as exc:
if mode != 'download':
raise
print("Error!:", exc)
print("Unable to download from cache.")
print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
with timer():
run(fallback_cmd, shell=True)
| mit |
gabrielsaldana/sqmc | sabesqmc/quote/tests/test_forms.py | 1 | 2384 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.test import TestCase
from ..forms import QuoteForm
class TestQuoteForm(TestCase):
def setUp(self):
pass
    def test_validate_empty_quote(self):
form = QuoteForm({'message': ''})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': ' '})
self.assertFalse(form.is_valid())
def test_validate_invalid_quote(self):
form = QuoteForm({'message': 'Mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'me nsaje invalido'})
self.assertFalse(form.is_valid())
def test_urls_in_quote(self):
form = QuoteForm({'message': 'http://122.33.43.322'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com/asdfads/'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com/test/12'})
self.assertFalse(form.is_valid())
def test_emails_in_quote(self):
form = QuoteForm({'message': 'Me caga [email protected]'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga [email protected]'})
self.assertFalse(form.is_valid())
def test_validate_short_quote(self):
form = QuoteForm({'message': 'Me caga '})
self.assertFalse(form.is_valid())
def test_validate_long_quote(self):
form = QuoteForm({'message': 'Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar asdfadfa adsfasdfa. Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar.'})
self.assertFalse(form.is_valid())
def test_valid_message(self):
form = QuoteForm({'message': 'Me caga probar esto'})
self.assertTrue(form.is_valid())
| agpl-3.0 |
hbrunn/OCB | addons/payment_authorize/controllers/main.py | 247 | 1261 | # -*- coding: utf-8 -*-
import pprint
import logging
import urlparse
from openerp import http
from openerp.http import request
_logger = logging.getLogger(__name__)
class AuthorizeController(http.Controller):
_return_url = '/payment/authorize/return/'
_cancel_url = '/payment/authorize/cancel/'
@http.route([
'/payment/authorize/return/',
'/payment/authorize/cancel/',
], type='http', auth='public')
def authorize_form_feedback(self, **post):
_logger.info('Authorize: entering form_feedback with post data %s', pprint.pformat(post))
return_url = '/'
if post:
request.env['payment.transaction'].sudo().form_feedback(post, 'authorize')
return_url = post.pop('return_url', '/')
base_url = request.env['ir.config_parameter'].get_param('web.base.url')
# Authorize.Net is expecting a response to the POST sent by their server.
# This response is in the form of a URL that Authorize.Net will pass on to the
        # client's browser to redirect them to the desired location; this redirect needs javascript.
return request.render('payment_authorize.payment_authorize_redirect', {
'return_url': '%s' % urlparse.urljoin(base_url, return_url)
})
| agpl-3.0 |
cjparsons74/kupfer | kupfer/obj/objects.py | 1 | 11035 | # -*- coding: UTF-8 -*-
"""
Copyright 2007--2009 Ulrik Sverdrup <[email protected]>
This file is a part of the program kupfer, which is
released under GNU General Public License v3 (or any later version),
see the main program file, and COPYING for details.
"""
import os
from os import path
import gobject
from kupfer import icons, launch, utils
from kupfer import pretty
from kupfer.obj.base import Leaf, Action, Source, InvalidDataError
from kupfer.obj import fileactions
from kupfer.interface import TextRepresentation
from kupfer.kupferstring import tounicode
def ConstructFileLeafTypes():
""" Return a seq of the Leaf types returned by ConstructFileLeaf"""
yield FileLeaf
yield AppLeaf
def ConstructFileLeaf(obj):
"""
If the path in @obj points to a Desktop Item file,
return an AppLeaf, otherwise return a FileLeaf
"""
root, ext = path.splitext(obj)
if ext == ".desktop":
try:
return AppLeaf(init_path=obj)
except InvalidDataError:
pass
return FileLeaf(obj)
def _directory_content(dirpath, show_hidden):
from kupfer.obj.sources import DirectorySource
return DirectorySource(dirpath, show_hidden)
class FileLeaf (Leaf, TextRepresentation):
"""
Represents one file: the represented object is a bytestring (important!)
"""
serializable = 1
def __init__(self, obj, name=None):
"""Construct a FileLeaf
The display name of the file is normally derived from the full path,
and @name should normally be left unspecified.
@obj: byte string (file system encoding)
@name: unicode name or None for using basename
"""
if obj is None:
raise InvalidDataError("File path for %s may not be None" % name)
# Use glib filename reading to make display name out of filenames
# this function returns a `unicode` object
if not name:
name = gobject.filename_display_basename(obj)
super(FileLeaf, self).__init__(obj, name)
def __eq__(self, other):
try:
return (type(self) == type(other) and
unicode(self) == unicode(other) and
path.samefile(self.object, other.object))
except OSError, exc:
pretty.print_debug(__name__, exc)
return False
def repr_key(self):
return self.object
def canonical_path(self):
"""Return the true path of the File (without symlinks)"""
return path.realpath(self.object)
def is_valid(self):
return os.access(self.object, os.R_OK)
def _is_executable(self):
return os.access(self.object, os.R_OK | os.X_OK)
def is_dir(self):
return path.isdir(self.object)
def get_text_representation(self):
return gobject.filename_display_name(self.object)
def get_description(self):
return utils.get_display_path_for_bytestring(self.canonical_path())
def get_actions(self):
return fileactions.get_actions_for_file(self)
def has_content(self):
return self.is_dir() or Leaf.has_content(self)
def content_source(self, alternate=False):
if self.is_dir():
return _directory_content(self.object, alternate)
else:
return Leaf.content_source(self)
def get_thumbnail(self, width, height):
if self.is_dir(): return None
return icons.get_thumbnail_for_file(self.object, width, height)
def get_gicon(self):
return icons.get_gicon_for_file(self.object)
def get_icon_name(self):
if self.is_dir():
return "folder"
else:
return "text-x-generic"
class SourceLeaf (Leaf):
def __init__(self, obj, name=None):
"""Create SourceLeaf for source @obj"""
if not name:
name = unicode(obj)
Leaf.__init__(self, obj, name)
def has_content(self):
return True
def repr_key(self):
return repr(self.object)
def content_source(self, alternate=False):
return self.object
def get_description(self):
return self.object.get_description()
@property
def fallback_icon_name(self):
return self.object.fallback_icon_name
def get_gicon(self):
return self.object.get_gicon()
def get_icon_name(self):
return self.object.get_icon_name()
class AppLeaf (Leaf):
def __init__(self, item=None, init_path=None, app_id=None):
"""Try constructing an Application for GAppInfo @item,
for file @path or for package name @app_id.
"""
self.init_item = item
self.init_path = init_path
self.init_item_id = app_id and app_id + ".desktop"
# finish will raise InvalidDataError on invalid item
self.finish()
Leaf.__init__(self, self.object, self.object.get_name())
self._add_aliases()
def _add_aliases(self):
# find suitable alias
# use package name: non-extension part of ID
lowername = unicode(self).lower()
package_name = self._get_package_name()
if package_name and package_name not in lowername:
self.kupfer_add_alias(package_name)
def __hash__(self):
return hash(unicode(self))
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.get_id() == other.get_id())
def __getstate__(self):
self.init_item_id = self.object and self.object.get_id()
state = dict(vars(self))
state["object"] = None
state["init_item"] = None
return state
def __setstate__(self, state):
vars(self).update(state)
self.finish()
def finish(self):
"""Try to set self.object from init's parameters"""
item = None
if self.init_item:
item = self.init_item
else:
# Construct an AppInfo item from either path or item_id
from gio.unix import DesktopAppInfo, desktop_app_info_new_from_filename
if self.init_path and os.access(self.init_path, os.X_OK):
# serilizable if created from a "loose file"
self.serializable = 1
item = desktop_app_info_new_from_filename(self.init_path)
try:
# try to annotate the GAppInfo object
item.init_path = self.init_path
except AttributeError, exc:
pretty.print_debug(__name__, exc)
elif self.init_item_id:
try:
item = DesktopAppInfo(self.init_item_id)
except RuntimeError:
pretty.print_debug(__name__, "Application not found:",
self.init_item_id)
self.object = item
if not self.object:
raise InvalidDataError
def repr_key(self):
return self.get_id()
def _get_package_name(self):
return gobject.filename_display_basename(self.get_id())
def get_id(self):
"""Return the unique ID for this app.
This is the GIO id "gedit.desktop" minus the .desktop part for
system-installed applications.
"""
return launch.application_id(self.object)
def get_actions(self):
if launch.application_is_running(self.object):
yield Launch(_("Go To"), is_running=True)
yield CloseAll()
else:
yield Launch()
yield LaunchAgain()
def get_description(self):
# Use Application's description, else use executable
# for "file-based" applications we show the path
app_desc = tounicode(self.object.get_description())
ret = tounicode(app_desc if app_desc else self.object.get_executable())
if self.init_path:
app_path = utils.get_display_path_for_bytestring(self.init_path)
return u"(%s) %s" % (app_path, ret)
return ret
def get_gicon(self):
return self.object.get_icon()
def get_icon_name(self):
return "exec"
class OpenUrl (Action):
rank_adjust = 5
def __init__(self, name=None):
if not name:
name = _("Open URL")
super(OpenUrl, self).__init__(name)
def activate(self, leaf):
url = leaf.object
self.open_url(url)
def open_url(self, url):
utils.show_url(url)
def get_description(self):
return _("Open URL with default viewer")
def get_icon_name(self):
return "forward"
class Launch (Action):
""" Launches an application (AppLeaf) """
rank_adjust = 5
def __init__(self, name=None, is_running=False, open_new=False):
"""
If @is_running, style as if the app is running (Show application)
If @open_new, always start a new instance.
"""
if not name:
name = _("Launch")
Action.__init__(self, name)
self.is_running = is_running
self.open_new = open_new
def activate(self, leaf):
desktop_item = leaf.object
launch.launch_application(leaf.object, activate=not self.open_new)
def get_description(self):
if self.is_running:
return _("Show application window")
return _("Launch application")
def get_icon_name(self):
if self.is_running:
return "go-jump"
return Action.get_icon_name(self)
class LaunchAgain (Launch):
rank_adjust = 0
def __init__(self, name=None):
if not name:
name = _("Launch Again")
Launch.__init__(self, name, open_new=True)
def item_types(self):
yield AppLeaf
def valid_for_item(self, leaf):
return launch.application_is_running(leaf.object)
def get_description(self):
return _("Launch another instance of this application")
class CloseAll (Action):
"""Attempt to close all application windows"""
rank_adjust = -10
def __init__(self):
Action.__init__(self, _("Close"))
def activate(self, leaf):
return launch.application_close_all(leaf.object)
def item_types(self):
yield AppLeaf
def valid_for_item(self, leaf):
return launch.application_is_running(leaf.object)
def get_description(self):
return _("Attempt to close all application windows")
def get_icon_name(self):
return "window-close"
class UrlLeaf (Leaf, TextRepresentation):
def __init__(self, obj, name):
super(UrlLeaf, self).__init__(obj, name)
def get_actions(self):
return (OpenUrl(), )
def get_description(self):
return self.object
def get_icon_name(self):
return "text-html"
class RunnableLeaf (Leaf):
"""Leaf where the Leaf is basically the action itself,
for items such as Quit, Log out etc.
"""
def __init__(self, obj=None, name=None):
Leaf.__init__(self, obj, name)
def get_actions(self):
yield Perform()
def run(self):
raise NotImplementedError
def repr_key(self):
return ""
def get_gicon(self):
iname = self.get_icon_name()
if iname:
return icons.get_gicon_with_fallbacks(None, (iname, ))
return icons.ComposedIcon("kupfer-object", "gtk-execute")
def get_icon_name(self):
return ""
class Perform (Action):
"""Perform the action in a RunnableLeaf"""
rank_adjust = 5
def __init__(self, name=None):
# TRANS: 'Run' as in Perform a (saved) command
if not name: name = _("Run")
super(Perform, self).__init__(name=name)
def activate(self, leaf):
return leaf.run()
def get_description(self):
return _("Perform command")
class TextLeaf (Leaf, TextRepresentation):
"""Represent a text query
The represented object is a unicode string
"""
serializable = 1
def __init__(self, text, name=None):
"""@text *must* be unicode or UTF-8 str"""
text = tounicode(text)
if not name:
lines = [l for l in text.splitlines() if l.strip()]
name = lines[0] if lines else text
Leaf.__init__(self, text, name)
def get_actions(self):
return ()
def repr_key(self):
return hash(self.object)
def get_description(self):
lines = [l for l in self.object.splitlines() if l.strip()]
desc = lines[0] if lines else self.object
numlines = len(lines) or 1
# TRANS: This is description for a TextLeaf, a free-text search
# TRANS: The plural parameter is the number of lines %(num)d
return ngettext('"%(text)s"', '(%(num)d lines) "%(text)s"',
numlines) % {"num": numlines, "text": desc }
def get_icon_name(self):
return "edit-select-all"
| gpl-3.0 |
klabble/pollution-music | weather.py | 1 | 4565 | #!/usr/bin/env python3
# Copyright 2013 David Walker
#
# This file is part of Pollution Music.
#
# Pollution Music is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Pollution Music is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pollution Music. If not, see <http://www.gnu.org/licenses/>.
import calendar
from collections import defaultdict
import datetime
import json
import time
import urllib.request, urllib.error, urllib.parse
class WeatherData(object):
'''Caching front-end to weather data API.'''
# minimum time to wait, in seconds, between calls to the weather api
MIN_TIME_BETWEEN_CALLS = 12.0
def __init__(self, location, api_key):
'''Init with location about which to retrieve weather data.
Args:
location: Must be a string in form "state_or_country_code/city",
e.g., "CA/San_Francisco" or "China/Beijing".
api_key: issued by wunderground
'''
# this object stores weather data for only one location
self._location = location
self._url_prefix = 'http://api.wunderground.com/api/' + api_key + '/'
# for self-throttling to avoid exceeding calls/min limit
self._time_of_last_call = None
# cached weather observations. When populated, the expression
# self._wx[year][month][day][hour] (keys are of type int) will give a
# comma-separated-value string with the readings for temperature, wind
# speed, and so on.
self._wx = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(str))))
# empty csv to return when an observation is not available
self._no_data = ',,,,'
def _get_history(self, dt):
'''Ask the weather API for the history of observations for the given
date, and return the observation for the given hour on that date.
Args:
dt: datetime instance with values for year, month, day, and hour
Returns:
A string containing weather observations separated by commas.
'''
self._throttle()
url = (self._url_prefix +
'history_{}{:02}{:02}/q/{}.json'.format(
dt.year, dt.month, dt.day, self._location))
h = json.loads(urllib.request.urlopen(url).read().decode())
for ob in h['history']['observations']:
if ob['date']['min'] == '00':
hour = int(ob['date']['hour'])
self._wx[dt.year][dt.month][dt.day][hour] = ','.join([
ob['tempm'],
ob['hum'],
ob['wspdm'],
ob['wdird'],
ob['pressurem']])
ob = self._wx[dt.year][dt.month][dt.day][dt.hour]
if ob == '':
ob = self._wx[dt.year][dt.month][dt.day][dt.hour] = self._no_data
return ob
def _throttle(self):
'''Record the time of this call, first sleeping if necessary to avoid
exceeding the call/minute limit for the weather api.'''
if self._time_of_last_call is not None:
time_since_last_call = time.time() - self._time_of_last_call
if time_since_last_call < WeatherData.MIN_TIME_BETWEEN_CALLS:
time.sleep(WeatherData.MIN_TIME_BETWEEN_CALLS -
time_since_last_call)
self._time_of_last_call = time.time()
def get_observation(self, dt):
'''Return a comma-delimited string containing weather observations for
the date and hour specified by dt.'''
ob = self._wx[dt.year][dt.month][dt.day][dt.hour]
if ob == '':
# Never fetched this data, ask the API for it.
ob = self._get_history(dt)
return ob
def get_cur_conditions(self):
self._throttle()
url = (self._url_prefix + 'geolookup/conditions/q/' + self._location +
'.json')
return json.loads(urllib.request.urlopen(url).read().decode())
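# Annotation (not in the original file): WUNDERGROUND_API_KEY, used in main()
# below, is not defined in this module; it is assumed to be supplied elsewhere,
# e.g. by a local configuration module or an import omitted from this file.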
def main():
WeatherData('China/Beijing', WUNDERGROUND_API_KEY)
if __name__ == '__main__':
main()
| gpl-3.0 |
was4444/chromium.src | third_party/WebKit/Source/devtools/scripts/modular_build.py | 32 | 6644 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for the modular DevTools build.
"""
from os import path
import os
try:
import simplejson as json
except ImportError:
import json
def read_file(filename):
with open(path.normpath(filename), 'rt') as input:
return input.read()
def write_file(filename, content):
if path.exists(filename):
os.remove(filename)
with open(filename, 'wt') as output:
output.write(content)
def bail_error(message):
raise Exception(message)
def load_and_parse_json(filename):
try:
return json.loads(read_file(filename))
except:
print 'ERROR: Failed to parse %s' % filename
raise
def concatenate_scripts(file_names, module_dir, output_dir, output):
for file_name in file_names:
output.write('/* %s */\n' % file_name)
file_path = path.join(module_dir, file_name)
if not path.isfile(file_path):
file_path = path.join(output_dir, path.basename(module_dir), file_name)
output.write(read_file(file_path))
output.write(';')
class Descriptors:
def __init__(self, application_dir, application_descriptor, module_descriptors):
self.application_dir = application_dir
self.application = application_descriptor
self.modules = module_descriptors
self._cached_sorted_modules = None
def application_json(self):
return json.dumps(self.application.values())
def all_compiled_files(self):
files = {}
for name in self.modules:
module = self.modules[name]
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files[path.normpath(path.join(self.application_dir, name, script))] = True
return files.keys()
def module_compiled_files(self, name):
files = []
module = self.modules.get(name)
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files.append(script)
return files
def module_resources(self, name):
return [name + '/' + resource for resource in self.modules[name].get('resources', [])]
def sorted_modules(self):
if self._cached_sorted_modules:
return self._cached_sorted_modules
result = []
unvisited_modules = set(self.modules)
temp_modules = set()
def visit(parent, name):
if name not in unvisited_modules:
return None
if name not in self.modules:
return (parent, name)
if name in temp_modules:
bail_error('Dependency cycle found at module "%s"' % name)
temp_modules.add(name)
deps = self.modules[name].get('dependencies')
if deps:
for dep_name in deps:
bad_dep = visit(name, dep_name)
if bad_dep:
return bad_dep
unvisited_modules.remove(name)
temp_modules.remove(name)
result.append(name)
return None
while len(unvisited_modules):
for next in unvisited_modules:
break
failure = visit(None, next)
if failure:
# failure[0] can never be None
bail_error('Unknown module "%s" encountered in dependencies of "%s"' % (failure[1], failure[0]))
self._cached_sorted_modules = result
return result
def sorted_dependencies_closure(self, module_name):
visited = set()
def sorted_deps_for_module(name):
result = []
desc = self.modules[name]
deps = desc.get('dependencies', [])
for dep in deps:
result += sorted_deps_for_module(dep)
if name not in visited:
result.append(name)
visited.add(name)
return result
return sorted_deps_for_module(module_name)
class DescriptorLoader:
def __init__(self, application_dir):
self.application_dir = application_dir
def load_application(self, application_descriptor_name):
return self.load_applications([application_descriptor_name])
def load_applications(self, application_descriptor_names):
merged_application_descriptor = {}
all_module_descriptors = {}
for application_descriptor_name in application_descriptor_names:
module_descriptors = {}
application_descriptor_filename = path.join(self.application_dir, application_descriptor_name)
application_descriptor = {desc['name']: desc for desc in load_and_parse_json(application_descriptor_filename)}
for name in application_descriptor:
merged_application_descriptor[name] = application_descriptor[name]
for (module_name, module) in application_descriptor.items():
if module_descriptors.get(module_name):
bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename))
if not all_module_descriptors.get(module_name):
module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename)
all_module_descriptors[module_name] = module_descriptors[module_name]
for module in module_descriptors.values():
deps = module.get('dependencies', [])
for dep in deps:
if dep not in application_descriptor:
bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % (dep, module['name'], application_descriptor_filename))
return Descriptors(self.application_dir, merged_application_descriptor, all_module_descriptors)
def _read_module_descriptor(self, module_name, application_descriptor_filename):
json_filename = path.join(self.application_dir, module_name, 'module.json')
if not path.exists(json_filename):
bail_error('Module descriptor %s referenced in %s is missing' % (json_filename, application_descriptor_filename))
module_json = load_and_parse_json(json_filename)
module_json['name'] = module_name
return module_json
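# Illustrative usage sketch (not part of the original script): ties together
# DescriptorLoader and Descriptors.sorted_modules(). The application directory
# and descriptor file name are parameters here because the real values are
# build-specific.
def example_sorted_modules(application_dir, descriptor_name):
    loader = DescriptorLoader(application_dir)
    descriptors = loader.load_application(descriptor_name)
    return descriptors.sorted_modules()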
| bsd-3-clause |
nevil/edash-packager | packager/third_party/protobuf/python/google/protobuf/descriptor_database.py | 230 | 4411 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = '[email protected] (Matt Toia)'
class DescriptorDatabase(object):
"""A container accepting FileDescriptorProtos and maps DescriptorProtos."""
def __init__(self):
self._file_desc_protos_by_file = {}
self._file_desc_protos_by_symbol = {}
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
"""
self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
def FindFileByName(self, name):
"""Finds the file descriptor proto by file name.
Typically the file name is a relative path ending to a .proto file. The
proto with the given name will have to have been added to this database
using the Add method or else an error will be raised.
Args:
name: The file name to find.
Returns:
The file descriptor proto matching the name.
Raises:
KeyError if no file by the given name was added.
"""
return self._file_desc_protos_by_file[name]
def FindFileContainingSymbol(self, symbol):
"""Finds the file descriptor proto containing the specified symbol.
The symbol should be a fully qualified name including the file descriptor's
package and any containing messages. Some examples:
'some.package.name.Message'
'some.package.name.Message.NestedEnum'
The file descriptor proto containing the specified symbol must be added to
this database using the Add method or else an error will be raised.
Args:
symbol: The fully qualified symbol name.
Returns:
The file descriptor proto containing the symbol.
Raises:
KeyError if no file contains the specified symbol.
"""
return self._file_desc_protos_by_symbol[symbol]
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name))
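# Illustrative usage sketch (not part of the original module): exercises the
# lookup flow described above, assuming the caller already holds a parsed
# FileDescriptorProto instance (e.g. built via descriptor_pb2).
def _ExampleLookup(file_desc_proto):
  db = DescriptorDatabase()
  db.Add(file_desc_proto)
  by_name = db.FindFileByName(file_desc_proto.name)
  by_symbol = None
  if file_desc_proto.message_type:
    symbol = '.'.join((file_desc_proto.package,
                       file_desc_proto.message_type[0].name))
    by_symbol = db.FindFileContainingSymbol(symbol)
  return by_name, by_symbol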
| bsd-3-clause |
dyyi/moneybook | venv/Lib/site-packages/pip/_vendor/distlib/manifest.py | 559 | 13598 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Class representing the list of files in a distribution.
Equivalent to distutils.filelist, but fixes some problems.
"""
import fnmatch
import logging
import os
import re
from . import DistlibException
from .compat import fsdecode
from .util import convert_path
__all__ = ['Manifest']
logger = logging.getLogger(__name__)
# a \ followed by some spaces + EOL
_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M)
_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
class Manifest(object):
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
"""
def __init__(self, base=None):
"""
Initialise an instance.
:param base: The base directory to explore under.
"""
self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
self.prefix = self.base + os.sep
self.allfiles = None
self.files = set()
#
# Public API
#
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
"""
from stat import S_ISREG, S_ISDIR, S_ISLNK
self.allfiles = allfiles = []
root = self.base
stack = [root]
pop = stack.pop
push = stack.append
while stack:
root = pop()
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
# Avoid excess stat calls -- just one will do, thank you!
stat = os.stat(fullname)
mode = stat.st_mode
if S_ISREG(mode):
allfiles.append(fsdecode(fullname))
elif S_ISDIR(mode) and not S_ISLNK(mode):
push(fullname)
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item))
def add_many(self, items):
"""
Add a list of files to the manifest.
:param items: The pathnames to add. These can be relative to the base.
"""
for item in items:
self.add(item)
def sorted(self, wantdirs=False):
"""
Return sorted files in directory order
"""
def add_dir(dirs, d):
dirs.add(d)
logger.debug('add_dir added %s', d)
if d != self.base:
parent, _ = os.path.split(d)
assert parent not in ('', '/')
add_dir(dirs, parent)
result = set(self.files) # make a copy!
if wantdirs:
dirs = set()
for f in result:
add_dir(dirs, os.path.dirname(f))
result |= dirs
return [os.path.join(*path_tuple) for path_tuple in
sorted(os.path.split(path) for path in result)]
def clear(self):
"""Clear all collected files."""
self.files = set()
self.allfiles = []
def process_directive(self, directive):
"""
Process a directive which either adds some files from ``allfiles`` to
``files``, or removes some files from ``files``.
:param directive: The directive to process. This should be in a format
compatible with distutils ``MANIFEST.in`` files:
http://docs.python.org/distutils/sourcedist.html#commands
"""
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dirpattern).
action, patterns, thedir, dirpattern = self._parse_directive(directive)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=True):
logger.warning('no files found matching %r', pattern)
elif action == 'exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=True)
#if not found:
# logger.warning('no previously-included files '
# 'found matching %r', pattern)
elif action == 'global-include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=False):
logger.warning('no files found matching %r '
'anywhere in distribution', pattern)
elif action == 'global-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=False)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found anywhere in '
# 'distribution', pattern)
elif action == 'recursive-include':
for pattern in patterns:
if not self._include_pattern(pattern, prefix=thedir):
logger.warning('no files found matching %r '
'under directory %r', pattern, thedir)
elif action == 'recursive-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, prefix=thedir)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found under directory %r',
# pattern, thedir)
elif action == 'graft':
if not self._include_pattern(None, prefix=dirpattern):
logger.warning('no directories found matching %r',
dirpattern)
elif action == 'prune':
if not self._exclude_pattern(None, prefix=dirpattern):
logger.warning('no previously-included directories found '
'matching %r', dirpattern)
else: # pragma: no cover
# This should never happen, as it should be caught in
# _parse_template_line
raise DistlibException(
'invalid action %r' % action)
#
# Private API
#
def _parse_directive(self, directive):
"""
Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns
"""
words = directive.split()
if len(words) == 1 and words[0] not in ('include', 'exclude',
'global-include',
'global-exclude',
'recursive-include',
'recursive-exclude',
'graft', 'prune'):
# no action given, let's use the default 'include'
words.insert(0, 'include')
action = words[0]
patterns = thedir = dir_pattern = None
if action in ('include', 'exclude',
'global-include', 'global-exclude'):
if len(words) < 2:
raise DistlibException(
'%r expects <pattern1> <pattern2> ...' % action)
patterns = [convert_path(word) for word in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistlibException(
'%r expects <dir> <pattern1> <pattern2> ...' % action)
thedir = convert_path(words[1])
patterns = [convert_path(word) for word in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistlibException(
'%r expects a single <dir_pattern>' % action)
dir_pattern = convert_path(words[1])
else:
raise DistlibException('unknown action %r' % action)
return action, patterns, thedir, dir_pattern
def _include_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
"""
# XXX docstring lying about what the special chars are?
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.files.add(name)
found = True
return found
def _exclude_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Remove strings (presumably filenames) from 'files' that match
'pattern'.
Other parameters are the same as for 'include_pattern()', above.
The list 'self.files' is modified in place. Return True if files are
found.
This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
packaging source distributions
"""
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
for f in list(self.files):
if pattern_re.search(f):
self.files.remove(f)
found = True
return found
def _translate_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Translate a shell-like wildcard pattern to a compiled regular
expression.
Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern)
else:
return pattern
if pattern:
pattern_re = self._glob_to_re(pattern)
else:
pattern_re = ''
base = re.escape(os.path.join(self.base, ''))
if prefix is not None:
# ditch end of pattern character
empty_pattern = self._glob_to_re('')
prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
sep = os.sep
if os.sep == '\\':
sep = r'\\'
pattern_re = '^' + base + sep.join((prefix_re,
'.*' + pattern_re))
else: # no prefix -- respect anchor flag
if anchor:
pattern_re = '^' + base + pattern_re
return re.compile(pattern_re)
def _glob_to_re(self, pattern):
"""Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = r'\1[^%s]' % sep
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re
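# Illustrative usage sketch (not part of the original module): shows the
# MANIFEST.in-style directive flow documented in process_directive(). The
# base directory and the directives are examples only.
def _example_manifest(base_dir):
    manifest = Manifest(base_dir)
    manifest.process_directive('global-include *.py')
    manifest.process_directive('recursive-exclude tests *')
    manifest.process_directive('prune build')
    return manifest.sorted(wantdirs=True)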
| apache-2.0 |
0x46616c6b/ansible | lib/ansible/plugins/action/net_config.py | 137 | 4196 | #
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
vCentre/vFRP-6233 | frappe/patches/v4_1/file_manager_fix.py | 29 | 3309 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os
from frappe.utils.file_manager import get_content_hash, get_file, get_file_name
from frappe.utils import get_files_path, get_site_path
# The files missed by the previous patch might have been replaced with new files
# with the same filename
#
# This patch does the following,
# * Detect which files were replaced and rename them with name{hash:5}.extn and
# update filedata record for the new file
#
# * make missing_files.txt in site dir with files that should be recovered from
# a backup from a time before version 3 migration
#
# * Patch remaining unpatched File records.
def execute():
frappe.db.auto_commit_on_many_writes = True
rename_replacing_files()
for name, file_name, file_url in frappe.db.sql(
"""select name, file_name, file_url from `tabFile`
where ifnull(file_name, '')!='' and ifnull(content_hash, '')=''"""):
b = frappe.get_doc('File', name)
old_file_name = b.file_name
b.file_name = os.path.basename(old_file_name)
if old_file_name.startswith('files/') or old_file_name.startswith('/files/'):
b.file_url = os.path.normpath('/' + old_file_name)
else:
b.file_url = os.path.normpath('/files/' + old_file_name)
try:
_file_name, content = get_file(name)
b.content_hash = get_content_hash(content)
except IOError:
print 'Warning: Error processing ', name
b.content_hash = None
b.flags.ignore_duplicate_entry_error = True
b.save()
frappe.db.auto_commit_on_many_writes = False
def get_replaced_files():
ret = []
new_files = dict(frappe.db.sql("select name, file_name from `tabFile` where file_name not like 'files/%'"))
old_files = dict(frappe.db.sql("select name, file_name from `tabFile` where ifnull(content_hash, '')=''"))
invfiles = invert_dict(new_files)
for nname, nfilename in new_files.iteritems():
if 'files/' + nfilename in old_files.values():
ret.append((nfilename, invfiles[nfilename]))
return ret
def rename_replacing_files():
replaced_files = get_replaced_files()
if len(replaced_files):
missing_files = [v[0] for v in replaced_files]
with open(get_site_path('missing_files.txt'), 'w') as f:
f.write(('\n'.join(missing_files) + '\n').encode('utf-8'))
for file_name, file_datas in replaced_files:
print 'processing ' + file_name
content_hash = frappe.db.get_value('File', file_datas[0], 'content_hash')
if not content_hash:
continue
new_file_name = get_file_name(file_name, content_hash)
if os.path.exists(get_files_path(new_file_name)):
continue
print 'skipping ' + file_name
try:
os.rename(get_files_path(file_name), get_files_path(new_file_name))
except OSError:
print 'Error renaming ', file_name
for name in file_datas:
f = frappe.get_doc('File', name)
f.file_name = new_file_name
f.file_url = '/files/' + new_file_name
f.save()
def invert_dict(ddict):
ret = {}
for k,v in ddict.iteritems():
if not ret.get(v):
ret[v] = [k]
else:
ret[v].append(k)
return ret
def get_file_name(fname, hash):
if '.' in fname:
partial, extn = fname.rsplit('.', 1)
else:
partial = fname
extn = ''
return '{partial}{suffix}.{extn}'.format(partial=partial, extn=extn, suffix=hash[:5])
| mit |
adazey/Muzez | libs/nltk/corpus/reader/timit.py | 4 | 17406 | # Natural Language Toolkit: TIMIT Corpus Reader
#
# Copyright (C) 2001-2007 NLTK Project
# Author: Haejoong Lee <[email protected]>
# Steven Bird <[email protected]>
# Jacob Perkins <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
# [xx] this docstring is out-of-date:
"""
Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
This corpus contains selected portion of the TIMIT corpus.
- 16 speakers from 8 dialect regions
- 1 male and 1 female from each dialect region
- total 130 sentences (10 sentences per speaker. Note that some
sentences are shared among other speakers, especially sa1 and sa2
are spoken by all speakers.)
- total 160 recording of sentences (10 recordings per speaker)
- audio format: NIST Sphere, single channel, 16kHz sampling,
16 bit sample, PCM encoding
Module contents
===============
The timit corpus reader provides 4 functions and 4 data items.
- utterances
List of utterances in the corpus. There are total 160 utterances,
each of which corresponds to a unique utterance of a speaker.
Here's an example of an utterance identifier in the list::
dr1-fvmh0/sx206
- _---- _---
| | | | |
| | | | |
| | | | `--- sentence number
| | | `----- sentence type (a:all, i:shared, x:exclusive)
| | `--------- speaker ID
| `------------ sex (m:male, f:female)
`-------------- dialect region (1..8)
- speakers
List of speaker IDs. An example of speaker ID::
dr1-fvmh0
Note that if you split an item ID with colon and take the first element of
the result, you will get a speaker ID.
>>> itemid = 'dr1-fvmh0/sx206'
>>> spkrid , sentid = itemid.split('/')
>>> spkrid
'dr1-fvmh0'
The second element of the result is a sentence ID.
- dictionary()
Phonetic dictionary of words contained in this corpus. This is a Python
dictionary from words to phoneme lists.
- spkrinfo()
Speaker information table. It's a Python dictionary from speaker IDs to
records of 10 fields. Speaker IDs the same as the ones in timie.speakers.
Each record is a dictionary from field names to values, and the fields are
as follows::
id speaker ID as defined in the original TIMIT speaker info table
sex speaker gender (M:male, F:female)
dr speaker dialect region (1:new england, 2:northern,
3:north midland, 4:south midland, 5:southern, 6:new york city,
7:western, 8:army brat (moved around))
use corpus type (TRN:training, TST:test)
in this sample corpus only TRN is available
recdate recording date
birthdate speaker birth date
ht speaker height
race speaker race (WHT:white, BLK:black, AMR:american indian,
SPN:spanish-american, ORN:oriental,???:unknown)
edu speaker education level (HS:high school, AS:associate degree,
BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
PHD:doctorate degree (PhD,JD,MD), ??:unknown)
comments comments by the recorder
The 4 functions are as follows.
- tokenized(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of word lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the word list is a tuple of word(string), start offset and
end offset, where offset is represented as a number of 16kHz samples.
- phonetic(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of phoneme lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the phoneme list is a tuple of word(string), start offset
and end offset, where offset is represented as a number of 16kHz samples.
- audiodata(item, start=0, end=None)
Given an item, returns a chunk of audio samples formatted into a string.
When the fuction is called, if start and end are omitted, the entire
samples of the recording will be returned. If only end is omitted,
samples from the start offset to the end of the recording will be returned.
- play(data)
Play the given audio samples. The audio samples can be obtained from the
timit.audiodata function.
"""
from __future__ import print_function, unicode_literals
import sys
import os
import re
import tempfile
import time
from nltk import compat
from nltk.tree import Tree
from nltk.internals import import_from_stdlib
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class TimitCorpusReader(CorpusReader):
"""
Reader for the TIMIT corpus (or any other corpus with the same
file layout and use of file formats). The corpus root directory
should contain the following files:
- timitdic.txt: dictionary of standard transcriptions
- spkrinfo.txt: table of speaker information
In addition, the root directory should contain one subdirectory
for each speaker, containing three files for each utterance:
- <utterance-id>.txt: text content of utterances
- <utterance-id>.wrd: tokenized text content of utterances
- <utterance-id>.phn: phonetic transcription of utterances
- <utterance-id>.wav: utterance sound file
"""
_FILE_RE = (r'(\w+-\w+/\w+\.(phn|txt|wav|wrd))|' +
r'timitdic\.txt|spkrinfo\.txt')
"""A regexp matching fileids that are used by this corpus reader."""
_UTTERANCE_RE = r'\w+-\w+/\w+\.txt'
def __init__(self, root, encoding='utf8'):
"""
Construct a new TIMIT corpus reader in the given directory.
:param root: The root directory for this corpus.
"""
# Ensure that wave files don't get treated as unicode data:
if isinstance(encoding, compat.string_types):
encoding = [('.*\.wav', None), ('.*', encoding)]
CorpusReader.__init__(self, root,
find_corpus_fileids(root, self._FILE_RE),
encoding=encoding)
self._utterances = [name[:-4] for name in
find_corpus_fileids(root, self._UTTERANCE_RE)]
"""A list of the utterance identifiers for all utterances in
this corpus."""
self._speakerinfo = None
self._root = root
self.speakers = sorted(set(u.split('/')[0] for u in self._utterances))
def fileids(self, filetype=None):
"""
Return a list of file identifiers for the files that make up
this corpus.
:param filetype: If specified, then ``filetype`` indicates that
only the files that have the given type should be
returned. Accepted values are: ``txt``, ``wrd``, ``phn``,
``wav``, or ``metadata``,
"""
if filetype is None:
return CorpusReader.fileids(self)
elif filetype in ('txt', 'wrd', 'phn', 'wav'):
return ['%s.%s' % (u, filetype) for u in self._utterances]
elif filetype == 'metadata':
return ['timitdic.txt', 'spkrinfo.txt']
else:
raise ValueError('Bad value for filetype: %r' % filetype)
def utteranceids(self, dialect=None, sex=None, spkrid=None,
sent_type=None, sentid=None):
"""
:return: A list of the utterance identifiers for all
utterances in this corpus, or for the given speaker, dialect
region, gender, sentence type, or sentence number, if
specified.
"""
if isinstance(dialect, compat.string_types): dialect = [dialect]
if isinstance(sex, compat.string_types): sex = [sex]
if isinstance(spkrid, compat.string_types): spkrid = [spkrid]
if isinstance(sent_type, compat.string_types): sent_type = [sent_type]
if isinstance(sentid, compat.string_types): sentid = [sentid]
utterances = self._utterances[:]
if dialect is not None:
utterances = [u for u in utterances if u[2] in dialect]
if sex is not None:
utterances = [u for u in utterances if u[4] in sex]
if spkrid is not None:
utterances = [u for u in utterances if u[:9] in spkrid]
if sent_type is not None:
utterances = [u for u in utterances if u[11] in sent_type]
if sentid is not None:
utterances = [u for u in utterances if u[10:] in sentid]
return utterances
def transcription_dict(self):
"""
:return: A dictionary giving the 'standard' transcription for
each word.
"""
_transcriptions = {}
for line in self.open('timitdic.txt'):
if not line.strip() or line[0] == ';': continue
m = re.match(r'\s*(\S+)\s+/(.*)/\s*$', line)
if not m: raise ValueError('Bad line: %r' % line)
_transcriptions[m.group(1)] = m.group(2).split()
return _transcriptions
def spkrid(self, utterance):
return utterance.split('/')[0]
def sentid(self, utterance):
return utterance.split('/')[1]
def utterance(self, spkrid, sentid):
return '%s/%s' % (spkrid, sentid)
def spkrutteranceids(self, speaker):
"""
:return: A list of all utterances associated with a given
speaker.
"""
return [utterance for utterance in self._utterances
if utterance.startswith(speaker+'/')]
def spkrinfo(self, speaker):
"""
:return: A dictionary mapping .. something.
"""
if speaker in self._utterances:
speaker = self.spkrid(speaker)
if self._speakerinfo is None:
self._speakerinfo = {}
for line in self.open('spkrinfo.txt'):
if not line.strip() or line[0] == ';': continue
rec = line.strip().split(None, 9)
key = "dr%s-%s%s" % (rec[2],rec[1].lower(),rec[0].lower())
self._speakerinfo[key] = SpeakerInfo(*rec)
return self._speakerinfo[speaker]
def phones(self, utterances=None):
return [line.split()[-1]
for fileid in self._utterance_fileids(utterances, '.phn')
for line in self.open(fileid) if line.strip()]
def phone_times(self, utterances=None):
"""
offset is represented as a number of 16kHz samples!
"""
return [(line.split()[2], int(line.split()[0]), int(line.split()[1]))
for fileid in self._utterance_fileids(utterances, '.phn')
for line in self.open(fileid) if line.strip()]
def words(self, utterances=None):
return [line.split()[-1]
for fileid in self._utterance_fileids(utterances, '.wrd')
for line in self.open(fileid) if line.strip()]
def word_times(self, utterances=None):
return [(line.split()[2], int(line.split()[0]), int(line.split()[1]))
for fileid in self._utterance_fileids(utterances, '.wrd')
for line in self.open(fileid) if line.strip()]
def sents(self, utterances=None):
return [[line.split()[-1]
for line in self.open(fileid) if line.strip()]
for fileid in self._utterance_fileids(utterances, '.wrd')]
def sent_times(self, utterances=None):
return [(line.split(None,2)[-1].strip(),
int(line.split()[0]), int(line.split()[1]))
for fileid in self._utterance_fileids(utterances, '.txt')
for line in self.open(fileid) if line.strip()]
def phone_trees(self, utterances=None):
if utterances is None: utterances = self._utterances
if isinstance(utterances, compat.string_types): utterances = [utterances]
trees = []
for utterance in utterances:
word_times = self.word_times(utterance)
phone_times = self.phone_times(utterance)
sent_times = self.sent_times(utterance)
while sent_times:
(sent, sent_start, sent_end) = sent_times.pop(0)
trees.append(Tree('S', []))
while (word_times and phone_times and
phone_times[0][2] <= word_times[0][1]):
trees[-1].append(phone_times.pop(0)[0])
while word_times and word_times[0][2] <= sent_end:
(word, word_start, word_end) = word_times.pop(0)
trees[-1].append(Tree(word, []))
while phone_times and phone_times[0][2] <= word_end:
trees[-1][-1].append(phone_times.pop(0)[0])
while phone_times and phone_times[0][2] <= sent_end:
trees[-1].append(phone_times.pop(0)[0])
return trees
# [xx] NOTE: This is currently broken -- we're assuming that the
# fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE
# fileids.
def wav(self, utterance, start=0, end=None):
# nltk.chunk conflicts with the stdlib module 'chunk'
wave = import_from_stdlib('wave')
w = wave.open(self.open(utterance+'.wav'), 'rb')
if end is None:
end = w.getnframes()
# Skip past frames before start, then read the frames we want
w.readframes(start)
frames = w.readframes(end-start)
# Open a new temporary file -- the wave module requires
# an actual file, and won't work w/ stringio. :(
tf = tempfile.TemporaryFile()
out = wave.open(tf, 'w')
# Write the parameters & data to the new file.
out.setparams(w.getparams())
out.writeframes(frames)
out.close()
# Read the data back from the file, and return it. The
# file will automatically be deleted when we return.
tf.seek(0)
return tf.read()
def audiodata(self, utterance, start=0, end=None):
assert(end is None or end > start)
headersize = 44
if end is None:
data = self.open(utterance+'.wav').read()
else:
data = self.open(utterance+'.wav').read(headersize+end*2)
return data[headersize+start*2:]
def _utterance_fileids(self, utterances, extension):
if utterances is None: utterances = self._utterances
if isinstance(utterances, compat.string_types): utterances = [utterances]
return ['%s%s' % (u, extension) for u in utterances]
def play(self, utterance, start=0, end=None):
"""
Play the given audio sample.
:param utterance: The utterance id of the sample to play
"""
# Method 1: os audio dev.
try:
import ossaudiodev
try:
dsp = ossaudiodev.open('w')
dsp.setfmt(ossaudiodev.AFMT_S16_LE)
dsp.channels(1)
dsp.speed(16000)
dsp.write(self.audiodata(utterance, start, end))
dsp.close()
except IOError as e:
print(("can't acquire the audio device; please "
"activate your audio device."), file=sys.stderr)
print("system error message:", str(e), file=sys.stderr)
return
except ImportError:
pass
# Method 2: pygame
try:
# FIXME: this won't work under python 3
import pygame.mixer, StringIO
pygame.mixer.init(16000)
f = StringIO.StringIO(self.wav(utterance, start, end))
pygame.mixer.Sound(f).play()
while pygame.mixer.get_busy():
time.sleep(0.01)
return
except ImportError:
pass
# Method 3: complain. :)
print(("you must install pygame or ossaudiodev "
"for audio playback."), file=sys.stderr)
@compat.python_2_unicode_compatible
class SpeakerInfo(object):
def __init__(self, id, sex, dr, use, recdate, birthdate,
ht, race, edu, comments=None):
self.id = id
self.sex = sex
self.dr = dr
self.use = use
self.recdate = recdate
self.birthdate = birthdate
self.ht = ht
self.race = race
self.edu = edu
self.comments = comments
def __repr__(self):
attribs = 'id sex dr use recdate birthdate ht race edu comments'
args = ['%s=%r' % (attr, getattr(self, attr))
for attr in attribs.split()]
return 'SpeakerInfo(%s)' % (', '.join(args))
def read_timit_block(stream):
"""
Block reader for timit tagged sentences, which are preceded by a sentence
number that will be ignored.
"""
line = stream.readline()
if not line: return []
n, sent = line.split(' ', 1)
return [sent]
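# Illustrative usage sketch (not part of the original module): shows the read
# API described in the module docstring. The corpus root path is an assumption;
# point it at a local copy or sample of the TIMIT corpus.
def _example_usage(root):
    reader = TimitCorpusReader(root)
    for utterance in reader.utteranceids()[:2]:
        print(reader.spkrid(utterance), reader.words(utterance))
        print(reader.phones(utterance))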
| gpl-3.0 |
muff1nman/duplicity | duplicity/backends/ncftpbackend.py | 1 | 5129 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <[email protected]>
# Copyright 2007 Kenneth Loafman <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import urllib
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity import tempdir
class NCFTPBackend(duplicity.backend.Backend):
"""Connect to remote store using File Transfer Protocol"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# we expect an error return, so go low-level and ignore it
try:
p = os.popen("ncftpls -v")
fout = p.read()
ret = p.close()
except Exception:
pass
# the expected error is 8 in the high-byte and some output
if ret != 0x0800 or not fout:
log.FatalError("NcFTP not found: Please install NcFTP version 3.1.9 or later",
log.ErrorCode.ftp_ncftp_missing)
# version is the second word of the first line
version = fout.split('\n')[0].split()[1]
if version < "3.1.9":
log.FatalError("NcFTP too old: Duplicity requires NcFTP version 3.1.9,"
"3.2.1 or later. Version 3.2.0 will not work properly.",
log.ErrorCode.ftp_ncftp_too_old)
elif version == "3.2.0":
log.Warn("NcFTP (ncftpput) version 3.2.0 may fail with duplicity.\n"
"see: http://www.ncftpd.com/ncftp/doc/changelog.html\n"
"If you have trouble, please upgrade to 3.2.1 or later",
log.WarningCode.ftp_ncftp_v320)
log.Notice("NcFTP version is %s" % version)
self.parsed_url = parsed_url
self.url_string = duplicity.backend.strip_auth_from_url(self.parsed_url)
# strip ncftp+ prefix
self.url_string = duplicity.backend.strip_prefix(self.url_string, 'ncftp')
# This squelches the "file not found" result from ncftpls when
# the ftp backend looks for a collection that does not exist.
# version 3.2.2 has error code 5, 1280 is some legacy value
self.popen_breaks['ncftpls'] = [5, 1280]
# Use an explicit directory name.
if self.url_string[-1] != '/':
self.url_string += '/'
self.password = self.get_password()
if globals.ftp_connection == 'regular':
self.conn_opt = '-E'
else:
self.conn_opt = '-F'
self.tempfile, self.tempname = tempdir.default().mkstemp()
os.write(self.tempfile, "host %s\n" % self.parsed_url.hostname)
os.write(self.tempfile, "user %s\n" % self.parsed_url.username)
os.write(self.tempfile, "pass %s\n" % self.password)
os.close(self.tempfile)
self.flags = "-f %s %s -t %s -o useCLNT=0,useHELP_SITE=0 " % \
(self.tempname, self.conn_opt, globals.timeout)
if parsed_url.port is not None and parsed_url.port != 21:
self.flags += " -P '%s'" % (parsed_url.port)
def _put(self, source_path, remote_filename):
remote_path = os.path.join(urllib.unquote(self.parsed_url.path.lstrip('/')), remote_filename).rstrip()
commandline = "ncftpput %s -m -V -C '%s' '%s'" % \
(self.flags, source_path.name, remote_path)
self.subprocess_popen(commandline)
def _get(self, remote_filename, local_path):
remote_path = os.path.join(urllib.unquote(self.parsed_url.path), remote_filename).rstrip()
commandline = "ncftpget %s -V -C '%s' '%s' '%s'" % \
(self.flags, self.parsed_url.hostname, remote_path.lstrip('/'), local_path.name)
self.subprocess_popen(commandline)
def _list(self):
# Do a long listing to avoid connection reset
commandline = "ncftpls %s -l '%s'" % (self.flags, self.url_string)
_, l, _ = self.subprocess_popen(commandline)
# Look for our files as the last element of a long list line
return [x.split()[-1] for x in l.split('\n') if x and not x.startswith("total ")]
def _delete(self, filename):
commandline = "ncftpls %s -l -X 'DELE %s' '%s'" % \
(self.flags, filename, self.url_string)
self.subprocess_popen(commandline)
duplicity.backend.register_backend("ncftp+ftp", NCFTPBackend)
duplicity.backend.uses_netloc.extend(['ncftp+ftp'])
| gpl-2.0 |
mkalte666/Markkasse | setup.py | 1 | 3478 | #!/usr/bin/env python
import io
import sys
import os
import sqlite3
import hashlib
import binascii
print "Setting Up Mark System"
print "This will, delete all data but the ones in the backup-folder !"
print "If you are shure you want to continue, type \" YES \". yep, in capslock!\n"
ShouldInstall = unicode(raw_input("Shure? "))
if ShouldInstall != unicode("YES"):
print "Quitting Installation...\n"
sys.exit()
print "Cleaning Up..."
os.system("rm -rf ./marksystem")
os.system("rm -rf ./log")
print "Done!"
print "Beginning Installation. Creating folders...\n"
os.system("mkdir ./backup")
os.system("mkdir ./marksystem")
os.system("mkdir ./marksystem/db")
os.system("mkdir ./marksystem/templates")
os.system("mkdir ./marksystem/static/")
os.system("mkdir ./log/")
os.system("mkdir ./marksystem/static/css")
os.system("mkdir ./marksystem/static/uploads")
os.system("mkdir ./marksystem/static/img")
os.system("mkdir ./marksystem/static/font/")
os.system("mkdir ./marksystem/static/js/")
print "Done!\n"
print "Copying Files..."
os.system("cp ./installation/*.py ./marksystem/")
os.system("touch ./log/mark.log")
os.system("cp ./installation/templates/* ./marksystem/templates")
os.system("cp ./installation/media/img/* ./marksystem/static/img")
os.system("cp ./installation/media/css/* ./marksystem/static/css")
os.system("cp ./installation/media/font/* ./marksystem/static/font")
os.system("cp ./installation/js/* ./marksystem/static/js/")
#copys of files from the installation-files folder here
print "Done!\n"
print "Creating Database..."
#database creation
connection = sqlite3.connect("./marksystem/db/mark.db")
cursor = connection.cursor()
cursor.execute('''CREATE TABLE user_info(id INTEGER PRIMARY KEY, name TEXT, hash TEXT, session TEXT, userlevel INTEGER)''')
cursor.execute('''CREATE TABLE products(id INTEGER PRIMARY KEY, name TEXT, price REAL, amoutInStock INTEGER, image TEXT, isSubproduct BOOLEAN, parent INTEGER, isBuyable INTEGER)''')
cursor.execute('''CREATE TABLE transactions(id INTEGER PRIMARY KEY, description TEXT, inflow REAL, outflow REAL, userID INTEGER, productIDs TEXT, isGenerated BOOLEAN, date TEXT)''')
cursor.execute('''CREATE TABLE pending_orders(id INTEGER PRIMARY KEY, transactionId INTEGER)''')
cursor.execute('''CREATE TABLE debtTransactions(id INTEGER PRIMARY KEY, transactionId INTEGER, isPaied BOOLEAN, userId INTEGER)''')
print "Setting basic information in Database"
cursor.execute('''insert into products(name, price, isSubproduct, parent, isBuyable) values ('remove me!', 3.0, 0, -1, 1)''')
print "Set Root User:"
username = unicode(raw_input("Username: "))
password = "not the"
passwordConfirm = "same"
while password != passwordConfirm:
password = hashlib.sha256(unicode(raw_input("Password: "))).hexdigest()
passwordConfirm = hashlib.sha256(unicode(raw_input("Confirm: "))).hexdigest()
print "Change Password after logging in for the first time!!!"
cursor.execute('''INSERT INTO user_info (name, hash, session, userlevel) VALUES (?, ?, 'invalid', 9001)''', (username, password, ))
connection.commit()
cursor.close()
connection.close()
print "Done!\n"
print "Genarating files"
sessionKey = os.urandom(24).encode('hex')
outfile = open('./marksystem/generated.py', 'w')
outfile.write("secretKey = '"+unicode(sessionKey)+"'\n")
maxdays = 6
maxdays = input("Input the maximal time (in days) a user can owe the system money: ")
outfile.write("maxdays = "+unicode(maxdays)+"\n")
outfile.close()
print "Done!"
print "Installation Compleated!"
| mit |
cloudcache/zstack-utility | kvmagent/kvmagent/test/test_nfs_primary_storage_create_root_volume.py | 3 | 2034 | '''
@author: Frank
'''
import unittest
import time
import os.path
from kvmagent import kvmagent
from kvmagent.plugins import nfs_primarystorage_plugin
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import log
from zstacklib.utils import uuidhelper
from zstacklib.utils import linux
logger = log.get_logger(__name__)
class Test(unittest.TestCase):
NFS_URL = 'localhost:/home/primary'
CALLBACK_URL = 'http://localhost:7070/testcallback'
def callback(self, req):
rsp = jsonobject.loads(req[http.REQUEST_BODY])
print jsonobject.dumps(rsp)
def setUp(self):
self.service = kvmagent.new_rest_service()
kvmagent.get_http_server().register_sync_uri('/testcallback', self.callback)
self.service.start(True)
time.sleep(1)
def mount(self):
cmd = nfs_primarystorage_plugin.MountCmd()
cmd.url = self.NFS_URL
cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
ret = http.json_dump_post(callurl, cmd)
rsp = jsonobject.loads(ret)
self.assertTrue(rsp.success, rsp.error)
self.assertTrue(linux.is_mounted(cmd.url, cmd.mountPath))
def testName(self):
self.mount()
cmd = nfs_primarystorage_plugin.CreateRootVolumeFromTemplateCmd()
cmd.installUrl = '/tmp/test1.qcow2'
cmd.templatePathInCache = "/tmp/test.qcow2"
cmd.timeout = 30
url = kvmagent._build_url_for_test([nfs_primarystorage_plugin.CREATE_VOLUME_FROM_TEMPLATE_PATH])
rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL})
time.sleep(10)
self.service.stop()
linux.umount_by_url(self.NFS_URL)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | apache-2.0 |
dmitry-r/incubator-airflow | airflow/ti_deps/deps/not_running_dep.py | 58 | 1332 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class NotRunningDep(BaseTIDep):
NAME = "Task Instance Not Already Running"
# Task instances must not already be running, as running two copies of the same
# task instance at the same time (AKA double-trigger) should be avoided at all
# costs, even if the context specifies that all dependencies should be ignored.
IGNOREABLE = False
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.state == State.RUNNING:
yield self._failing_status(
reason="Task is already running, it started on {0}.".format(
ti.start_date))
| apache-2.0 |
obadonke/pythonPlay | scripts/where_dat_ex_go/wheredatexgo.py | 2 | 2822 | import csv
class ExceptionStats:
def __init__(self):
self.status = None
self.owner = None
self.count = 0
self.hash = None
def inc_count(self):
self.count += 1
class ExceptionData:
def __init__(self, ident):
self.ident = ident
self.prev_stats = ExceptionStats()
self.curr_stats = ExceptionStats()
self.status_changed = False
exceptions = dict()
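# maps exception Id -> ExceptionData; prev_stats is filled from the older CSV
# export, curr_stats from the newer one, and the diff is written out below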
def should_skip(row: dict):
""" Apply same filters as the report
"""
if row['Region'] == "IGNORE":
return True
if row['VersionShort'] == "1" or row['VersionShort'] == "1.0":
return True
if '09/2014' in row['Date']:
return True
return False
def read_exc_from_file(ex_dict, filename, stat_getter):
with open(filename, 'r') as csv_file:
reader = csv.DictReader(csv_file, delimiter=',')
for row in reader:
if should_skip(row):
continue
ident = row['Id']
row_ex = None
if ident not in ex_dict:
row_ex = ExceptionData(ident)
ex_dict[ident] = row_ex
else:
row_ex = ex_dict[ident]
# just override status and owner as we go
stats = stat_getter(row_ex)
stats.status = row['Status']
stats.owner = row['Owner']
stats.hash = row['Exception Hash']
stats.inc_count()
def get_prev_stats(row_ex):
return row_ex.prev_stats
def get_curr_stats(row_ex):
return row_ex.curr_stats
# do initial count of exceptions
read_exc_from_file(exceptions, '20170526.csv', get_prev_stats)
# count current status values
read_exc_from_file(exceptions, '20170620.csv', get_curr_stats)
prev_total_count = 0
curr_total_count = 0
stat_changed_count = 0
with open('20170526-0620.csv', 'w', newline='') as csv_file:
ex_writer = csv.writer(csv_file, delimiter=',')
ex_writer.writerow(['id', 'old_owner', 'old_status', 'old_count', 'old_hash','new_owner', 'new_status', 'new_count', 'new_hash', 'status_changed'])
for k, ex in exceptions.items():
ex.status_changed = not (ex.prev_stats.status == ex.curr_stats.status)
ex_writer.writerow([
ex.ident,
ex.prev_stats.owner, ex.prev_stats.status, ex.prev_stats.count, ex.prev_stats.hash,
ex.curr_stats.owner, ex.curr_stats.status, ex.curr_stats.count, ex.curr_stats.hash,
ex.status_changed
])
prev_total_count += ex.prev_stats.count
curr_total_count += ex.curr_stats.count
if ex.status_changed:
stat_changed_count += ex.curr_stats.count
print("Prev total count:",prev_total_count)
print("Curr total count:",curr_total_count)
print("Changed total count:",stat_changed_count)
| gpl-3.0 |
hannorein/rebound | docs/generate_python_docs.py | 1 | 1054 | import rebound
import inspect
import docstring_to_markdown
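# convert_code_blocks rewrites doctest-style ">>>" lines in a docstring into
# fenced ```python blocks; render_class stitches the class docstring and the
# docstrings of the requested methods into markdown sections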
def convert_code_blocks(doc):
new_doc = ""
lines = doc.split("\n")
first = True
for line in lines:
if first:
if line[:3]==">>>":
first = False
new_doc += "```python\n"
new_doc += line[3:]+"\n"
else:
new_doc += line+"\n"
else:
if line[:3]==">>>":
new_doc += line[3:]+"\n"
else:
new_doc += "```\n"
new_doc += line+"\n"
first = True
if not first:
new_doc += "```\n"
return new_doc
def render_class(cls, functions=None):
d = "## Class `"+cls+"`\n"
d += convert_code_blocks(inspect.cleandoc(eval(cls).__doc__))
for function in (functions or []):
f = getattr(eval(cls),function)
d += "## Function `"+cls+"."+function+"`\n"
d += convert_code_blocks(inspect.cleandoc(f.__doc__))
return d
print(render_class("rebound.Simulation",["copy"]))
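# running this module prints the generated markdown for rebound.Simulation
# (and its copy() method) to stdout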
| gpl-3.0 |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_float.py | 2 | 62538 | import fractions
import operator
import os
import random
import sys
import struct
import time
import unittest
from test import support
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
INVALID_UNDERSCORE_LITERALS)
from math import isinf, isnan, copysign, ldexp
INF = float("inf")
NAN = float("nan")
have_getformat = hasattr(float, "__getformat__")
requires_getformat = unittest.skipUnless(have_getformat,
"requires __getformat__")
requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"),
"requires __setformat__")
#locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
class FloatSubclass(float):
pass
class OtherFloatSubclass(float):
pass
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
self.assertRaises(ValueError, float, ".nan")
self.assertRaises(ValueError, float, "+.inf")
self.assertRaises(ValueError, float, ".")
self.assertRaises(ValueError, float, "-.")
self.assertRaises(TypeError, float, {})
self.assertRaisesRegex(TypeError, "not 'dict'", float, {})
# Lone surrogate
self.assertRaises(UnicodeEncodeError, float, '\uD8F0')
# check that we don't accept alternate exponent markers
self.assertRaises(ValueError, float, "-1.7d29")
self.assertRaises(ValueError, float, "3D-14")
self.assertEqual(float(" \u0663.\u0661\u0664 "), 3.14)
self.assertEqual(float("\N{EM SPACE}3.14\N{EN SPACE}"), 3.14)
# extra long strings should not be a problem
float(b'.' + b'1'*1000)
float('.' + '1'*1000)
def test_underscores(self):
for lit in VALID_UNDERSCORE_LITERALS:
if not any(ch in lit for ch in 'jJxXoObB'):
self.assertEqual(float(lit), eval(lit))
self.assertEqual(float(lit), float(lit.replace('_', '')))
for lit in INVALID_UNDERSCORE_LITERALS:
if lit in ('0_7', '09_99'): # octals are not recognized here
continue
if not any(ch in lit for ch in 'jJxXoObB'):
self.assertRaises(ValueError, float, lit)
# Additional test cases; nan and inf are never valid as literals,
# only in the float() constructor, but we don't allow underscores
# in or around them.
self.assertRaises(ValueError, float, '_NaN')
self.assertRaises(ValueError, float, 'Na_N')
self.assertRaises(ValueError, float, 'IN_F')
self.assertRaises(ValueError, float, '-_INF')
self.assertRaises(ValueError, float, '-INF_')
# Check that we handle bytes values correctly.
self.assertRaises(ValueError, float, b'0_.\xff9')
def test_non_numeric_input_types(self):
# Test possible non-numeric types for the argument x, including
# subclasses of the explicitly documented accepted types.
class CustomStr(str): pass
class CustomBytes(bytes): pass
class CustomByteArray(bytearray): pass
factories = [
bytes,
bytearray,
lambda b: CustomStr(b.decode()),
CustomBytes,
CustomByteArray,
memoryview,
]
try:
from array import array
except ImportError:
pass
else:
factories.append(lambda b: array('B', b))
for f in factories:
x = f(b" 3.14 ")
with self.subTest(type(x)):
self.assertEqual(float(x), 3.14)
with self.assertRaisesRegex(ValueError, "could not convert"):
float(f(b'A' * 0x10))
def test_float_memoryview(self):
self.assertEqual(float(memoryview(b'12.3')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.3\x00')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.3 ')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.3A')[1:4]), 2.3)
self.assertEqual(float(memoryview(b'12.34')[1:4]), 2.3)
def test_error_message(self):
testlist = ('\xbd', '123\xbd', ' 123 456 ')
for s in testlist:
try:
float(s)
except ValueError as e:
self.assertIn(s.strip(), e.args[0])
else:
self.fail("Expected int(%r) to raise a ValueError", s)
@support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
# float must not accept the locale specific decimal point but
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
self.skipTest('decimal_point is not ","')
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertAlmostEqual(float(" .25e-1 "), .025)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
# Issue 5759: __float__ not called on str subclasses (though it is on
# unicode subclasses).
class FooStr(str):
def __float__(self):
return float(str(self)) + 1
self.assertEqual(float(Foo1()), 42.)
self.assertEqual(float(Foo2()), 42.)
with self.assertWarns(DeprecationWarning):
self.assertEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
self.assertEqual(float(FooStr('8')), 9.)
class Foo5:
def __float__(self):
return ""
self.assertRaises(TypeError, time.sleep, Foo5())
# Issue #24731
class F:
def __float__(self):
return OtherFloatSubclass(42.)
with self.assertWarns(DeprecationWarning):
self.assertEqual(float(F()), 42.)
with self.assertWarns(DeprecationWarning):
self.assertIs(type(float(F())), float)
with self.assertWarns(DeprecationWarning):
self.assertEqual(FloatSubclass(F()), 42.)
with self.assertWarns(DeprecationWarning):
self.assertIs(type(FloatSubclass(F())), FloatSubclass)
def test_is_integer(self):
self.assertFalse((1.1).is_integer())
self.assertTrue((1.).is_integer())
self.assertFalse(float("nan").is_integer())
self.assertFalse(float("inf").is_integer())
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
def test_float_containment(self):
floats = (INF, -INF, 0.0, 1.0, NAN)
for f in floats:
self.assertIn(f, [f])
self.assertIn(f, (f,))
self.assertIn(f, {f})
self.assertIn(f, {f: None})
self.assertEqual([f].count(f), 1, "[].count('%r') != 1" % f)
self.assertIn(f, floats)
for f in floats:
# nonidentical containers, same type, same contents
self.assertTrue([f] == [f], "[%r] != [%r]" % (f, f))
self.assertTrue((f,) == (f,), "(%r,) != (%r,)" % (f, f))
self.assertTrue({f} == {f}, "{%r} != {%r}" % (f, f))
self.assertTrue({f : None} == {f: None}, "{%r : None} != "
"{%r : None}" % (f, f))
# identical containers
l, t, s, d = [f], (f,), {f}, {f: None}
self.assertTrue(l == l, "[%r] not equal to itself" % f)
self.assertTrue(t == t, "(%r,) not equal to itself" % f)
self.assertTrue(s == s, "{%r} not equal to itself" % f)
self.assertTrue(d == d, "{%r : None} not equal to itself" % f)
def assertEqualAndEqualSign(self, a, b):
# fail unless a == b and a and b have the same sign bit;
# the only difference from assertEqual is that this test
# distinguishes -0.0 and 0.0.
self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b)))
@support.requires_IEEE_754
def test_float_mod(self):
# Check behaviour of % operator for IEEE 754 special cases.
# In particular, check signs of zeros.
mod = operator.mod
self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0)
self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100)
self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100)
self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0)
self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0)
@support.requires_IEEE_754
def test_float_pow(self):
# test builtin pow and ** operator for IEEE 754 special cases.
# Special cases taken from section F.9.4.4 of the C99 specification
for pow_op in pow, operator.pow:
# x**NAN is NAN for any x except 1
self.assertTrue(isnan(pow_op(-INF, NAN)))
self.assertTrue(isnan(pow_op(-2.0, NAN)))
self.assertTrue(isnan(pow_op(-1.0, NAN)))
self.assertTrue(isnan(pow_op(-0.5, NAN)))
self.assertTrue(isnan(pow_op(-0.0, NAN)))
self.assertTrue(isnan(pow_op(0.0, NAN)))
self.assertTrue(isnan(pow_op(0.5, NAN)))
self.assertTrue(isnan(pow_op(2.0, NAN)))
self.assertTrue(isnan(pow_op(INF, NAN)))
self.assertTrue(isnan(pow_op(NAN, NAN)))
# NAN**y is NAN for any y except +-0
self.assertTrue(isnan(pow_op(NAN, -INF)))
self.assertTrue(isnan(pow_op(NAN, -2.0)))
self.assertTrue(isnan(pow_op(NAN, -1.0)))
self.assertTrue(isnan(pow_op(NAN, -0.5)))
self.assertTrue(isnan(pow_op(NAN, 0.5)))
self.assertTrue(isnan(pow_op(NAN, 1.0)))
self.assertTrue(isnan(pow_op(NAN, 2.0)))
self.assertTrue(isnan(pow_op(NAN, INF)))
# (+-0)**y raises ZeroDivisionError for y a negative odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0)
# (+-0)**y raises ZeroDivisionError for y finite and negative
# but not an odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5)
# (+-0)**y is +-0 for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0)
# (+-0)**y is 0 for y finite and positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0)
# (-1)**+-inf is 1
self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0)
# 1**y is 1 for any y, even if y is an infinity or nan
self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0)
# x**+-0 is 1 for any x, even if x is a zero, infinity, or nan
self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0)
# x**y defers to complex pow for finite negative x and
# non-integral y.
self.assertEqual(type(pow_op(-2.0, -0.5)), complex)
self.assertEqual(type(pow_op(-2.0, 0.5)), complex)
self.assertEqual(type(pow_op(-1.0, -0.5)), complex)
self.assertEqual(type(pow_op(-1.0, 0.5)), complex)
self.assertEqual(type(pow_op(-0.5, -0.5)), complex)
self.assertEqual(type(pow_op(-0.5, 0.5)), complex)
# x**-INF is INF for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF)
self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF)
# x**-INF is 0 for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0)
# x**INF is 0 for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0)
# x**INF is INF for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, INF), INF)
self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(INF, INF), INF)
# (-INF)**y is -0.0 for y a negative odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0)
# (-INF)**y is 0.0 for y negative but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0)
# (-INF)**y is -INF for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF)
# (-INF)**y is INF for y positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF)
# INF**y is INF for y positive
self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF)
self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF)
# INF**y is 0.0 for y negative
self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0)
# basic checks not covered by the special cases above
self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0)
self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0)
# 1 ** large and -1 ** large; some libms apparently
# have problems with these
self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0)
# check sign for results that underflow to 0
self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0)
self.assertEqual(type(pow_op(-2.0, -2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0)
self.assertEqual(type(pow_op(-0.5, 2000.5)), complex)
self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0)
# check we don't raise an exception for subnormal results,
# and validate signs. Tests currently disabled, since
# they fail on systems where a subnormal result from pow
# is flushed to zero (e.g. Debian/ia64.)
#self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315)
@requires_setformat
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assertIn(float.__getformat__('double'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertIn(float.__getformat__('float'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = b'\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = bytes(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = b'\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = bytes(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = b'\x7f\x80\x00\x00'
LE_FLOAT_INF = bytes(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = b'\x7f\xc0\x00\x00'
LE_FLOAT_NAN = bytes(reversed(BE_FLOAT_NAN))
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
@requires_setformat
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
class IEEEFormatTestCase(unittest.TestCase):
@support.requires_IEEE_754
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
@support.requires_IEEE_754
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
class FormatTestCase(unittest.TestCase):
def test_format(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
self.assertEqual(format(0.0, 'f'), '0.000000')
# the default is 'g', except for empty format spec
self.assertEqual(format(0.0, ''), '0.0')
self.assertEqual(format(0.01, ''), '0.01')
self.assertEqual(format(0.01, 'g'), '0.01')
# empty presentation type should format in the same way as str
# (issue 5920)
x = 100/7.
self.assertEqual(format(x, ''), str(x))
self.assertEqual(format(x, '-'), str(x))
self.assertEqual(format(x, '>'), str(x))
self.assertEqual(format(x, '2'), str(x))
self.assertEqual(format(1.0, 'f'), '1.000000')
self.assertEqual(format(-1.0, 'f'), '-1.000000')
self.assertEqual(format( 1.0, ' f'), ' 1.000000')
self.assertEqual(format(-1.0, ' f'), '-1.000000')
self.assertEqual(format( 1.0, '+f'), '+1.000000')
self.assertEqual(format(-1.0, '+f'), '-1.000000')
# % formatting
self.assertEqual(format(-1.0, '%'), '-100.000000%')
# conversion to string should fail
self.assertRaises(ValueError, format, 3.0, "s")
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# issue 3382
self.assertEqual(format(NAN, 'f'), 'nan')
self.assertEqual(format(NAN, 'F'), 'NAN')
self.assertEqual(format(INF, 'f'), 'inf')
self.assertEqual(format(INF, 'F'), 'INF')
@support.requires_IEEE_754
def test_format_testfile(self):
with open(format_testfile) as testfile:
for line in testfile:
if line.startswith('--'):
continue
line = line.strip()
if not line:
continue
lhs, rhs = map(str.strip, line.split('->'))
fmt, arg = lhs.split()
self.assertEqual(fmt % float(arg), rhs)
self.assertEqual(fmt % -float(arg), '-' + rhs)
def test_issue5864(self):
self.assertEqual(format(123.456, '.4'), '123.5')
self.assertEqual(format(1234.56, '.4'), '1.235e+03')
self.assertEqual(format(12345.6, '.4'), '1.235e+04')
class ReprTestCase(unittest.TestCase):
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_short_repr(self):
# test short float repr introduced in Python 3.1. One aspect
# of this repr is that we get some degree of str -> float ->
# str roundtripping. In particular, for any numeric string
# containing 15 or fewer significant digits, those exact same
# digits (modulo trailing zeros) should appear in the output.
# No more repr(0.03) -> "0.029999999999999999"!
test_strings = [
# output always includes *either* a decimal point and at
# least one digit after that point, or an exponent.
'0.0',
'1.0',
'0.01',
'0.02',
'0.03',
'0.04',
'0.05',
'1.23456789',
'10.0',
'100.0',
# values >= 1e16 get an exponent...
'1000000000000000.0',
'9999999999999990.0',
'1e+16',
'1e+17',
# ... and so do values < 1e-4
'0.001',
'0.001001',
'0.00010000000000001',
'0.0001',
'9.999999999999e-05',
'1e-05',
# values designed to provoke failure if the FPU rounding
# precision isn't set correctly
'8.72293771110361e+25',
'7.47005307342313e+26',
'2.86438000439698e+28',
'8.89142905246179e+28',
'3.08578087079232e+35',
]
for s in test_strings:
negs = '-'+s
self.assertEqual(s, repr(float(s)))
self.assertEqual(negs, repr(float(negs)))
# Since Python 3.2, repr and str are identical
self.assertEqual(repr(float(s)), str(float(s)))
self.assertEqual(repr(float(negs)), str(float(negs)))
@support.requires_IEEE_754
class RoundTestCase(unittest.TestCase):
def test_inf_nan(self):
self.assertRaises(OverflowError, round, INF)
self.assertRaises(OverflowError, round, -INF)
self.assertRaises(ValueError, round, NAN)
self.assertRaises(TypeError, round, INF, 0.0)
self.assertRaises(TypeError, round, -INF, 1.0)
self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
self.assertRaises(TypeError, round, -0.0, 1j)
def test_large_n(self):
for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
self.assertEqual(round(123.456, n), 123.456)
self.assertEqual(round(-123.456, n), -123.456)
self.assertEqual(round(1e300, n), 1e300)
self.assertEqual(round(1e-320, n), 1e-320)
self.assertEqual(round(1e150, 300), 1e150)
self.assertEqual(round(1e300, 307), 1e300)
self.assertEqual(round(-3.1415, 308), -3.1415)
self.assertEqual(round(1e150, 309), 1e150)
self.assertEqual(round(1.4e-315, 315), 1e-315)
def test_small_n(self):
for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
self.assertEqual(round(123.456, n), 0.0)
self.assertEqual(round(-123.456, n), -0.0)
self.assertEqual(round(1e300, n), 0.0)
self.assertEqual(round(1e-320, n), 0.0)
def test_overflow(self):
self.assertRaises(OverflowError, round, 1.6e308, -308)
self.assertRaises(OverflowError, round, -1.7e308, -308)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_previous_round_bugs(self):
# particular cases that have occurred in bug reports
self.assertEqual(round(562949953421312.5, 1),
562949953421312.5)
self.assertEqual(round(56294995342131.5, 3),
56294995342131.5)
# round-half-even
self.assertEqual(round(25.0, -1), 20.0)
self.assertEqual(round(35.0, -1), 40.0)
self.assertEqual(round(45.0, -1), 40.0)
self.assertEqual(round(55.0, -1), 60.0)
self.assertEqual(round(65.0, -1), 60.0)
self.assertEqual(round(75.0, -1), 80.0)
self.assertEqual(round(85.0, -1), 80.0)
self.assertEqual(round(95.0, -1), 100.0)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_matches_float_format(self):
# round should give the same results as float formatting
for i in range(500):
x = i/1000.
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
for i in range(5, 5000, 10):
x = i/1000.
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
for i in range(500):
x = random.random()
self.assertEqual(float(format(x, '.0f')), round(x, 0))
self.assertEqual(float(format(x, '.1f')), round(x, 1))
self.assertEqual(float(format(x, '.2f')), round(x, 2))
self.assertEqual(float(format(x, '.3f')), round(x, 3))
def test_format_specials(self):
# Test formatting of nans and infs.
def test(fmt, value, expected):
# Test with both % and format().
self.assertEqual(fmt % value, expected, fmt)
fmt = fmt[1:] # strip off the %
self.assertEqual(format(value, fmt), expected, fmt)
for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
'%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
pfmt = '%+' + fmt[1:]
sfmt = '% ' + fmt[1:]
test(fmt, INF, 'inf')
test(fmt, -INF, '-inf')
test(fmt, NAN, 'nan')
test(fmt, -NAN, 'nan')
# When asking for a sign, it's always provided. nans are
# always positive.
test(pfmt, INF, '+inf')
test(pfmt, -INF, '-inf')
test(pfmt, NAN, '+nan')
test(pfmt, -NAN, '+nan')
# When using ' ' for a sign code, only infs can be negative.
# Others have a space.
test(sfmt, INF, ' inf')
test(sfmt, -INF, '-inf')
test(sfmt, NAN, ' nan')
test(sfmt, -NAN, ' nan')
def test_None_ndigits(self):
for x in round(1.23), round(1.23, None), round(1.23, ndigits=None):
self.assertEqual(x, 1)
self.assertIsInstance(x, int)
for x in round(1.78), round(1.78, None), round(1.78, ndigits=None):
self.assertEqual(x, 2)
self.assertIsInstance(x, int)
# Beginning with Python 2.6 float has cross platform compatible
# ways to create and represent inf and nan
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assertTrue(isinf(float("inf")))
self.assertTrue(isinf(float("+inf")))
self.assertTrue(isinf(float("-inf")))
self.assertTrue(isinf(float("infinity")))
self.assertTrue(isinf(float("+infinity")))
self.assertTrue(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
self.assertRaises(ValueError, float, "++Inf")
self.assertRaises(ValueError, float, "-+inf")
self.assertRaises(ValueError, float, "+-infinity")
self.assertRaises(ValueError, float, "--Infinity")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
self.assertTrue(isnan(float("nan")))
self.assertTrue(isnan(float("+nan")))
self.assertTrue(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
self.assertRaises(ValueError, float, "++nan")
self.assertRaises(ValueError, float, "-+NAN")
self.assertRaises(ValueError, float, "+-NaN")
self.assertRaises(ValueError, float, "--nAn")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
def test_inf_signs(self):
self.assertEqual(copysign(1.0, float('inf')), 1.0)
self.assertEqual(copysign(1.0, float('-inf')), -1.0)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_nan_signs(self):
# When using the dtoa.c code, the sign of float('nan') should
# be predictable.
self.assertEqual(copysign(1.0, float('nan')), 1.0)
self.assertEqual(copysign(1.0, float('-nan')), -1.0)
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'an',
'nf',
'nfinity',
'inity',
'iinity',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
'0x1p\uff10', # fullwidth Unicode digits
'\uff10x1p0',
'0x\uff11p0',
'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_whitespace(self):
value_pairs = [
('inf', INF),
('-Infinity', -INF),
('nan', NAN),
('1.0', 1.0),
('-0x.2', -0.125),
('-0.0', -0.0)
]
whitespace = [
'',
' ',
'\t',
'\n',
'\n \t',
'\f',
'\v',
'\r'
]
for inp, expected in value_pairs:
for lead in whitespace:
for trail in whitespace:
got = fromHex(lead + inp + trail)
self.identical(got, expected)
def test_from_hex(self):
MIN = self.MIN;
MAX = self.MAX;
TINY = self.TINY;
EPS = self.EPS;
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
for i in range(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_subclass(self):
class F(float):
def __new__(cls, value):
return float.__new__(cls, value + 1)
f = F.fromhex((1.5).hex())
self.assertIs(type(f), F)
self.assertEqual(f, 2.5)
class F2(float):
def __init__(self, value):
self.foo = 'bar'
f = F2.fromhex((1.5).hex())
self.assertIs(type(f), F2)
self.assertEqual(f, 1.5)
self.assertEqual(getattr(f, 'foo', 'none'), 'bar')
if __name__ == '__main__':
unittest.main()
| mit |
nuagenetworks/vspk-python | vspk/v5_0/fetchers/nuinfrastructuregatewayprofiles_fetcher.py | 2 | 2247 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUInfrastructureGatewayProfilesFetcher(NURESTFetcher):
""" Represents a NUInfrastructureGatewayProfiles fetcher
Notes:
This fetcher enables to fetch NUInfrastructureGatewayProfile objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUInfrastructureGatewayProfile class that is managed.
Returns:
.NUInfrastructureGatewayProfile: the managed class
"""
from .. import NUInfrastructureGatewayProfile
return NUInfrastructureGatewayProfile
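# Hedged usage sketch (not part of the generated vspk code): how a fetcher like this
# is typically used once the SDK attaches it to a parent object. The session setup and
# the parent attribute name below are assumptions for illustration; get() comes from the
# bambou NURESTFetcher base class.
#
#     from vspk import v5_0 as vsdk
#
#     session = vsdk.NUVSDSession(username='csproot', password='csproot',
#                                 enterprise='csp', api_url='https://vsd.example.com:8443')
#     session.start()
#     # the root user object is assumed to expose a fetcher instance for this class
#     profiles = session.user.infrastructure_gateway_profiles.get()
#     for profile in profiles:
#         print(profile.name)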
| bsd-3-clause |
gangadhar-kadam/lgnlvefrape | frappe/core/doctype/doctype/doctype.py | 6 | 14843 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields
from frappe.model.document import Document
from frappe.model.db_schema import type_map
from frappe.core.doctype.property_setter.property_setter import make_property_setter
from frappe.core.doctype.notification_count.notification_count import delete_notification_count_for
from frappe.modules import make_boilerplate
form_grid_templates = {
"fields": "templates/form_grid/fields.html"
}
class DocType(Document):
def validate(self):
if not frappe.conf.get("developer_mode"):
frappe.throw(_("Not in Developer Mode! Set in site_config.json"))
for c in [".", "/", "#", "&", "=", ":", "'", '"']:
if c in self.name:
frappe.throw(_("{0} not allowed in name").format(c))
self.validate_series()
self.scrub_field_names()
self.validate_title_field()
validate_fields(self)
if self.istable:
# no permission records for child table
self.permissions = []
else:
validate_permissions(self)
self.make_amendable()
def change_modified_of_parent(self):
if frappe.flags.in_import:
return
parent_list = frappe.db.sql("""SELECT parent
from tabDocField where fieldtype="Table" and options=%s""", self.name)
for p in parent_list:
frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))
def scrub_field_names(self):
restricted = ('name','parent','creation','modified','modified_by',
'parentfield','parenttype',"file_list")
for d in self.get("fields"):
if d.fieldtype:
if (not getattr(d, "fieldname", None)):
if d.label:
d.fieldname = d.label.strip().lower().replace(' ','_')
if d.fieldname in restricted:
d.fieldname = d.fieldname + '1'
else:
d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)
def validate_title_field(self):
if self.title_field and \
self.title_field not in [d.fieldname for d in self.get("fields")]:
frappe.throw(_("Title field must be a valid fieldname"))
def validate_series(self, autoname=None, name=None):
if not autoname: autoname = self.autoname
if not name: name = self.name
if not autoname and self.get("fields", {"fieldname":"naming_series"}):
self.autoname = "naming_series:"
if autoname and (not autoname.startswith('field:')) \
and (not autoname.startswith('eval:')) \
and (not autoname in ('Prompt', 'hash')) \
and (not autoname.startswith('naming_series:')):
prefix = autoname.split('.')[0]
used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
if used_in:
frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))
def on_update(self):
from frappe.model.db_schema import updatedb
updatedb(self.name)
self.change_modified_of_parent()
make_module_and_roles(self)
from frappe import conf
if not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode') or 0:
self.export_doc()
self.make_controller_template()
# update index
if not getattr(self, "custom", False):
from frappe.modules import load_doctype_module
module = load_doctype_module(self.name, self.module)
if hasattr(module, "on_doctype_update"):
module.on_doctype_update()
delete_notification_count_for(doctype=self.name)
frappe.clear_cache(doctype=self.name)
def before_rename(self, old, new, merge=False):
if merge:
frappe.throw(_("DocType can not be merged"))
def after_rename(self, old, new, merge=False):
if self.issingle:
frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
else:
frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))
def before_reload(self):
if not (self.issingle and self.istable):
self.preserve_naming_series_options_in_property_setter()
def preserve_naming_series_options_in_property_setter(self):
"""preserve naming_series as property setter if it does not exist"""
naming_series = self.get("fields", {"fieldname": "naming_series"})
if not naming_series:
return
# check if at least 1 record exists
if not (frappe.db.table_exists("tab" + self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
return
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
"property": "options", "field_name": "naming_series"})
if not existing_property_setter:
make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
if naming_series[0].default:
make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)
def export_doc(self):
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['DocType', self.name]])
def import_doc(self):
from frappe.modules.import_module import import_from_files
import_from_files(record_list=[[self.module, 'doctype', self.name]])
def make_controller_template(self):
make_boilerplate("controller.py", self)
if not (self.istable or self.issingle):
make_boilerplate("test_controller.py", self)
make_boilerplate("test_records.json", self)
def make_amendable(self):
"""
if is_submittable is set, add amended_from docfields
"""
if self.is_submittable:
if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
self.append("fields", {
"label": "Amended From",
"fieldtype": "Link",
"fieldname": "amended_from",
"options": self.name,
"read_only": 1,
"print_hide": 1,
"no_copy": 1
})
def get_max_idx(self):
max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
self.name)
return max_idx and max_idx[0][0] or 0
def validate_fields_for_doctype(doctype):
validate_fields(frappe.get_meta(doctype))
# this is separate because it is also called via custom field
def validate_fields(meta):
def check_illegal_characters(fieldname):
for c in ['.', ',', ' ', '-', '&', '%', '=', '"', "'", '*', '$',
'(', ')', '[', ']', '/']:
if c in fieldname:
frappe.throw(_("{0} not allowed in fieldname {1}").format(c, fieldname))
def check_unique_fieldname(fieldname):
duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))
if len(duplicates) > 1:
frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))
def check_illegal_mandatory(d):
if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))
def check_link_table_options(d):
if d.fieldtype in ("Link", "Table"):
if not d.options:
frappe.throw(_("Options requried for Link or Table type field {0} in row {1}").format(d.label, d.idx))
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent and not frappe.db.exists("DocType", d.options):
frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))
def check_hidden_and_mandatory(d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_min_items_in_list(fields):
if len(filter(lambda d: d.in_list_view, fields))==0:
for d in fields[:5]:
if d.fieldtype in type_map:
d.in_list_view = 1
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and d.fieldtype!="Image" and (d.fieldtype in no_value_fields):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = filter(lambda df: df.fieldname==d.options, fields)
if not doctype_pointer or (doctype_pointer[0].fieldtype!="Link") \
or (doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_illegal_default(d):
if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'):
frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'"))
def check_precision(d):
if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6):
frappe.throw(_("Precision should be between 1 and 6"))
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break" \
or (nxt.fieldtype=="Section Break" and not nxt.label):
frappe.throw(_("Fold must come before a labelled Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
def check_search_fields(meta):
if not meta.search_fields:
return
fieldname_list = [d.fieldname for d in fields]
for fieldname in (meta.search_fields or "").split(","):
fieldname = fieldname.strip()
if fieldname not in fieldname_list:
frappe.throw(_("Search Fields should contain valid fieldnames"))
fields = meta.get("fields")
for d in fields:
if not d.permlevel: d.permlevel = 0
if not d.fieldname:
frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
check_illegal_characters(d.fieldname)
check_unique_fieldname(d.fieldname)
check_illegal_mandatory(d)
check_link_table_options(d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(d)
check_in_list_view(d)
check_illegal_default(d)
check_min_items_in_list(fields)
check_fold(fields)
check_search_fields(meta)
def validate_permissions_for_doctype(doctype, for_remove=False):
doctype = frappe.get_doc("DocType", doctype)
if frappe.conf.developer_mode and not frappe.flags.in_test:
# save doctype
doctype.save()
else:
validate_permissions(doctype, for_remove)
# save permissions
for perm in doctype.get("permissions"):
perm.db_update()
def validate_permissions(doctype, for_remove=False):
permissions = doctype.get("permissions")
if not permissions:
frappe.throw(_('Enter at least one permission row'), frappe.MandatoryError)
issingle = issubmittable = isimportable = False
if doctype:
issingle = cint(doctype.issingle)
issubmittable = cint(doctype.is_submittable)
isimportable = cint(doctype.allow_import)
def get_txt(d):
return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)
def check_atleast_one_set(d):
if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))
def check_double(d):
has_similar = False
for p in permissions:
if (p.role==d.role and p.permlevel==d.permlevel
and p.apply_user_permissions==d.apply_user_permissions and p!=d):
has_similar = True
break
if has_similar:
frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and Apply User Permissions").format(get_txt(d)))
def check_level_zero_is_set(d):
if cint(d.permlevel) > 0 and d.role != 'All':
has_zero_perm = False
for p in permissions:
if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
has_zero_perm = True
break
if not has_zero_perm:
frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))
if d.create or d.submit or d.cancel or d.amend or d.match:
frappe.throw(_("{0}: Create, Submit, Cancel and Amend only valid at level 0").format(get_txt(d)))
def check_permission_dependency(d):
if d.cancel and not d.submit:
frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))
if (d.submit or d.cancel or d.amend) and not d.write:
frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
if d.amend and not d.cancel:
frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
if d.get("import") and not d.create:
frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))
def remove_rights_for_single(d):
if not issingle:
return
if d.report:
frappe.msgprint(_("Report cannot be set for Single types"))
d.report = 0
d.set("import", 0)
d.set("export", 0)
for ptype, label in (
("set_user_permissions", _("Set User Permissions")),
("apply_user_permissions", _("Apply User Permissions"))):
if d.get(ptype):
d.set(ptype, 0)
frappe.msgprint(_("{0} cannot be set for Single types").format(label))
def check_if_submittable(d):
if d.submit and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
elif d.amend and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))
def check_if_importable(d):
if d.get("import") and not isimportable:
frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))
for d in permissions:
if not d.permlevel:
d.permlevel=0
check_atleast_one_set(d)
if not for_remove:
check_double(d)
check_permission_dependency(d)
check_if_submittable(d)
check_if_importable(d)
check_level_zero_is_set(d)
remove_rights_for_single(d)
def make_module_and_roles(doc, perm_fieldname="permissions"):
try:
if not frappe.db.exists("Module Def", doc.module):
m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
m.ignore_mandatory = m.ignore_permissions = True
m.insert()
default_roles = ["Administrator", "Guest", "All"]
roles = [p.role for p in doc.get("permissions") or []] + default_roles
for role in list(set(roles)):
if not frappe.db.exists("Role", role):
r = frappe.get_doc({"doctype": "Role", "role_name": role})
r.role_name = role
r.ignore_mandatory = r.ignore_permissions = True
r.insert()
except frappe.DoesNotExistError, e:
pass
except frappe.SQLError, e:
if e.args[0]==1146:
pass
else:
raise
def init_list(doctype):
doc = frappe.get_meta(doctype)
make_boilerplate("controller_list.js", doc)
make_boilerplate("controller_list.html", doc)
| mit |
Pakoach/Sick-Beard | cherrypy/lib/jsontools.py | 80 | 1574 | import sys
import cherrypy
if sys.version_info >= (2, 6):
# Python 2.6: simplejson is part of the standard library
import json
else:
try:
import simplejson as json
except ImportError:
json = None
if json is None:
def json_decode(s):
raise ValueError('No JSON library is available')
def json_encode(s):
raise ValueError('No JSON library is available')
else:
json_decode = json.JSONDecoder().decode
json_encode = json.JSONEncoder().iterencode
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
"""Read application/json data into request.json."""
if not entity.headers.get(u"Content-Length", u""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors[u'application/json'] = json_processor
def json_out(debug=False):
request = cherrypy.serving.request
response = cherrypy.serving.response
real_handler = request.handler
def json_handler(*args, **kwargs):
response.headers['Content-Type'] = 'application/json'
value = real_handler(*args, **kwargs)
return json_encode(value)
request.handler = json_handler
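# Hedged usage sketch (not part of the original module): how these callables are
# typically exposed as CherryPy tools and enabled on a handler. The hook-point names
# ('before_request_body' / 'before_handler') match how CherryPy registers its bundled
# JSON tools, but treat them as assumptions here.
#
#     import cherrypy
#     from cherrypy.lib import jsontools
#
#     cherrypy.tools.json_in = cherrypy.Tool('before_request_body', jsontools.json_in)
#     cherrypy.tools.json_out = cherrypy.Tool('before_handler', jsontools.json_out)
#
#     class Api(object):
#         @cherrypy.expose
#         @cherrypy.tools.json_in()
#         @cherrypy.tools.json_out()
#         def echo(self):
#             # request.json was populated by json_processor above
#             return cherrypy.request.json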
| gpl-3.0 |
hermantai/sorno-py-scripts | sorno/mathlib.py | 1 | 2471 | """A library for math related things
Copyright 2015 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
class Interval(object):
"""An interval with a starting and a ending points, open or closed.
It's a read-only class.
Attributes:
start (int or float): The starting point of the interval.
end (int or float): The ending point of the interval.
is_start_opened (Optional[bool]): True if the starting point is open.
It's False by default.
is_end_opened (Optional[bool]): True if the ending point is open.
It's False by default.
"""
def __init__(self, start, end, is_start_opened=False, is_end_opened=False):
self._start = start
self._end = end
self._is_start_opened = is_start_opened
self._is_end_opened = is_end_opened
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def is_start_opened(self):
return self._is_start_opened
@property
def is_end_opened(self):
return self._is_end_opened
def __str__(self):
tmp = "Interval(start=%r,end=%r,is_start_opened=%r,is_end_opened=%r)"
return tmp % (
self._start,
self._end,
self._is_start_opened,
self._is_end_opened,
)
def __repr__(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, Interval):
return False
return (
self._start,
self._end,
self._is_start_opened,
self._is_end_opened,
) == (
other._start,
other._end,
other._is_start_opened,
other._is_end_opened,
)
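# A minimal usage sketch of Interval (illustration only, not part of the library API):
if __name__ == "__main__":
    closed = Interval(1, 5)
    half_open = Interval(1, 5, is_end_opened=True)
    print(closed)                    # Interval(start=1,end=5,is_start_opened=False,is_end_opened=False)
    print(closed == half_open)       # False: openness is part of equality
    print(closed == Interval(1, 5))  # True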
| apache-2.0 |
iwm911/plaso | plaso/parsers/utmpx.py | 1 | 6216 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for utmpx files."""
# TODO: Add support for other implementations than Mac OS X.
# The parser should be checked against IOS UTMPX file.
import construct
import logging
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import parser
from plaso.lib import timelib
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class UtmpxMacOsXEvent(event.EventObject):
"""Convenience class for an event utmpx."""
DATA_TYPE = 'mac:utmpx:event'
def __init__(self, timestamp, user, terminal, status, computer_name):
"""Initializes the event object.
Args:
timestamp: when the terminal was started
user: active user name
terminal: name of the terminal
status: terminal status
computer_name: name of the host or IP.
"""
super(UtmpxMacOsXEvent, self).__init__()
self.timestamp = timestamp
self.timestamp_desc = eventdata.EventTimestamp.START_TIME
self.user = user
self.terminal = terminal
self.status = status
self.computer_name = computer_name
class UtmpxParser(parser.BaseParser):
"""Parser for UTMPX files. """
NAME = 'utmpx'
# INFO: Type is supposed to be a short (2 bytes);
# however, if we analyze the file it is always
# a byte followed by 3 bytes with \x00 value.
MAC_UTMPX_ENTRY = construct.Struct(
'utmpx_mac',
construct.String('user', 256),
construct.ULInt32('id'),
construct.String('tty_name', 32),
construct.ULInt32('pid'),
construct.ULInt16('status_type'),
construct.ULInt16('unknown'),
construct.ULInt32('timestamp'),
construct.ULInt32('microsecond'),
construct.String('hostname', 256),
construct.Padding(64))
# 9, 10 and 11 are only for Darwin and IOS.
MAC_STATUS_TYPE = {
0: 'EMPTY',
1: 'RUN_LVL',
2: 'BOOT_TIME',
3: 'OLD_TIME',
4: 'NEW_TIME',
5: 'INIT_PROCESS',
6: 'LOGIN_PROCESS',
7: 'USER_PROCESS',
8: 'DEAD_PROCESS',
9: 'ACCOUNTING',
10: 'SIGNATURE',
11: 'SHUTDOWN_TIME'}
def __init__(self, pre_obj, config):
"""Initializes the parser.
Args:
pre_obj: pre-parsing object.
config: configuration object.
"""
super(UtmpxParser, self).__init__(pre_obj, config)
self._utmpx_record_size = self.MAC_UTMPX_ENTRY.sizeof()
def _ReadEntry(self, file_object):
"""Reads an UTMPX entry.
Args:
file_object: a file-like object that points to an UTMPX file.
Returns:
An event object constructed from the UTMPX entry.
"""
data = file_object.read(self._utmpx_record_size)
if len(data) != self._utmpx_record_size:
return
try:
entry = self.MAC_UTMPX_ENTRY.parse(data)
except (IOError, construct.FieldError) as exception:
logging.warning(
u'Unable to parse Mac OS X UTMPX entry with error: {0:s}'.format(
exception))
return
user, _, _ = entry.user.partition('\x00')
if not user:
user = u'N/A'
terminal, _, _ = entry.tty_name.partition('\x00')
if not terminal:
terminal = u'N/A'
computer_name, _, _ = entry.hostname.partition('\x00')
if not computer_name:
computer_name = u'localhost'
value_status = self.MAC_STATUS_TYPE.get(entry.status_type, u'N/A')
status = u'{0}'.format(value_status)
timestamp = timelib.Timestamp.FromPosixTimeWithMicrosecond(
entry.timestamp, entry.microsecond)
return UtmpxMacOsXEvent(timestamp, user, terminal, status, computer_name)
def _VerifyStructure(self, file_object):
"""Verify that we are dealing with an UTMPX entry.
Args:
file_object: a file-like object that points to an UTMPX file.
Returns:
True if it is a UTMPX entry or False otherwise.
"""
# First entry is a SIGNATURE entry of the file ("header").
try:
header = self.MAC_UTMPX_ENTRY.parse_stream(file_object)
except (IOError, construct.FieldError):
return False
user, _, _ = header.user.partition('\x00')
# The UTMPX_ENTRY structure will often successfully compile on various
# structures, such as binary plist files, and thus we need to do some
# additional validation. The first one is to check if the user name
# can be converted into a unicode string, otherwise we can assume
# we are dealing with non UTMPX data.
try:
_ = unicode(user)
except UnicodeDecodeError:
return False
if user != u'utmpx-1.00':
return False
if self.MAC_STATUS_TYPE[header.status_type] != 'SIGNATURE':
return False
if header.timestamp != 0 or header.microsecond != 0 or header.pid != 0:
return False
tty_name, _, _ = header.tty_name.partition('\x00')
hostname, _, _ = header.hostname.partition('\x00')
if tty_name or hostname:
return False
return True
def Parse(self, file_entry):
"""Extract data from a UTMPX file.
Args:
file_entry: a file entry object.
Returns:
An event object (instance of UtmpxMacOsXEvent) for each logon/logoff
event.
"""
file_object = file_entry.GetFileObject()
if not self._VerifyStructure(file_object):
raise errors.UnableToParseFile(
u'The file is not an UTMPX file.')
event_object = self._ReadEntry(file_object)
while event_object:
event_object.offset = file_object.tell()
yield event_object
event_object = self._ReadEntry(file_object)
file_object.close()
| apache-2.0 |
steeve/plugin.video.pulsar | resources/site-packages/bjsonrpc/request.py | 9 | 6728 | """
bjson/request.py
Asynchronous Bidirectional JSON-RPC protocol implementation over TCP/IP
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
try:
from Queue import Queue
except ImportError:
from queue import Queue
import logging
from threading import Event
import traceback
from bjsonrpc.exceptions import ServerError
import bjsonrpc.jsonlib as json
_log = logging.getLogger(__name__)
class Request(object):
"""
Represents a request to the other end which may not be completed yet.
This class is automatically created by *method* Proxy.
Parameters:
**conn**
Connection instance which this Request belongs to.
(internally stored as Request.conn)
**request_data**
Dictionary object to serialize as JSON to send to the other end.
(internally stored as Request.data)
Attributes:
**responses**
Queue of JSON Objects of the response, each as a dictionary. If
no response has been received, this is empty.
**event_response**
A threading.Event object, which is set to true when a response has
been received. Useful to wake up threads or to wait exactly until
the response is received.
**callbacks**
List array where the developer can append functions to call when
the response is received. The function will get the Request object
as a first argument.
**request_id**
Number of ID that identifies the call. For notifications this is None.
Be careful because it may be not an integer. Strings and other objects
may be valid for other implementations.
"""
def __init__(self, conn, request_data):
self.conn = conn
self.data = request_data
self.responses = Queue()
# TODO: Now that we have a Queue, do we need an Event (and a cv)?
self.event_response = Event()
self.callbacks = []
self.thread_wait = self.event_response.wait
self.request_id = None
self.auto_close = False
if 'id' in self.data:
self.request_id = self.data['id']
if self.request_id:
self.auto_close = True
self.conn.addrequest(self)
data = json.dumps(self.data, self.conn)
self.conn.write(data)
def hasresponse(self):
"""
Method that checks whether there's a response or not.
Returns True if there is one, or False if it hasn't arrived yet.
"""
if not self.responses.empty(): return True
self.conn.dispatch_until_empty()
return not self.responses.empty()
def setresponse(self, value):
"""
Method used by Connection instance to tell Request that a Response
is available to this request.
Parameters:
**value**
Value (JSON decoded) received from socket.
"""
self.responses.put(value)
for callback in self.callbacks:
try:
callback(self)
except Exception as exc:
_log.error("Error on callback: %r", exc)
_log.debug(traceback.format_exc())
self.event_response.set() # helper for threads.
if self.auto_close:
self.close()
def wait(self):
"""
Block until there is a response. Will manage the socket and dispatch
messages until the response is found.
"""
#if self.response is None:
# self.conn.read_ensure_thread()
while self.responses.empty():
self.conn.read_and_dispatch(condition=lambda: self.responses.empty())
def __call__(self):
return self.value
def __iter__(self):
return self
def __next__(self):
return self.value
def next(self):
return self.__next__()
def close(self):
reqid, self.request_id, self.auto_close = self.request_id, None, False
if reqid:
self.conn.delrequest(reqid)
def __del__(self):
self.close()
@property
def value(self):
"""
Property to get value response. If the response is not available, it waits
to it (see *wait* method). If the response contains an Error, this
method raises *exceptions.ServerError* with the error text inside.
From version 0.2.0 you can also call the class itself to get the value::
req_stime = rpcconn.method.getServerTime()
print req_stime.value
print req_stime() # equivalent to the prior line.
"""
self.wait()
response = self.responses.get()
err = response.get('error', None)
if err is not None:
raise ServerError(err)
return response['result']
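# Hedged usage sketch (illustration only): how a Request produced by the method proxy is
# typically consumed. The connection setup is an assumption; the callbacks/value usage
# mirrors the docstrings above.
#
#     import bjsonrpc
#
#     conn = bjsonrpc.connect(host="127.0.0.1", port=10123)
#     req = conn.method.getServerTime()   # returns a Request, not the value
#
#     def on_response(request):
#         print("got response:", request.value)
#
#     req.callbacks.append(on_response)   # invoked when setresponse() fires
#     print(req.value)                    # blocks via wait() until the reply arrives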
| bsd-3-clause |
david-ragazzi/nupic | nupic/research/TP_shim.py | 6 | 3224 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A shim for the TP class that transparently implements TemporalMemory,
for use with OPF.
"""
import numpy
from nupic.research.temporal_memory import TemporalMemory
class TPShim(TemporalMemory):
"""
TP => Temporal Memory shim class.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
globalDecay=0.10,
activationThreshold=12,
seed=42):
"""
Translate parameters and initialize member variables specific to `TP.py`.
"""
super(TPShim, self).__init__(
columnDimensions=(numberOfCols,),
cellsPerColumn=cellsPerColumn,
activationThreshold=activationThreshold,
initialPermanence=initialPerm,
connectedPermanence=connectedPerm,
minThreshold=minThreshold,
maxNewSynapseCount=newSynapseCount,
permanenceIncrement=permanenceInc,
permanenceDecrement=permanenceDec,
seed=seed)
self.infActiveState = {"t": None}
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
"""
(From `TP.py`)
Handle one compute, possibly learning.
@param bottomUpInput The bottom-up input, typically from a spatial pooler
@param enableLearn If true, perform learning
@param computeInfOutput If None, default behavior is to disable the inference
output when enableLearn is on.
If true, compute the inference output
If false, do not compute the inference output
"""
super(TPShim, self).compute(set(bottomUpInput.nonzero()[0]),
learn=enableLearn)
numberOfCells = self.numberOfCells()
activeState = numpy.zeros(numberOfCells)
activeState[self.getCellIndices(self.activeCells)] = 1
self.infActiveState["t"] = activeState
output = numpy.zeros(numberOfCells)
output[self.getCellIndices(self.predictiveCells | self.activeCells)] = 1
return output
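# Hedged usage sketch (not part of the shim): feeding one SDR through the shim. Parameter
# values are arbitrary; the input is a dense numpy vector whose non-zero entries are the
# active columns, as expected by compute() above.
#
#     import numpy
#
#     tp = TPShim(numberOfCols=64, cellsPerColumn=4)
#     sdr = numpy.zeros(64)
#     sdr[[3, 17, 42]] = 1              # three active columns
#     out = tp.compute(sdr, enableLearn=True)
#     print(out.nonzero()[0])           # indices of active/predictive cells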
| gpl-3.0 |
jordanemedlock/psychtruths | temboo/Library/SendGrid/NewsletterAPI/Recipients/GetAttachedRecipientLists.py | 5 | 4071 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetAttachedRecipientLists
# Retrieve Recipient Lists attached to a specified newsletter.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetAttachedRecipientLists(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetAttachedRecipientLists Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetAttachedRecipientLists, self).__init__(temboo_session, '/Library/SendGrid/NewsletterAPI/Recipients/GetAttachedRecipientLists')
def new_input_set(self):
return GetAttachedRecipientListsInputSet()
def _make_result_set(self, result, path):
return GetAttachedRecipientListsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetAttachedRecipientListsChoreographyExecution(session, exec_id, path)
class GetAttachedRecipientListsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetAttachedRecipientLists
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('APIKey', value)
def set_APIUser(self, value):
"""
Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('APIUser', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of an existing newsletter, whose recipient lists will be obtained.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('Name', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('ResponseFormat', value)
class GetAttachedRecipientListsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetAttachedRecipientLists Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
"""
return self._output.get('Response', None)
class GetAttachedRecipientListsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetAttachedRecipientListsResultSet(response, path)
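# Hedged usage sketch (illustration only): the generic Temboo execution pattern applied to
# this Choreo. The session credentials are placeholders, and the TembooSession import path
# and execute_with_results() call are assumed from the standard Temboo Python SDK.
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY')
#     choreo = GetAttachedRecipientLists(session)
#     inputs = choreo.new_input_set()
#     inputs.set_APIUser('sendgrid_user')
#     inputs.set_APIKey('sendgrid_key')
#     inputs.set_Name('weekly-newsletter')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())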
| apache-2.0 |
kontais/EFI-MIPS | ToolKit/cmds/python/Lib/regex_syntax.py | 17 | 1893 | """Constants for selecting regexp syntaxes for the obsolete regex module.
This module is only for backward compatibility. "regex" has now
been replaced by the new regular expression module, "re".
These bits are passed to regex.set_syntax() to choose among
alternative regexp syntaxes.
"""
# 1 means plain parentheses serve as grouping, and backslash
# parentheses are needed for literal searching.
# 0 means backslash-parentheses are grouping, and plain parentheses
# are for literal searching.
RE_NO_BK_PARENS = 1
# 1 means plain | serves as the "or"-operator, and \| is a literal.
# 0 means \| serves as the "or"-operator, and | is a literal.
RE_NO_BK_VBAR = 2
# 0 means plain + or ? serves as an operator, and \+, \? are literals.
# 1 means \+, \? are operators and plain +, ? are literals.
RE_BK_PLUS_QM = 4
# 1 means | binds tighter than ^ or $.
# 0 means the contrary.
RE_TIGHT_VBAR = 8
# 1 means treat \n as an _OR operator
# 0 means treat it as a normal character
RE_NEWLINE_OR = 16
# 0 means that a special characters (such as *, ^, and $) always have
# their special meaning regardless of the surrounding context.
# 1 means that special characters may act as normal characters in some
# contexts. Specifically, this applies to:
# ^ - only special at the beginning, or after ( or |
# $ - only special at the end, or before ) or |
# *, +, ? - only special when not after the beginning, (, or |
RE_CONTEXT_INDEP_OPS = 32
# ANSI sequences (\n etc) and \xhh
RE_ANSI_HEX = 64
# No GNU extensions
RE_NO_GNU_EXTENSIONS = 128
# Now define combinations of bits for the standard possibilities.
RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
RE_SYNTAX_EMACS = 0
# (Python's obsolete "regexp" module used a syntax similar to awk.)
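# Hedged usage sketch (historical, illustration only): as noted in the module docstring,
# these bits were passed to regex.set_syntax() of the long-removed regex module, e.g.
#
#     import regex, regex_syntax
#     regex.set_syntax(regex_syntax.RE_SYNTAX_EMACS)
#
# Combinations are plain bitwise ORs of the flags; for example RE_SYNTAX_EGREP above is
# just RE_SYNTAX_AWK | RE_NEWLINE_OR.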
| bsd-3-clause |
andrewthetechie/slack_rtmbot | slack_rtmbot.py | 1 | 12598 | #!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import yaml
import os
import sys
import time
import logging
import re
from threading import Thread
from logging.handlers import RotatingFileHandler
from slackclient import SlackClient
def dbg(debug_string):
"""
Used to write debugging information if debug is set in config
:param debug_string:
:return:
"""
if debug:
main_log.info(debug_string)
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
self.dm_help = []
self.channel_help = []
def connect(self):
"""Convenience method that creates Server instance"""
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
self.on_start()
self.load_help()
while True:
for reply in self.slack_client.rtm_read():
self.input_logging(reply)
self.input(reply)
self.output()
self.autoping()
time.sleep(config['PING_INTERVAL']
if "PING_INTERVAL" in config else .1)
def autoping(self):
"""
This method keeps the bot connection alive to slack. Requires a ping every 5 seconds if there
is no activity.
:return:
"""
# hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def load_help(self):
"""
calls the process_help() function in each plugin to setup the help text variables
:return:
"""
global channel_help
global dm_help
for plugin in self.bot_plugins:
plug_help = None
try:
plug_help = plugin.get_help()
if len(plug_help[0]) > 0:
for help in plug_help[0]:
self.dm_help.append(help)
if len(plug_help[1]) > 0:
for help in plug_help[1]:
self.channel_help.append(help)
except AttributeError:
main_log.info(
"{} is a bad bad plugin and doesnt implement process_help".format(plugin))
self.dm_help.append(
"help - Will return a listing of commands the bot responds to")
self.channel_help.append(
"help - Will return a listing of commands the bot responds to")
return
def output_help(self, channel):
"""
Outputs help information to the help channel passed in
:param channel:
:return:
"""
message = "Help for {}\n-------------------\n".format(config[
'BOT_NAME'])
if len(self.dm_help) > 0:
message = "{}DM Commands:\n-------------------\n".format(message)
for help in self.dm_help:
message = "{}\n{}".format(message, help)
if len(self.channel_help) > 0:
message = "{}\n\nChannel Commands:\n-------------------\n".format(
message)
for help in self.channel_help:
message = "{}\n{}".format(message, help)
self.slack_client.api_call(
"chat.postMessage", channel=channel, text=message, as_user=True)
return
def on_start(self):
"""
Runs the process_onstart method for each function that has it
:return:
"""
function_name = "process_onstart"
for plugin in self.bot_plugins:
plugin.do(function_name, None)
def input(self, data):
"""
Receives messages from the RTM api (data) and passes it to methods in the plugins based on data type
For example, a message gets sent to process_message
Also handles input for the help commands and routes them to output_help
:param data:
:return:
"""
if "type" in data:
function_name = "process_" + data["type"]
dbg("got {}".format(function_name))
match = None
if function_name == "process_message":
match = re.findall(r"{} (help|halp|help me)".format(
config['BOT_NAME']), data['text'])
if data['channel'].startswith("D"):
function_name = "process_directmessage"
match = re.findall(r"(help|halp|help me)", data['text'])
if len(match) > 0 and data['user'] != config['BOT_USER_ID']:
return self.output_help(data['channel'])
for plugin in self.bot_plugins:
plugin.do(function_name, data)
def output(self):
"""
Uses the slack web API (not the RTM API) to post a message based on content of
outputs from plugins.
Uses the web api because the RTM api is not able to process formatted messages
:return:
"""
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
if channel is not None and output[1] != None:
if limiter == True:
time.sleep(.1)
limiter = False
message = output[1].encode('ascii', 'ignore')
# channel.send_message("{}".format(message))
self.slack_client.api_call(
"chat.postMessage", channel=output[0], text=message, as_user=True)
limiter = True
def load_plugins(self):
"""
Loads all plugins in the /plugins directory
:return:
"""
for plugin in glob.glob(directory + '/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory + '/plugins/')
for plugin in glob.glob(directory + '/plugins/*.py') + \
glob.glob(directory + '/plugins/*/*.py'):
main_log.info(plugin)
name = plugin.split('/')[-1][:-3]
self.bot_plugins.append(Plugin(name))
def input_logging(self, data):
"""
If COMMAND_LOGGING is true in config, logs all input sent at the bot
This is used more for analytics then debugging. If you want
debugging, turn on debugging
:param data:
:return:
"""
# do nothing if we haven't defined input logging or it is false
if not "INPUT_LOGGING" in config or not config['INPUT_LOGGING']:
return
# don't log anything that is coming from the bot itself
if "user" in data and data['user'] == config['BOT_USER_ID']:
return
# discard some logs that we just don't need
if data['type'] in config['INPUT_DO_NOT_LOG_TYPES']:
return
input_log.info("{},{},{},{}".format(
data['type'],
data['user'] if "user" in data else None,
data['channel'] if "channel" in data else None,
data['text'] if "text" in data else None))
class Plugin(object):
def __init__(self, name, plugin_config={}):
self.name = name
self.module = __import__(name)
self.outputs = []
if name in config:
main_log.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup()
def plugin_worker(self, function_name, data):
"""
Method used to thread plugins
:param function_name:
:param data:
:return:
"""
try:
if function_name == "process_onstart":
eval("self.module." + function_name)()
elif data['user'] != config['BOT_USER_ID']:
eval("self.module." + function_name)(data)
except KeyError:
return
def get_help(self):
"""
Runs the "process_help" function from a plugin and returns the output
:return:
"""
function_name = "process_help"
return eval("self.module." + function_name)()
def do(self, function_name, data):
"""
Runs a plugin if it has a function to match the data being passed to it
:param function_name:
:param data:
:return:
"""
if function_name in dir(self.module):
try:
# starts a thread for this call to a plugin
t = Thread(
target=self.plugin_worker, args=(
function_name, data))
t.start()
except:
dbg("problem in module {} {}".format(function_name, data))
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
dbg("problem in catch all")
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
main_log.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
def do_dm_help(self):
dm_help = []
while True:
if 'dm_help' in dir(self.module):
if self.module.dm_help and len(self.module.dm_help) > 0:
main_log.info("dm_help from {}".format(self.module))
dm_help.append(self.module.dm_help.pop(0))
else:
break
else:
self.module.dm_help = []
return dm_help
def do_channel_help(self):
channel_help = []
while True:
if 'dm_help' in dir(self.module):
if self.module.channel_help and len(self.module.channel_help) > 0:
main_log.info("channel_help from {}".format(self.module))
dm_help.append(self.module.channel_help.pop(0))
else:
break
else:
self.module.channel_help = []
return channel_help
class UnknownChannel(Exception):
pass
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s : %(message)s')
fileHandler = RotatingFileHandler(log_file, mode='a', maxBytes=(
config['LOGGING_MAX_SIZE'] if "LOGGING_MAX_SIZE" in config else 10485760),
backupCount=config[
'LOGGING_LOGS_TO_KEEP'] if "LOGGING_LOGS_TO_KEEP" in config else 5
)
fileHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
def main_loop():
"""
Starts up the main bot loop and listens for a keyboard interrupt to quit it
:return:
"""
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
main_log.exception('OOPS')
if __name__ == "__main__":
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = yaml.load(file('conf/rtmbot.conf', 'r'))
debug = config["DEBUG"] if "DEBUG" in config else False
input_logging = config[
'INPUT_LOGGING'] if "INPUT_LOGGING" in config else False
bot = RtmBot(config["SLACK_TOKEN"])
site_plugins = []
main_log_file = config[
'LOGPATH'] + config['LOGFILE'] if "LOGPATH" in config and "LOGFILE" else "bot.log"
setup_logger("main_logs", main_log_file, logging.INFO)
main_log = logging.getLogger('main_logs')
if input_logging:
input_log_file = config['LOGPATH'] + config[
'INPUT_LOGFILE'] if "LOGPATH" in config and "INPUT_LOGFILE" else "inputs.log"
setup_logger("input_logs", input_log_file, logging.INFO)
input_log = logging.getLogger('input_logs')
if "DAEMON" in config and config['DAEMON']:
import daemon
with daemon.DaemonContext():
main_loop()
main_loop()
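# Hedged example plugin (illustration only; it would live in plugins/echo.py): shows the
# module-level hooks this bot looks for -- process_onstart, process_message, process_help
# and the shared `outputs` list consumed by do_output(). Command name is made up.
#
#     outputs = []
#
#     def process_onstart():
#         pass  # one-time setup when the bot starts
#
#     def process_message(data):
#         # `data` is the raw RTM event dict; echo text back to the same channel
#         if data.get('text', '').startswith('!echo '):
#             outputs.append([data['channel'], data['text'][len('!echo '):]])
#
#     def process_help():
#         dm_help = ["!echo <text> - repeats <text>"]
#         channel_help = ["!echo <text> - repeats <text>"]
#         return dm_help, channel_help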
| gpl-3.0 |
cailloumajor/home-web | backend/core/auth/backends.py | 1 | 1129 | # -*- coding: utf-8 -*-
# pylint: skip-file
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import check_password
class SettingsBackend:
"""
Authenticates against the settings ADMIN_LOGIN and ADMIN_PASSWORD
Use the login name and a hash of the password.
"""
def authenticate(self, username=None, password=None):
UserModel = get_user_model()
login_valid = (username == settings.ADMIN_LOGIN)
pwd_valid = check_password(password, settings.ADMIN_PASSWORD)
if login_valid and pwd_valid:
try:
user = UserModel.objects.get(username=username)
except UserModel.DoesNotExist:
user = UserModel(username=username)
user.is_staff = True
user.is_superuser = True
user.save()
return user
return None
def get_user(self, user_id):
UserModel = get_user_model()
try:
return UserModel.objects.get(pk=user_id)
except UserModel.DoesNotExist:
return None
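# Hedged configuration sketch (illustration only): the settings this backend expects, plus
# registering it. The dotted backend path is project-specific and assumed here.
#
#     # settings.py
#     from django.contrib.auth.hashers import make_password
#
#     ADMIN_LOGIN = 'admin'
#     ADMIN_PASSWORD = make_password('change-me')   # stored as a hash, as checked above
#     AUTHENTICATION_BACKENDS = (
#         'core.auth.backends.SettingsBackend',
#         'django.contrib.auth.backends.ModelBackend',
#     )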
| gpl-3.0 |
ehashman/oh-mainline | vendor/packages/Django/django/contrib/formtools/tests/wizard/forms.py | 90 | 7721 | from __future__ import unicode_literals
from django import forms, http
from django.conf import settings
from django.db import models
from django.test import TestCase
from django.template.response import TemplateResponse
from django.utils.importlib import import_module
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (WizardView,
SessionWizardView,
CookieWizardView)
class DummyRequest(http.HttpRequest):
def __init__(self, POST=None):
super(DummyRequest, self).__init__()
self.method = POST and "POST" or "GET"
if POST is not None:
self.POST.update(POST)
self.session = {}
self._dont_enforce_csrf_checks = True
def get_request(*args, **kwargs):
request = DummyRequest(*args, **kwargs)
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore(None)
return request
class Step1(forms.Form):
name = forms.CharField()
class Step2(forms.Form):
name = forms.CharField()
class Step3(forms.Form):
data = forms.CharField()
class CustomKwargsStep1(Step1):
def __init__(self, test=None, *args, **kwargs):
self.test = test
return super(CustomKwargsStep1, self).__init__(*args, **kwargs)
class TestModel(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'formtools'
class TestModelForm(forms.ModelForm):
class Meta:
model = TestModel
TestModelFormSet = forms.models.modelformset_factory(TestModel, form=TestModelForm, extra=2)
class TestWizard(WizardView):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
def dispatch(self, request, *args, **kwargs):
response = super(TestWizard, self).dispatch(request, *args, **kwargs)
return response, self
def get_form_kwargs(self, step, *args, **kwargs):
kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs)
if step == 'kwargs_test':
kwargs['test'] = True
return kwargs
class FormTests(TestCase):
def test_form_init(self):
testform = TestWizard.get_initkwargs([Step1, Step2])
self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})
testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
self.assertEqual(
testform['form_list'], {'start': Step1, 'step2': Step2})
testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
self.assertEqual(
testform['form_list'], {'0': Step1, '1': Step2, 'finish': Step3})
def test_first_step(self):
request = get_request()
testform = TestWizard.as_view([Step1, Step2])
response, instance = testform(request)
self.assertEqual(instance.steps.current, '0')
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
def test_persistence(self):
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request = get_request({'test_wizard-current_step': 'start',
'name': 'data1'})
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
instance.storage.current_step = 'step2'
testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request.POST = {'test_wizard-current_step': 'step2'}
response, instance = testform2(request)
self.assertEqual(instance.steps.current, 'step2')
def test_form_condition(self):
request = get_request()
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': True})
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': False})
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step3')
def test_form_kwargs(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1),
('kwargs_test', CustomKwargsStep1)])
response, instance = testform(request)
self.assertEqual(instance.get_form_kwargs('start'), {})
self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
self.assertEqual(instance.get_form('kwargs_test').test, True)
def test_form_prefix(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.get_form_prefix(), 'start')
self.assertEqual(instance.get_form_prefix('another'), 'another')
def test_form_initial(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)],
initial_dict={'start': {'name': 'value1'}})
response, instance = testform(request)
self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
self.assertEqual(instance.get_form_initial('step2'), {})
def test_form_instance(self):
request = get_request()
the_instance = TestModel()
testform = TestWizard.as_view([('start', TestModelForm), ('step2', Step2)],
instance_dict={'start': the_instance})
response, instance = testform(request)
self.assertEqual(
instance.get_form_instance('start'),
the_instance)
self.assertEqual(
instance.get_form_instance('non_exist_instance'),
None)
def test_formset_instance(self):
request = get_request()
the_instance1, created = TestModel.objects.get_or_create(
name='test object 1')
the_instance2, created = TestModel.objects.get_or_create(
name='test object 2')
testform = TestWizard.as_view([('start', TestModelFormSet), ('step2', Step2)],
instance_dict={'start': TestModel.objects.filter(name='test object 1')})
response, instance = testform(request)
self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
self.assertEqual(instance.get_form_instance('non_exist_instance'), None)
self.assertEqual(instance.get_form().initial_form_count(), 1)
def test_done(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertRaises(NotImplementedError, instance.done, None)
def test_revalidation(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
class SessionFormTests(TestCase):
def test_init(self):
request = get_request()
testform = SessionWizardView.as_view([('start', Step1)])
self.assertTrue(isinstance(testform(request), TemplateResponse))
class CookieFormTests(TestCase):
def test_init(self):
request = get_request()
testform = CookieWizardView.as_view([('start', Step1)])
self.assertTrue(isinstance(testform(request), TemplateResponse))
| agpl-3.0 |
zaragoza-sedeelectronica/hackathon-co.sa | bower_components/lumx/changelog.py | 48 | 3091 | #! /usr/bin/env python
from subprocess import Popen, PIPE
import re
def getTags():
Popen('git fetch --tags'.split(), stdout=PIPE).communicate()
(stdout, _) = Popen('git tag'.split(), stdout=PIPE).communicate()
return sorted(stdout.split(), key=lambda s: [int(x) for x in s.replace('v', '').split('.')])
def checkLastChangelogTag():
last = None
with open('CHANGELOG.md', 'r+') as f:
lines = f.readlines()
for line in lines:
m = re.search(r'^##\s+(\S+):', line)
if m:
last = m.group(1)
break
return last
def buildNewLogs(fromTag, toTag):
stdout = ''
if fromTag:
(stdout, _) = Popen(('git rev-list %s..%s' % (fromTag, toTag)).split(), stdout=PIPE).communicate()
else:
(stdout, _) = Popen(('git rev-list %s' % toTag).split(), stdout=PIPE).communicate()
commits = stdout.splitlines()
feats = []
fixs = []
brokens = []
for commit in commits:
(title, _) = Popen(('git show -s --format=%%s %s' % commit).split(), stdout=PIPE).communicate()
(body, _) = Popen(('git show -s --format=%%b %s' % commit).split(), stdout=PIPE).communicate()
if not title:
continue
data = title.split(' ', 1)
if data[0] == 'feat':
feats.append(data[1].rstrip())
elif data[0] == 'fix':
fixs.append(data[1].rstrip())
if 'BROKEN:' in body:
brokens += body.split('BROKEN:')[1].splitlines()
logs = "## %s:\n" % toTag
if not len(feats) and not len(fixs) and not len(brokens):
logs += "*No major changes.*\n\n\n"
else:
if len(feats):
logs += "\n#### New features:\n"
for feat in feats:
logs += " - %s\n" % feat
if len(fixs):
logs += "\n#### Bug fixes:\n"
for fix in fixs:
logs += " - %s\n" % fix
if len(brokens):
logs += "\n#### Breaking changes:\n"
for broken in brokens:
if broken.rstrip() != '':
logs += " - %s\n" % broken
logs += "\n\n"
return logs
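# Hedged illustration (not part of the script): the commit-message convention the parsing
# above relies on -- a "feat "/"fix " prefix in the subject line and an optional "BROKEN:"
# block in the body. Example subjects/bodies (made up):
#
#     feat add a size variant to the button directive
#
#     fix keep the dropdown open on checkbox toggle
#     BROKEN:
#     lx-button now requires an explicit type attribute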
if __name__ == "__main__":
tags = getTags()
lastChangelogTag = checkLastChangelogTag()
changelog = ''
tagsToBuild = tags
previousTag = None
if lastChangelogTag:
previousTag = lastChangelogTag
tagsToBuild = tags[tags.index(lastChangelogTag) + 1:]
else:
tagsToBuild = tags[1:] # ignoring first release which contains only the first commit
with open('CHANGELOG.md', 'r+') as f:
changelog = f.read().replace('# Changelog\n\n', '').rstrip() + '\n'
if not len(tagsToBuild):
print "No new changlogs! Last tag (%s) is already in the CHANGELOG.md." % lastChangelogTag
exit(0)
for tag in tagsToBuild:
newLogs = buildNewLogs(previousTag, tag)
previousTag = tag
changelog = newLogs + changelog
changelog = '# Changelog\n\n' + changelog
with open('CHANGELOG.md', 'w') as f:
f.write(changelog)
| apache-2.0 |
aristanetworks/arista-ovs-quantum | quantum/plugins/nicira/nicira_nvp_plugin/NvpApiClient.py | 4 | 7453 | # Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# @author: Somik Behera, Nicira Networks, Inc.
import httplib # basic HTTP library for HTTPS connections
import logging
from quantum.plugins.nicira.nicira_nvp_plugin.api_client import (
client_eventlet, request_eventlet)
LOG = logging.getLogger("NVPApiHelper")
LOG.setLevel(logging.INFO)
class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
'''
Helper class to do basic login, cookie management, and provide base
method to send HTTP requests.
Implements new eventlet-based framework derived from the management
console nvp_gevent_client module.
'''
def __init__(self, api_providers, user, password, request_timeout,
http_timeout, retries, redirects, failover_time,
concurrent_connections=3):
'''Constructor.
:param api_providers: a list of tuples in the form:
(host, port, is_ssl=True). Passed on to NvpClientEventlet.
:param user: the login username.
:param password: the login password.
:param concurrent_connections: the number of concurrent connections.
:param request_timeout: all operations (including retries, redirects
from unresponsive controllers, etc) should finish within this
timeout.
:param http_timeout: how long to wait before aborting an
unresponsive controller (and allow for retries to another
controller in the cluster)
:param retries: the number of times a request is retried.
:param redirects: the maximum number of redirects to follow.
:param failover_time: minimum time between controller failover and new
connections allowed.
'''
client_eventlet.NvpApiClientEventlet.__init__(
self, api_providers, user, password, concurrent_connections,
failover_time=failover_time)
self._request_timeout = request_timeout
self._http_timeout = http_timeout
self._retries = retries
self._redirects = redirects
def login(self, user=None, password=None):
'''Login to NVP controller.
Assumes same password is used for all controllers.
:param user: NVP controller user (usually admin). Provided for
backwards compatibility. In the normal mode of operation
this should be None.
:param password: NVP controller password. Provided for backwards
compatibility. In the normal mode of operation this should
be None.
:returns: Does not return a value.
'''
if user:
self._user = user
if password:
self._password = password
return client_eventlet.NvpApiClientEventlet.login(self)
def request(self, method, url, body="", content_type="application/json"):
'''Issues request to controller.'''
g = request_eventlet.NvpGenericRequestEventlet(
self, method, url, body, content_type, auto_login=True,
request_timeout=self._request_timeout,
http_timeout=self._http_timeout,
retries=self._retries, redirects=self._redirects)
g.start()
response = g.join()
LOG.debug('NVPApiHelper.request() returns "%s"' % response)
# response is a modified HTTPResponse object or None.
# response.read() will not work on response as the underlying library
# request_eventlet.NvpApiRequestEventlet has already called this
# method in order to extract the body and headers for processing.
# NvpApiRequestEventlet derived classes call .read() and
# .getheaders() on the HTTPResponse objects and store the results in
# the response object's .body and .headers data members for future
# access.
if response is None:
# Timeout.
LOG.error('Request timed out: %s to %s' % (method, url))
raise RequestTimeout()
status = response.status
if status == httplib.UNAUTHORIZED:
raise UnAuthorizedRequest()
# Fail-fast: Check for exception conditions and raise the
# appropriate exceptions for known error codes.
if status in self.error_codes:
LOG.error("Received error code: %s" % status)
LOG.error("Server Error Message: %s" % response.body)
self.error_codes[status](self)
# Continue processing for non-error condition.
if (status != httplib.OK and status != httplib.CREATED
and status != httplib.NO_CONTENT):
LOG.error("%s to %s, unexpected response code: %d (content = '%s')"
% (method, url, response.status, response.body))
return None
return response.body
def fourZeroFour(self):
raise ResourceNotFound()
def fourZeroNine(self):
raise Conflict()
def fiveZeroThree(self):
raise ServiceUnavailable()
def fourZeroThree(self):
raise Forbidden()
def zero(self):
raise NvpApiException()
# TODO(del): ensure error_codes are handled/raised appropriately
# in api_client.
error_codes = {404: fourZeroFour,
409: fourZeroNine,
503: fiveZeroThree,
403: fourZeroThree,
301: zero,
307: zero,
400: zero,
500: zero}
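# Usage sketch (illustrative only, not part of the original plugin; the
# controller address and credentials below are hypothetical):
#   >>> api = NVPApiHelper([('10.0.0.1', 443, True)], 'admin', 'secret',
#   ...                    request_timeout=30, http_timeout=10, retries=2,
#   ...                    redirects=2, failover_time=5)
#   >>> api.login()
#   >>> body = api.request('GET', '/ws.v1/control-cluster')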
class NvpApiException(Exception):
'''
Base NvpApiClient Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
'''
message = "An unknown exception occurred."
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class UnAuthorizedRequest(NvpApiException):
message = "Server denied session's authentication credentials."
class ResourceNotFound(NvpApiException):
message = "An entity referenced in the request was not found."
class Conflict(NvpApiException):
message = "Request conflicts with configuration on a different entity."
class ServiceUnavailable(NvpApiException):
message = ("Request could not completed because the associated "
"resource could not be reached.")
class Forbidden(NvpApiException):
message = ("The request is forbidden from accessing the "
"referenced resource.")
class RequestTimeout(NvpApiException):
message = "The request has timed out."
| apache-2.0 |
gribozavr/swift | utils/swift_build_support/swift_build_support/host_specific_configuration.py | 1 | 12379 | # swift_build_support/host_specific_configuration.py ------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from argparse import ArgumentError
import diagnostics
from .targets import StdlibDeploymentTarget
class HostSpecificConfiguration(object):
"""Configuration information for an individual host."""
def __init__(self, host_target, args):
"""Initialize for the given `host_target`."""
# Compute the set of deployment targets to configure/build.
if host_target == args.host_target:
# This host is the user's desired product, so honor the requested
# set of targets to configure/build.
stdlib_targets_to_configure = args.stdlib_deployment_targets
if "all" in args.build_stdlib_deployment_targets:
stdlib_targets_to_build = set(stdlib_targets_to_configure)
else:
stdlib_targets_to_build = set(
args.build_stdlib_deployment_targets).intersection(
set(args.stdlib_deployment_targets))
else:
# Otherwise, this is a host we are building as part of
# cross-compiling, so we only need the target itself.
stdlib_targets_to_configure = [host_target]
stdlib_targets_to_build = set(stdlib_targets_to_configure)
# Compute derived information from the arguments.
#
# FIXME: We should move the platform-derived arguments to be entirely
# data driven, so that we can eliminate this code duplication and just
# iterate over all supported platforms.
platforms_to_skip_build = self.__platforms_to_skip_build(args)
platforms_to_skip_test = self.__platforms_to_skip_test(args)
platforms_archs_to_skip_test = \
self.__platforms_archs_to_skip_test(args)
platforms_to_skip_test_host = self.__platforms_to_skip_test_host(args)
# Compute the lists of **CMake** targets for each use case (configure
# vs. build vs. run) and the SDKs to configure with.
self.sdks_to_configure = set()
self.swift_stdlib_build_targets = []
self.swift_test_run_targets = []
self.swift_benchmark_build_targets = []
self.swift_benchmark_run_targets = []
for deployment_target_name in stdlib_targets_to_configure:
# Get the target object.
deployment_target = StdlibDeploymentTarget.get_target_for_name(
deployment_target_name)
if deployment_target is None:
diagnostics.fatal("unknown target: %r" % (
deployment_target_name,))
# Add the SDK to use.
deployment_platform = deployment_target.platform
self.sdks_to_configure.add(deployment_platform.sdk_name)
# If we aren't actually building this target (only configuring
# it), do nothing else.
if deployment_target_name not in stdlib_targets_to_build:
continue
# Compute which actions are desired.
build = (
deployment_platform not in platforms_to_skip_build)
test = (
deployment_platform not in platforms_to_skip_test)
test_host_only = None
dt_supports_benchmark = deployment_target.supports_benchmark
build_benchmarks = build and dt_supports_benchmark
build_external_benchmarks = all([build, dt_supports_benchmark,
args.build_external_benchmarks])
# FIXME: Note, `build-script-impl` computed a property here
# w.r.t. testing, but it was actually unused.
# For platforms which normally require a connected device to
# test, the default behavior is to run tests that only require
# the host (i.e., they do not attempt to execute).
if deployment_platform.uses_host_tests and \
deployment_platform not in \
platforms_to_skip_test_host:
test_host_only = True
name = deployment_target.name
for skip_test_arch in platforms_archs_to_skip_test:
if deployment_target.name == skip_test_arch.name:
test = False
if build:
# Validation, long, and stress tests require building the full
# standard library, whereas the other targets can build a
# slightly smaller subset which is faster to build.
if args.build_swift_stdlib_unittest_extra or \
args.validation_test or args.long_test or \
args.stress_test:
self.swift_stdlib_build_targets.append(
"swift-stdlib-" + name)
else:
self.swift_stdlib_build_targets.append(
"swift-test-stdlib-" + name)
if build_benchmarks:
self.swift_benchmark_build_targets.append(
"swift-benchmark-" + name)
if args.benchmark:
self.swift_benchmark_run_targets.append(
"check-swift-benchmark-" + name)
if build_external_benchmarks:
# Add support for the external benchmarks.
self.swift_benchmark_build_targets.append(
"swift-benchmark-{}-external".format(name))
if args.benchmark:
self.swift_benchmark_run_targets.append(
"check-swift-benchmark-{}-external".format(name))
if test:
if test_host_only:
suffix = "-only_non_executable"
elif args.only_executable_test:
suffix = "-only_executable"
else:
suffix = ""
subset_suffix = ""
if args.validation_test and args.long_test and \
args.stress_test:
subset_suffix = "-all"
elif args.validation_test:
subset_suffix = "-validation"
elif args.long_test:
subset_suffix = "-only_long"
elif args.stress_test:
subset_suffix = "-only_stress"
else:
subset_suffix = ""
# Support for running the macCatalyst tests with
# the iOS-like target triple.
if name == "macosx-x86_64" and args.maccatalyst \
and args.maccatalyst_ios_tests:
(self.swift_test_run_targets
.append("check-swift{}{}-{}".format(
subset_suffix, suffix, "macosx-maccatalyst-x86_64")))
else:
(self.swift_test_run_targets
.append("check-swift{}{}-{}".format(
subset_suffix, suffix, name)))
if args.test_optimized and not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize-{}".format(
subset_suffix, name))
if args.test_optimize_for_size and not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize_size-{}".format(
subset_suffix, name))
if args.test_optimize_none_with_implicit_dynamic and \
not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize_none_with_implicit_dynamic-{}"
.format(subset_suffix, name))
def __platforms_to_skip_build(self, args):
platforms_to_skip_build = set()
if not args.build_linux:
platforms_to_skip_build.add(StdlibDeploymentTarget.Linux)
if not args.build_freebsd:
platforms_to_skip_build.add(StdlibDeploymentTarget.FreeBSD)
if not args.build_cygwin:
platforms_to_skip_build.add(StdlibDeploymentTarget.Cygwin)
if not args.build_osx:
platforms_to_skip_build.add(StdlibDeploymentTarget.OSX)
if not args.build_ios_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.iOS)
if not args.build_ios_simulator:
platforms_to_skip_build.add(StdlibDeploymentTarget.iOSSimulator)
if not args.build_tvos_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.AppleTV)
if not args.build_tvos_simulator:
platforms_to_skip_build.add(
StdlibDeploymentTarget.AppleTVSimulator)
if not args.build_watchos_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.AppleWatch)
if not args.build_watchos_simulator:
platforms_to_skip_build.add(
StdlibDeploymentTarget.AppleWatchSimulator)
if not args.build_android:
platforms_to_skip_build.add(StdlibDeploymentTarget.Android)
return platforms_to_skip_build
def __platforms_to_skip_test(self, args):
platforms_to_skip_test = set()
if not args.test_linux:
platforms_to_skip_test.add(StdlibDeploymentTarget.Linux)
if not args.test_freebsd:
platforms_to_skip_test.add(StdlibDeploymentTarget.FreeBSD)
if not args.test_cygwin:
platforms_to_skip_test.add(StdlibDeploymentTarget.Cygwin)
if not args.test_osx:
platforms_to_skip_test.add(StdlibDeploymentTarget.OSX)
if not args.test_ios_host:
platforms_to_skip_test.add(StdlibDeploymentTarget.iOS)
else:
raise ArgumentError(None,
"error: iOS device tests are not " +
"supported in open-source Swift.")
if not args.test_ios_simulator:
platforms_to_skip_test.add(StdlibDeploymentTarget.iOSSimulator)
if not args.test_tvos_host:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleTV)
else:
raise ArgumentError(None,
"error: tvOS device tests are not " +
"supported in open-source Swift.")
if not args.test_tvos_simulator:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleTVSimulator)
if not args.test_watchos_host:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleWatch)
else:
raise ArgumentError(None,
"error: watchOS device tests are not " +
"supported in open-source Swift.")
if not args.test_watchos_simulator:
platforms_to_skip_test.add(
StdlibDeploymentTarget.AppleWatchSimulator)
if not args.test_android:
platforms_to_skip_test.add(StdlibDeploymentTarget.Android)
return platforms_to_skip_test
def __platforms_archs_to_skip_test(self, args):
platforms_archs_to_skip_test = set()
if not args.test_ios_32bit_simulator:
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.iOSSimulator.i386)
return platforms_archs_to_skip_test
def __platforms_to_skip_test_host(self, args):
platforms_to_skip_test_host = set()
if not args.test_android_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.Android)
if not args.test_ios_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.iOS)
if not args.test_tvos_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.AppleTV)
if not args.test_watchos_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.AppleWatch)
return platforms_to_skip_test_host
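# Usage sketch (illustrative, not part of the original file): build-script
# creates one HostSpecificConfiguration per host from its parsed argument
# namespace and then feeds the computed target lists to CMake/ninja.
#   >>> config = HostSpecificConfiguration('macosx-x86_64', args)  # 'args' is hypothetical here
#   >>> config.swift_stdlib_build_targets    # e.g. ['swift-test-stdlib-macosx-x86_64']
#   >>> config.swift_test_run_targets        # e.g. ['check-swift-macosx-x86_64']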
| apache-2.0 |
openhatch/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_tables.py | 16 | 37488 | #! /usr/bin/env python
# $Id: test_tables.py 7313 2012-01-11 20:28:57Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
import os
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
mydir = 'test_parsers/test_rst/'
include2 = os.path.join(mydir, 'test_directives/include2.txt')
totest = {}
totest['grid_tables'] = [
["""\
+-------------------------------------+
| A table with one cell and one line. |
+-------------------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="37">
<tbody>
<row>
<entry>
<paragraph>
A table with one cell and one line.
"""],
["""\
+-----------------------+
| A table with one cell |
| and two lines. |
+-----------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="23">
<tbody>
<row>
<entry>
<paragraph>
A table with one cell
and two lines.
"""],
["""\
+-----------------------+
| A malformed table. |
+-----------------------+
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
<literal_block xml:space="preserve">
+-----------------------+
| A malformed table. |
+-----------------------+
"""],
["""\
+------------------------+
| A well-formed | table. |
+------------------------+
+------------------------+
| This +----------+ too! |
+------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="24">
<tbody>
<row>
<entry>
<paragraph>
A well-formed | table.
<table>
<tgroup cols="1">
<colspec colwidth="24">
<tbody>
<row>
<entry>
<paragraph>
This +----------+ too!
"""],
["""\
+--------------+--------------+
| A table with | two columns. |
+--------------+--------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns.
"""],
["""\
+--------------+
| A table with |
+--------------+
| two rows. |
+--------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<row>
<entry>
<paragraph>
two rows.
"""],
["""\
+--------------+-------------+
| A table with | two columns |
+--------------+-------------+
| and | two rows. |
+--------------+-------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="13">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns
<row>
<entry>
<paragraph>
and
<entry>
<paragraph>
two rows.
"""],
["""\
+--------------+---------------+
| A table with | two columns, |
+--------------+---------------+
| two rows, and a column span. |
+------------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="15">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns,
<row>
<entry morecols="1">
<paragraph>
two rows, and a column span.
"""],
["""\
+--------------------------+
| A table with three rows, |
+------------+-------------+
| and two | columns. |
+------------+-------------+
| First and last rows |
| contains column spans. |
+--------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="13">
<tbody>
<row>
<entry morecols="1">
<paragraph>
A table with three rows,
<row>
<entry>
<paragraph>
and two
<entry>
<paragraph>
columns.
<row>
<entry morecols="1">
<paragraph>
First and last rows
contains column spans.
"""],
["""\
+--------------+--------------+
| A table with | two columns, |
+--------------+ and a row |
| two rows, | span. |
+--------------+--------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry morerows="1">
<paragraph>
two columns,
and a row
span.
<row>
<entry>
<paragraph>
two rows,
"""],
["""\
+------------+-------------+---------------+
| A table | two rows in | and row spans |
| with three +-------------+ to left and |
| columns, | the middle, | right. |
+------------+-------------+---------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="3">
<colspec colwidth="12">
<colspec colwidth="13">
<colspec colwidth="15">
<tbody>
<row>
<entry morerows="1">
<paragraph>
A table
with three
columns,
<entry>
<paragraph>
two rows in
<entry morerows="1">
<paragraph>
and row spans
to left and
right.
<row>
<entry>
<paragraph>
the middle,
"""],
["""\
Complex spanning pattern (no edge knows all rows/cols):
+-----------+-------------------------+
| W/NW cell | N/NE cell |
| +-------------+-----------+
| | Middle cell | E/SE cell |
+-----------+-------------+ |
| S/SE cell | |
+-------------------------+-----------+
""",
"""\
<document source="test data">
<paragraph>
Complex spanning pattern (no edge knows all rows/cols):
<table>
<tgroup cols="3">
<colspec colwidth="11">
<colspec colwidth="13">
<colspec colwidth="11">
<tbody>
<row>
<entry morerows="1">
<paragraph>
W/NW cell
<entry morecols="1">
<paragraph>
N/NE cell
<row>
<entry>
<paragraph>
Middle cell
<entry morerows="1">
<paragraph>
E/SE cell
<row>
<entry morecols="1">
<paragraph>
S/SE cell
"""],
["""\
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="4">
<colspec colwidth="24">
<colspec colwidth="12">
<colspec colwidth="10">
<colspec colwidth="10">
<thead>
<row>
<entry>
<paragraph>
Header row, column 1
<entry>
<paragraph>
Header 2
<entry>
<paragraph>
Header 3
<entry>
<paragraph>
Header 4
<tbody>
<row>
<entry>
<paragraph>
body row 1, column 1
<entry>
<paragraph>
column 2
<entry>
<paragraph>
column 3
<entry>
<paragraph>
column 4
<row>
<entry>
<paragraph>
body row 2
<entry morecols="2">
<paragraph>
Cells may span columns.
<row>
<entry>
<paragraph>
body row 3
<entry morerows="1">
<paragraph>
Cells may
span rows.
<entry morecols="1" morerows="1">
<bullet_list bullet="-">
<list_item>
<paragraph>
Table cells
<list_item>
<paragraph>
contain
<list_item>
<paragraph>
body elements.
<row>
<entry>
<paragraph>
body row 4
"""],
["""\
+-----------------+--------+
| A simple table | cell 2 |
+-----------------+--------+
| cell 3 | cell 4 |
+-----------------+--------+
No blank line after table.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="17">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<paragraph>
cell 2
<row>
<entry>
<paragraph>
cell 3
<entry>
<paragraph>
cell 4
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<paragraph>
No blank line after table.
"""],
["""\
+-----------------+--------+
| A simple table | cell 2 |
+-----------------+--------+
| cell 3 | cell 4 |
+-----------------+--------+
Unexpected indent and no blank line after table.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="17">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<paragraph>
cell 2
<row>
<entry>
<paragraph>
cell 3
<entry>
<paragraph>
cell 4
<system_message level="3" line="6" source="test data" type="ERROR">
<paragraph>
Unexpected indentation.
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<block_quote>
<paragraph>
Unexpected indent and no blank line after table.
"""],
["""\
+--------------+-------------+
| A bad table. | |
+--------------+ |
| Cells must be rectangles. |
+----------------------------+
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
Malformed table; parse incomplete.
<literal_block xml:space="preserve">
+--------------+-------------+
| A bad table. | |
+--------------+ |
| Cells must be rectangles. |
+----------------------------+
"""],
["""\
+------------------------------+
| This table contains another. |
| |
| +-------------------------+ |
| | A table within a table. | |
| +-------------------------+ |
+------------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="30">
<tbody>
<row>
<entry>
<paragraph>
This table contains another.
<table>
<tgroup cols="1">
<colspec colwidth="25">
<tbody>
<row>
<entry>
<paragraph>
A table within a table.
"""],
["""\
+------------------+--------+
| A simple table | |
+------------------+--------+
| with empty cells | |
+------------------+--------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="18">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<row>
<entry>
<paragraph>
with empty cells
<entry>
"""],
[("""\
+------------------------------------------------------------------------------+
| .. include:: |
%s
+------------------------------------------------------------------------------+
| (The first cell of this table may expand |
| to accommodate long filesystem paths.) |
+------------------------------------------------------------------------------+
""") % ('\n'.join(['| %-70s |' % include2[part * 70 : (part + 1) * 70]
for part in range(len(include2) // 70 + 1)])),
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="78">
<tbody>
<row>
<entry>
<paragraph>
Here are some paragraphs
that can appear at any level.
<paragraph>
This file (include2.txt) is used by
<literal>
test_include.py
.
<row>
<entry>
<paragraph>
(The first cell of this table may expand
to accommodate long filesystem paths.)
"""],
[("""\
Something before.
+------------------------------------------------------------------------------+
| .. include:: |
%s
+------------------------------------------------------------------------------+
Something afterwards.
And more.
""") % ('\n'.join(['| %-70s |' % include2[part * 70 : (part + 1) * 70]
for part in range(len(include2) // 70 + 1)])),
"""\
<document source="test data">
<paragraph>
Something before.
<table>
<tgroup cols="1">
<colspec colwidth="78">
<tbody>
<row>
<entry>
<paragraph>
Here are some paragraphs
that can appear at any level.
<paragraph>
This file (include2.txt) is used by
<literal>
test_include.py
.
<paragraph>
Something afterwards.
<paragraph>
And more.
"""],
]
totest['simple_tables'] = [
["""\
============ ============
A table with two columns.
============ ============
Paragraph.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="12">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns.
<paragraph>
Paragraph.
"""],
["""\
============ ============
A table with two columns
and two rows.
============ ============
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="12">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns
<row>
<entry>
<paragraph>
and
<entry>
<paragraph>
two rows.
"""],
["""\
============ ==============
A table with two columns,
two rows, and a column span.
============================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns,
<row>
<entry morecols="1">
<paragraph>
two rows, and a column span.
"""],
["""\
== =========== ===========
1 A table with three rows,
-- ------------------------
2 and three columns.
3 First and third rows
contain column spans.
This row is a multi-line row, and overflows to the right.
-- ------------------------
4 One last row.
== =========== ===========
""",
"""\
<document source="test data">
<table>
<tgroup cols="3">
<colspec colwidth="2">
<colspec colwidth="11">
<colspec colwidth="44">
<tbody>
<row>
<entry>
<paragraph>
1
<entry morecols="1">
<paragraph>
A table with three rows,
<row>
<entry>
<paragraph>
2
<entry>
<paragraph>
and three
<entry>
<paragraph>
columns.
<row>
<entry>
<paragraph>
3
<entry morecols="1">
<paragraph>
First and third rows
contain column spans.
<paragraph>
This row is a multi-line row, and overflows to the right.
<row>
<entry>
<paragraph>
4
<entry>
<paragraph>
One last
<entry>
<paragraph>
row.
"""],
["""\
======= ========= ========
A table with three columns.
================== ========
""",
"""\
<document source="test data">
<table>
<tgroup cols="3">
<colspec colwidth="7">
<colspec colwidth="9">
<colspec colwidth="8">
<tbody>
<row>
<entry morecols="1">
<paragraph>
A table with three
<entry>
<paragraph>
columns.
"""],
["""\
============== ======
A simple table with
no bottom border
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
No bottom table border found.
<literal_block xml:space="preserve">
============== ======
A simple table with
no bottom border
"""],
["""\
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
No blank line after table.
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
No bottom table border found or no blank line after table bottom.
<literal_block xml:space="preserve">
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
<system_message level="2" line="5" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<paragraph>
No blank line after table.
"""],
["""\
============== ======
A simple table cell 2
============== ======
cell 3 cell 4
============== ======
No blank line after table.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="6">
<thead>
<row>
<entry>
<paragraph>
A simple table
<entry>
<paragraph>
cell 2
<tbody>
<row>
<entry>
<paragraph>
cell 3
<entry>
<paragraph>
cell 4
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<paragraph>
No blank line after table.
"""],
["""\
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
Unexpected indent and no blank line after table.
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
No bottom table border found or no blank line after table bottom.
<literal_block xml:space="preserve">
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
<system_message level="2" line="5" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<block_quote>
<paragraph>
Unexpected indent and no blank line after table.
"""],
["""\
============== ======
A bad table cell 2
cell 3 cell 4
============ ========
""",
"""\
<document source="test data">
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Malformed table.
Column span alignment problem in table line 4.
<literal_block xml:space="preserve">
============== ======
A bad table cell 2
cell 3 cell 4
============ ========
"""],
["""\
======== =========
A bad table cell 2
cell 3 cell 4
======== =========
""",
"""\
<document source="test data">
<system_message level="3" line="2" source="test data" type="ERROR">
<paragraph>
Malformed table.
Text in column margin in table line 2.
<literal_block xml:space="preserve">
======== =========
A bad table cell 2
cell 3 cell 4
======== =========
"""],
["""\
== ============================
1 This table contains another.
2 ======= ====== ========
A table within a table.
======= ====== ========
The outer table does have to
have at least two columns
though.
== ============================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="2">
<colspec colwidth="28">
<tbody>
<row>
<entry>
<paragraph>
1
<entry>
<paragraph>
This table contains another.
<row>
<entry>
<paragraph>
2
<entry>
<table>
<tgroup cols="3">
<colspec colwidth="7">
<colspec colwidth="6">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A table
<entry>
<paragraph>
within
<entry>
<paragraph>
a table.
<paragraph>
The outer table does have to
have at least two columns
though.
"""],
["""\
================ ======
A simple table
with empty cells
================ ======
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="16">
<colspec colwidth="6">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<row>
<entry>
<paragraph>
with empty cells
<entry>
"""],
["""\
============== ========
A table with
============== ========
centered cells.
============== ========
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="8">
<thead>
<row>
<entry>
<paragraph>
A table
<entry>
<paragraph>
with
<tbody>
<row>
<entry>
<paragraph>
centered
<entry>
<paragraph>
cells.
"""],
["""\
============== ======
A simple table this text extends to the right
cell 3 the bottom border below is too long
============== ========
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
Bottom/header table border does not match top border.
<literal_block xml:space="preserve">
============== ======
A simple table this text extends to the right
cell 3 the bottom border below is too long
============== ========
"""],
["""\
============ =================
A table with row separators.
------------ -----------------
Blank line before.
------------ -----------------
Blank lines before and after.
------------ -----------------
Blank line after.
============ =================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="17">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
row separators.
<row>
<entry>
<paragraph>
Blank line
<entry>
<paragraph>
before.
<row>
<entry>
<paragraph>
Blank lines
<entry>
<paragraph>
before and after.
<row>
<entry>
<paragraph>
Blank line
<entry>
<paragraph>
after.
"""],
["""\
============ ====================
A table with many row separators.
------------ --------------------
------------ --------------------
------------ --------------------
============ ====================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="20">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
many row separators.
<row>
<entry>
<entry>
<row>
<entry>
<entry>
<row>
<entry>
<entry>
"""],
["""\
== =========== ===========
1 Span columns 2 & 3
-- ------------------------
2 Span columns 2 & 3
------------------------
3
== =========== ===========
== =========== ===========
1 Span cols 1&2 but not 3
--------------- -----------
2 Span cols 1&2 but not 3
---------------
3 no spans here
== =========== ===========
== =========== ===========
1 Not a span Not a span
----------- -----------
2
== =========== ===========
""",
"""\
<document source="test data">
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Malformed table.
Text in column margin in table line 4.
<literal_block xml:space="preserve">
== =========== ===========
1 Span columns 2 & 3
-- ------------------------
2 Span columns 2 & 3
------------------------
3
== =========== ===========
<system_message level="3" line="13" source="test data" type="ERROR">
<paragraph>
Malformed table.
Column span incomplete in table line 5.
<literal_block xml:space="preserve">
== =========== ===========
1 Span cols 1&2 but not 3
--------------- -----------
2 Span cols 1&2 but not 3
---------------
3 no spans here
== =========== ===========
<table>
<tgroup cols="3">
<colspec colwidth="2">
<colspec colwidth="11">
<colspec colwidth="11">
<tbody>
<row>
<entry>
<paragraph>
1
<entry>
<system_message level="4" line="19" source="test data" type="SEVERE">
<paragraph>
Unexpected section title.
<literal_block xml:space="preserve">
Not a span
-----------
<entry>
<system_message level="4" line="19" source="test data" type="SEVERE">
<paragraph>
Unexpected section title.
<literal_block xml:space="preserve">
Not a span
-----------
<row>
<entry>
<paragraph>
2
<entry>
<entry>
"""],
["""\
========= =====================================================================
Inclusion .. include::
%s
Note The first row of this table may expand
to accommodate long filesystem paths.
========= =====================================================================
""" % ('\n'.join([' %-65s' % include2[part * 65 : (part + 1) * 65]
for part in range(len(include2) // 65 + 1)])),
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="9">
<colspec colwidth="69">
<tbody>
<row>
<entry>
<paragraph>
Inclusion
<entry>
<paragraph>
Here are some paragraphs
that can appear at any level.
<paragraph>
This file (include2.txt) is used by
<literal>
test_include.py
.
<row>
<entry>
<paragraph>
Note
<entry>
<paragraph>
The first row of this table may expand
to accommodate long filesystem paths.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
palmerjh/iEBE | EBE-Node/superMC/translate.py | 8 | 3447 | #! /usr/bin/env python
# This script translates a list of arguments into one value specified by a rule file. Usage:
# translate ruleFilename key1 key2 ...
# It prints out the value corresponding to [key1, key2, ...] from a dictionary read from ruleFilename.
# To see how the dictionary is generated, see readRules function.
from sys import exit, argv
def processOneLine(aLine, level_indicator="+", key_separator=":", commentSymbol="#"):
"""
Return [level, keys_list, value] list from string aLine.
level is indicated by how many successive level_indicators there are to the left; key and value are separated by key_separator.
"""
# take care of comments:
if commentSymbol in aLine:
aLine = aLine[:aLine.index(commentSymbol)].strip();
# if it's an empty line:
aLine = aLine.strip()
if aLine=="": return []
# check if syntax is correct:
if key_separator not in aLine:
print("translate.processOneLine error: key-value separator "+key_separator+" not included in the line \n"+aLine)
exit(-1)
# get level
level = 0
for i in range(len(aLine)):
if aLine[i]==level_indicator:
level = level + 1
else:
aLine = aLine[i:]
break
# separate key and value
components = aLine.split(key_separator);
keys_list = [x.strip() for x in components[:-1]];
value = components[-1].strip();
# finally...
return [level, keys_list, value]
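# Illustrative examples (comments added for clarity, not in the original file):
#   >>> processOneLine("z : 1")
#   [0, ['z'], '1']
#   >>> processOneLine("++ b : 3  # trailing comment")
#   [2, ['b'], '3']
#   >>> processOneLine("# a comment-only line")
#   []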
def readRules(buffer,level_indicator="+", key_separator=":", commentSymbol="#"):
"""
Process the text buffer to get the rules used for translation, line by line. Each line is turned into one entry in a rule dictionary, which is then returned. The rule dictionary is generated using the list of all strings between key_separators except the last one as the key, and the last one as the value.
For example,
a : b: 1 # comments
will be translates into entry ["a","b"]:"1"
To ease the pain of repeated common keys, a level_indicator can be used to indicate how many shared keys the current line inherits from previous lines. The number of level_indicators gives the number of keys the current line should inherit (starting from the left) from previous lines.
For example, if the text buffer looks like:
z : 1
+ a : 2
++ b : 3
+ d: 4
The rule dictionary will contain:
("z") : "1"
("z", "a") : 2
("z", "a", "b") : 3
("z", "d") : 4
Note that the following
z : 1
++ a : 2
will raise an error.
"""
D = {}
accumulated_keys = [];
for aLine in buffer:
tmp_result = processOneLine(aLine)
if not tmp_result: continue
level, keys, value = tmp_result
if level>len(accumulated_keys):
print("translates.readRules error: two many "+level_indicator+" signs in the line\n"+aLine)
exit(-1)
else:
accumulated_keys = accumulated_keys[:level]
accumulated_keys.extend(keys)
D[tuple(accumulated_keys)] = value
return D
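# Illustrative example (not in the original file), mirroring the docstring above:
#   >>> readRules(["z : 1", "+ a : 2", "++ b : 3", "+ d : 4"])
# would return a dictionary mapping ('z',) -> '1', ('z', 'a') -> '2',
# ('z', 'a', 'b') -> '3' and ('z', 'd') -> '4'.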
def translate(ruleFilename, keys_list):
"""
Translate keys_list into the corresponding value given in the dictionary generated from ruleFilename using the readRules function.
"""
D = readRules(ruleFilename)
result = ""
for ii in range(len(keys_list)): result+=" "+(D[tuple(keys_list[:ii+1])])
return result
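# Illustrative example (not in the original file): with the rules shown above,
# translate concatenates the values of every key prefix, so
#   >>> translate(["z : 1", "+ a : 2", "++ b : 3"], ['z', 'a', 'b'])
# returns ' 1 2 3' (note the leading space added before each value).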
if __name__ == '__main__':
if len(argv)<3:
print("Usage: translate ruleFilename key1 key2 ...")
exit(-1)
else:
print(translate(open(argv[1]).readlines(), argv[2:]))
| gpl-3.0 |
amenonsen/ansible | lib/ansible/modules/storage/netapp/na_ontap_igroup_initiator.py | 21 | 6258 | #!/usr/bin/python
''' This is an Ansible module for ONTAP, to manage initiators in an Igroup
(c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: na_ontap_igroup_initiator
short_description: NetApp ONTAP igroup initiator configuration
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Add/Remove initiators from an igroup
options:
state:
description:
- Whether the specified initiator should exist or not in an igroup.
choices: ['present', 'absent']
default: present
names:
description:
- List of initiators to manage.
required: true
aliases:
- name
initiator_group:
description:
- Name of the initiator group to which the initiator belongs.
required: true
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = '''
- name: Add initiators to an igroup
na_ontap_igroup_initiator:
names: abc.test:def.com,def.test:efg.com
initiator_group: test_group
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Remove an initiator from an igroup
na_ontap_igroup_initiator:
state: absent
names: abc.test:def.com
initiator_group: test_group
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapIgroupInitiator(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
names=dict(required=True, type='list', aliases=['name']),
initiator_group=dict(required=True, type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
def get_initiators(self):
"""
Get the existing list of initiators from an igroup
:rtype: list() or None
"""
igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
attributes = dict(query={'initiator-group-info': {'initiator-group-name': self.parameters['initiator_group'],
'vserver': self.parameters['vserver']}})
igroup_info.translate_struct(attributes)
result, current = None, []
try:
result = self.server.invoke_successfully(igroup_info, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['initiator_group'],
to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
igroup_info = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
if igroup_info.get_child_by_name('initiators') is not None:
current = [initiator['initiator-name'] for initiator in igroup_info['initiators'].get_children()]
return current
def modify_initiator(self, initiator_name, zapi):
"""
Add or remove an initiator to/from an igroup
"""
options = {'initiator-group-name': self.parameters['initiator_group'],
'initiator': initiator_name}
initiator_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
try:
self.server.invoke_successfully(initiator_modify, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (initiator_name,
to_native(error)),
exception=traceback.format_exc())
def autosupport_log(self):
netapp_utils.ems_log_event("na_ontap_igroup_initiator", self.server)
def apply(self):
self.autosupport_log()
initiators = self.get_initiators()
for initiator in self.parameters['names']:
present = None
if initiator in initiators:
present = True
cd_action = self.na_helper.get_cd_action(present, self.parameters)
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
if cd_action == 'create':
self.modify_initiator(initiator, 'igroup-add')
elif cd_action == 'delete':
self.modify_initiator(initiator, 'igroup-remove')
self.module.exit_json(changed=self.na_helper.changed)
def main():
obj = NetAppOntapIgroupInitiator()
obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
efiop/dvc | dvc/repo/reproduce.py | 1 | 8232 | import logging
import typing
from functools import partial
from dvc.exceptions import DvcException, ReproductionError
from dvc.repo.scm_context import scm_context
from . import locked
if typing.TYPE_CHECKING:
from . import Repo
logger = logging.getLogger(__name__)
def _reproduce_stage(stage, **kwargs):
def _run_callback(repro_callback):
_dump_stage(stage)
_track_stage(stage)
repro_callback([stage])
checkpoint_func = kwargs.pop("checkpoint_func", None)
if stage.is_checkpoint:
if checkpoint_func:
kwargs["checkpoint_func"] = partial(_run_callback, checkpoint_func)
else:
raise DvcException(
"Checkpoint stages are not supported in 'dvc repro'. "
"Checkpoint stages must be reproduced with 'dvc exp run' "
"or 'dvc exp resume'."
)
if stage.frozen and not stage.is_import:
logger.warning(
"{} is frozen. Its dependencies are"
" not going to be reproduced.".format(stage)
)
stage = stage.reproduce(**kwargs)
if not stage:
return []
if not kwargs.get("dry", False):
track = checkpoint_func is not None
_dump_stage(stage)
if track:
_track_stage(stage)
return [stage]
def _dump_stage(stage):
from ..dvcfile import Dvcfile
dvcfile = Dvcfile(stage.repo, stage.path)
dvcfile.dump(stage, update_pipeline=False)
def _track_stage(stage):
from dvc.utils import relpath
stage.repo.scm.track_file(stage.dvcfile.relpath)
for dep in stage.deps:
if (
not dep.use_scm_ignore
and dep.is_in_repo
and not stage.repo.repo_fs.isdvc(dep.path_info)
):
stage.repo.scm.track_file(relpath(dep.path_info))
for out in stage.outs:
if not out.use_scm_ignore and out.is_in_repo:
stage.repo.scm.track_file(relpath(out.path_info))
if out.live:
from dvc.repo.live import summary_path_info
summary = summary_path_info(out)
if summary:
stage.repo.scm.track_file(relpath(summary))
stage.repo.scm.track_changed_files()
@locked
@scm_context
def reproduce(
self: "Repo",
targets=None,
recursive=False,
pipeline=False,
all_pipelines=False,
**kwargs,
):
from .graph import get_pipeline, get_pipelines
glob = kwargs.pop("glob", False)
accept_group = not glob
if isinstance(targets, str):
targets = [targets]
if not all_pipelines and not targets:
from dvc.dvcfile import PIPELINE_FILE
targets = [PIPELINE_FILE]
interactive = kwargs.get("interactive", False)
if not interactive:
kwargs["interactive"] = self.config["core"].get("interactive", False)
stages = set()
if pipeline or all_pipelines:
pipelines = get_pipelines(self.graph)
if all_pipelines:
used_pipelines = pipelines
else:
used_pipelines = []
for target in targets:
stage = self.stage.get_target(target)
used_pipelines.append(get_pipeline(pipelines, stage))
for pline in used_pipelines:
for stage in pline:
if pline.in_degree(stage) == 0:
stages.add(stage)
else:
for target in targets:
stages.update(
self.stage.collect(
target,
recursive=recursive,
accept_group=accept_group,
glob=glob,
)
)
return _reproduce_stages(self.graph, list(stages), **kwargs)
def _reproduce_stages(
G, stages, downstream=False, single_item=False, on_unchanged=None, **kwargs
):
r"""Derive the evaluation of the given node for the given graph.
When you _reproduce a stage_, you want to _evaluate the descendants_
to know if it makes sense to _recompute_ it. A post-ordered search
will give us an ordered list of the nodes we want.
For example, let's say that we have the following pipeline:
E
/ \
D F
/ \ \
B C G
\ /
A
The derived evaluation of D would be: [A, B, C, D]
In case that `downstream` option is specified, the desired effect
is to derive the evaluation starting from the given stage up to the
ancestors. However, the `networkx.ancestors` returns a set, without
any guarantee of any order, so we are going to reverse the graph and
use a reverse post-ordered search using the given stage as a starting
point.
E A
/ \ / \
D F B C G
/ \ \ --- reverse --> \ / /
B C G D F
\ / \ /
A E
The derived evaluation of _downstream_ B would be: [B, D, E]
"""
steps = _get_steps(G, stages, downstream, single_item)
force_downstream = kwargs.pop("force_downstream", False)
result = []
unchanged = []
# `ret` is used to add a cosmetic newline.
ret = []
checkpoint_func = kwargs.pop("checkpoint_func", None)
for stage in steps:
if ret:
logger.info("")
if checkpoint_func:
kwargs["checkpoint_func"] = partial(
_repro_callback, checkpoint_func, unchanged
)
from dvc.stage.monitor import CheckpointKilledError
try:
ret = _reproduce_stage(stage, **kwargs)
if len(ret) == 0:
unchanged.extend([stage])
elif force_downstream:
# NOTE: we are walking our pipeline from the top to the
# bottom. If one stage is changed, it will be reproduced,
# which tells us that we should force reproducing all of
# the other stages down below, even if their direct
# dependencies didn't change.
kwargs["force"] = True
if ret:
result.extend(ret)
except CheckpointKilledError:
raise
except Exception as exc:
raise ReproductionError(stage.relpath) from exc
if on_unchanged is not None:
on_unchanged(unchanged)
return result
def _get_steps(G, stages, downstream, single_item):
import networkx as nx
active = G.copy()
if not single_item:
# NOTE: frozen stages don't matter for single_item
for stage in G:
if stage.frozen:
# NOTE: disconnect frozen stage from its dependencies
active.remove_edges_from(G.out_edges(stage))
all_pipelines = []
for stage in stages:
if downstream:
# NOTE (py3 only):
# Python's `deepcopy` defaults to pickle/unpickle the object.
# Stages are complex objects (with references to `repo`,
# `outs`, and `deps`) that cause struggles when you try
# to serialize them. We need to create a copy of the graph
# itself, and then reverse it, instead of using
# graph.reverse() directly because it calls `deepcopy`
# underneath -- unless copy=False is specified.
nodes = nx.dfs_postorder_nodes(active.reverse(copy=False), stage)
all_pipelines += reversed(list(nodes))
else:
all_pipelines += nx.dfs_postorder_nodes(active, stage)
steps = []
for stage in all_pipelines:
if stage not in steps:
# NOTE: order of steps still matters for single_item
if single_item and stage not in stages:
continue
steps.append(stage)
return steps
def _repro_callback(experiments_callback, unchanged, stages):
experiments_callback(unchanged, stages)
| apache-2.0 |
Marketing1by1/petl | petl/test/io/test_csv.py | 3 | 7619 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from tempfile import NamedTemporaryFile
import gzip
import os
import logging
from petl.compat import PY2
from petl.test.helpers import ieq, eq_
from petl.io.csv import fromcsv, fromtsv, tocsv, appendcsv, totsv, appendtsv
logger = logging.getLogger(__name__)
debug = logger.debug
def test_fromcsv():
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
actual = fromcsv(f.name, encoding='ascii')
debug(actual)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromcsv_lineterminators():
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
for lt in b'\r', b'\n', b'\r\n':
debug(repr(lt))
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(lt.join(data))
f.close()
with open(f.name, 'rb') as g:
debug(repr(g.read()))
actual = fromcsv(f.name, encoding='ascii')
debug(actual)
ieq(expect, actual)
def test_fromcsv_quoted():
import csv
data = [b'"foo","bar"',
b'"a",1',
b'"b",2',
b'"c",2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
actual = fromcsv(f.name, quoting=csv.QUOTE_NONNUMERIC)
debug(actual)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromtsv():
data = [b'foo\tbar',
b'a\t1',
b'b\t2',
b'c\t2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
actual = fromtsv(f.name, encoding='ascii')
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromcsv_gz():
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
# '\r' not supported in PY2 because universal newline mode is
# not supported by gzip module
if PY2:
lts = b'\n', b'\r\n'
else:
lts = b'\r', b'\n', b'\r\n'
for lt in lts:
f = NamedTemporaryFile(delete=False)
f.close()
fn = f.name + '.gz'
os.rename(f.name, fn)
fz = gzip.open(fn, 'wb')
fz.write(lt.join(data))
fz.close()
actual = fromcsv(fn, encoding='ascii')
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_tocsv_appendcsv():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
f.close()
tocsv(table, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appendcsv(table2, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2',
b'd,7',
b'e,9',
b'f,1']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
def test_tocsv_noheader():
# check explicit no header
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
tocsv(table, f.name, encoding='ascii', lineterminator='\n',
write_header=False)
# check what it did
with open(f.name, 'rb') as o:
data = [b'a,1',
b'b,2',
b'c,2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
def test_totsv_appendtsv():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
f.close()
totsv(table, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo\tbar',
b'a\t1',
b'b\t2',
b'c\t2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appendtsv(table2, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo\tbar',
b'a\t1',
b'b\t2',
b'c\t2',
b'd\t7',
b'e\t9',
b'f\t1']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
def test_tocsv_appendcsv_gz():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
fn = f.name + '.gz'
f.close()
tocsv(table, fn, encoding='ascii', lineterminator='\n')
# check what it did
o = gzip.open(fn, 'rb')
try:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
finally:
o.close()
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appendcsv(table2, fn, encoding='ascii', lineterminator='\n')
# check what it did
o = gzip.open(fn, 'rb')
try:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2',
b'd,7',
b'e,9',
b'f,1']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
finally:
o.close()
def test_fromcsv_header():
header = ['foo', 'bar']
data = [b'a,1',
b'b,2',
b'c,2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
actual = fromcsv(f.name, encoding='ascii', header=header)
debug(actual)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice | mit |
dogless/airavata | airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/samples/createExperiment.py | 6 | 2463 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, ConfigParser
sys.path.append('../lib')
from apache.airavata.api import Airavata
from apache.airavata.api.ttypes import *
from apache.airavata.model.workspace.experiment.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
# Read Airavata Client properties
airavataConfig = ConfigParser.RawConfigParser()
airavataConfig.read('../conf/airavata-client.properties')
# Create a socket to the Airavata Server
transport = TSocket.TSocket(airavataConfig.get('AiravataServer', 'host'), airavataConfig.get('AiravataServer', 'port'))
# Use Buffered Protocol to speedup over raw sockets
transport = TTransport.TBufferedTransport(transport)
# Airavata currently uses Binary Protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create an Airavata client to use the protocol encoder
airavataClient = Airavata.Client(protocol)
# Connect to Airavata Server
transport.open()
# Create an experiment
experiment = Experiment()
experiment.userName = "smarru"
experiment.name = "cli-test-experiment"
experiment.description = "experiment to test python cli"
experiment.applicationId = "Echo_b22f2303-a574-43ef-a6f2-ab8e64e2d0a2"
#experiment.experimentInputs
print 'Created Experiment with Id:', airavataClient.createExperiment("sdsc", experiment)
print 'Airavata Server Version is:', airavataClient.getAPIVersion()
# Close Connection to Airavata Server
transport.close()
except Thrift.TException, tx:
print '%s' % (tx.message)
| apache-2.0 |
eceglov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py | 118 | 7503 | # Copyright (C) 2012 Zan Dobersek <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
def make_driver(self, worker_number=0, xorg_running=False, executive=None):
port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: "/mock-build"
port._server_process_constructor = MockServerProcess
if xorg_running:
port._executive._running_pids['Xorg'] = 108
driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
driver._startup_delay_secs = 0
return driver
def cleanup_driver(self, driver):
# Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
# killing the Xvfb process if present. Thus, this method should only be called from tests that do not
# intend to test the behavior of XvfbDriver.stop.
driver._xvfb_process = None
def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
self.assertTrue(driver._server_process.started)
self.assertEqual(driver._server_process.env["DISPLAY"], expected_display)
def test_start_no_pixel_tests(self):
driver = self.make_driver()
expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0")
self.cleanup_driver(driver)
def test_start_pixel_tests(self):
driver = self.make_driver()
expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
def test_start_arbitrary_worker_number(self):
driver = self.make_driver(worker_number=17)
expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
def test_next_free_display(self):
output = "Xorg /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 2)
self.cleanup_driver(driver)
output = "X /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 1)
self.cleanup_driver(driver)
output = "Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 1)
self.cleanup_driver(driver)
output = "Xvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :3 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
self.assertEqual(driver._next_free_display(), 2)
self.cleanup_driver(driver)
def test_start_next_worker(self):
driver = self.make_driver()
driver._next_free_display = lambda: 0
expected_logs = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
driver = self.make_driver()
driver._next_free_display = lambda: 3
expected_logs = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":3", pixel_tests=True)
self.cleanup_driver(driver)
def test_stop(self):
filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
class FakeXvfbProcess(object):
pid = 1234
driver._xvfb_process = FakeXvfbProcess()
driver._lock_file = '/tmp/.X42-lock'
expected_logs = "MOCK kill_process pid: 1234\n"
OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
self.assertIsNone(driver._xvfb_process)
self.assertFalse(port._filesystem.exists(driver._lock_file))
| bsd-3-clause |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/graphics/charts/spider.py | 34 | 16057 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/spider.py
# spider chart, also known as radar chart
__version__=''' $Id$ '''
__doc__="""Spider Chart
Normal use shows variation of 5-10 parameters against some 'norm' or target.
When there is more than one series, place the series with the largest
numbers first, as it will be overdrawn by each successive one.
"""
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors, isNumberOrNone,\
isNoneOrListOfNoneOrStrings, isTextAnchor,\
isNoneOrListOfNoneOrNumbers, isBoxAnchor,\
isStringOrNone, EitherOr,\
isCallable
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, PolyLine, Ellipse, \
Wedge, String, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
from reportlab.graphics.charts.piecharts import WedgeLabel
from reportlab.graphics.widgets.markers import makeMarker, uSymbol2Symbol, isSymbol
class StrandProperty(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber,desc='width'),
fillColor = AttrMapValue(isColorOrNone,desc='filling color'),
strokeColor = AttrMapValue(isColorOrNone,desc='stroke color'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone,desc='dashing pattern, e.g. (3,2)'),
symbol = AttrMapValue(EitherOr((isStringOrNone,isSymbol)), desc='Widget placed at data points.',advancedUsage=1),
symbolSize= AttrMapValue(isNumber, desc='Symbol size.',advancedUsage=1),
name = AttrMapValue(isStringOrNone, desc='Name of the strand.'),
)
def __init__(self):
self.strokeWidth = 1
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.symbol = None
self.symbolSize = 5
self.name = None
class SpokeProperty(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber,desc='width'),
fillColor = AttrMapValue(isColorOrNone,desc='filling color'),
strokeColor = AttrMapValue(isColorOrNone,desc='stroke color'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone,desc='dashing pattern, e.g. (2,1)'),
labelRadius = AttrMapValue(isNumber,desc='label radius',advancedUsage=1),
visible = AttrMapValue(isBoolean,desc="True if the spoke line is to be drawn"),
)
def __init__(self,**kw):
self.strokeWidth = 0.5
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.visible = 1
self.labelRadius = 1.05
class SpokeLabel(WedgeLabel):
def __init__(self,**kw):
WedgeLabel.__init__(self,**kw)
if '_text' not in list(kw.keys()): self._text = ''
class StrandLabel(SpokeLabel):
_attrMap = AttrMap(BASE=SpokeLabel,
format = AttrMapValue(EitherOr((isStringOrNone,isCallable)),desc="Format for the label"),
dR = AttrMapValue(isNumberOrNone,desc="radial shift for label"),
)
def __init__(self,**kw):
self.format = ''
self.dR = 0
SpokeLabel.__init__(self,**kw)
def _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty):
L = labelClass()
L._text = text
L.x = cx + radius*car
L.y = cy + radius*sar
L._pmv = angle*180/pi
L.boxAnchor = sty.boxAnchor
L.dx = sty.dx
L.dy = sty.dy
L.angle = sty.angle
L.boxAnchor = sty.boxAnchor
L.boxStrokeColor = sty.boxStrokeColor
L.boxStrokeWidth = sty.boxStrokeWidth
L.boxFillColor = sty.boxFillColor
L.strokeColor = sty.strokeColor
L.strokeWidth = sty.strokeWidth
L.leading = sty.leading
L.width = sty.width
L.maxWidth = sty.maxWidth
L.height = sty.height
L.textAnchor = sty.textAnchor
L.visible = sty.visible
L.topPadding = sty.topPadding
L.leftPadding = sty.leftPadding
L.rightPadding = sty.rightPadding
L.bottomPadding = sty.bottomPadding
L.fontName = sty.fontName
L.fontSize = sty.fontSize
L.fillColor = sty.fillColor
return L
class SpiderChart(PlotArea):
_attrMap = AttrMap(BASE=PlotArea,
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
direction = AttrMapValue( OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
strands = AttrMapValue(None, desc="collection of strand descriptor objects"),
spokes = AttrMapValue(None, desc="collection of spoke descriptor objects"),
strandLabels = AttrMapValue(None, desc="collection of strand label descriptor objects"),
spokeLabels = AttrMapValue(None, desc="collection of spoke label descriptor objects"),
)
def makeSwatchSample(self, rowNo, x, y, width, height):
baseStyle = self.strands
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
strokeWidth = getattr(style, 'strokeWidth', getattr(baseStyle, 'strokeWidth',0))
symbol = getattr(style, 'symbol', getattr(baseStyle, 'symbol',None))
ym = y+height/2.0
if fillColor is None and strokeColor is not None and strokeWidth>0:
bg = Line(x,ym,x+width,ym,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray)
elif fillColor is not None:
bg = Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray,fillColor=fillColor)
else:
bg = None
if symbol:
symbol = uSymbol2Symbol(symbol,x+width/2.,ym,fillColor)
if bg:
g = Group()
g.add(bg)
g.add(symbol)
return g
return symbol or bg
def getSeriesName(self,i,default=None):
'''return series name i or default'''
return _objStr(getattr(self.strands[i],'name',default))
def __init__(self):
PlotArea.__init__(self)
self.data = [[10,12,14,16,14,12], [6,8,10,12,9,11]]
self.labels = None # or list of strings
self.labels = ['a','b','c','d','e','f']
self.startAngle = 90
self.direction = "clockwise"
self.strands = TypedPropertyCollection(StrandProperty)
self.spokes = TypedPropertyCollection(SpokeProperty)
self.spokeLabels = TypedPropertyCollection(SpokeLabel)
self.spokeLabels._text = None
self.strandLabels = TypedPropertyCollection(StrandLabel)
self.x = 10
self.y = 10
self.width = 180
self.height = 180
def demo(self):
d = Drawing(200, 200)
d.add(SpiderChart())
return d
def normalizeData(self, outer = 0.0):
"""Turns data into normalized ones where each datum is < 1.0,
and 1.0 = maximum radius. Adds 10% at outside edge by default"""
data = self.data
assert min(list(map(min,data))) >=0, "Cannot do spider plots of negative numbers!"
norm = max(list(map(max,data)))
norm *= (1.0+outer)
if norm<1e-9: norm = 1.0
self._norm = norm
return [[e/norm for e in row] for row in data]
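# Illustration only (not a doctest): with the default demo data set in __init__
# above, the maximum value 16 defines the unit radius, so normalizeData(0.0)
# maps 16 -> 1.0 and 8 -> 0.5; passing outer=0.1 shrinks every value by a
# further factor of 1.1.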
def _innerDrawLabel(self, sty, radius, cx, cy, angle, car, sar, labelClass=StrandLabel):
"Draw a label for a given item in the list."
fmt = sty.format
value = radius*self._norm
if not fmt:
text = None
elif isinstance(fmt,str):
if fmt == 'values':
text = sty._text
else:
text = fmt % value
elif hasattr(fmt,'__call__'):
text = fmt(value)
else:
raise ValueError("Unknown formatter type %s, expected string or function" % fmt)
if text:
dR = sty.dR
if dR:
radius += dR/self._radius
L = _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty)
if dR<0: L._anti = 1
else:
L = None
return L
def draw(self):
# normalize slice data
g = self.makeBackground() or Group()
xradius = self.width/2.0
yradius = self.height/2.0
self._radius = radius = min(xradius, yradius)
cx = self.x + xradius
cy = self.y + yradius
data = self.normalizeData()
self._seriesCount = len(data)
n = len(data[0])
#labels
if self.labels is None:
labels = [''] * n
else:
labels = self.labels
#there's no point in raising an error for too few labels if
#we silently create them all in the extreme case of no labels.
i = n-len(labels)
if i>0:
labels = labels + ['']*i
S = []
STRANDS = []
STRANDAREAS = []
syms = []
labs = []
csa = []
angle = self.startAngle*pi/180
direction = self.direction == "clockwise" and -1 or 1
angleBetween = direction*(2 * pi)/float(n)
spokes = self.spokes
spokeLabels = self.spokeLabels
for i in range(n):
car = cos(angle)*radius
sar = sin(angle)*radius
csa.append((car,sar,angle))
si = self.spokes[i]
if si.visible:
spoke = Line(cx, cy, cx + car, cy + sar, strokeWidth = si.strokeWidth, strokeColor=si.strokeColor, strokeDashArray=si.strokeDashArray)
S.append(spoke)
sli = spokeLabels[i]
text = sli._text
if not text: text = labels[i]
if text:
S.append(_setupLabel(WedgeLabel, text, si.labelRadius, cx, cy, angle, car, sar, sli))
angle += angleBetween
# now plot the polygons
rowIdx = 0
strands = self.strands
strandLabels = self.strandLabels
for row in data:
# series plot
rsty = strands[rowIdx]
points = []
car, sar = csa[-1][:2]
r = row[-1]
points.append(cx+car*r)
points.append(cy+sar*r)
for i in range(n):
car, sar, angle = csa[i]
r = row[i]
points.append(cx+car*r)
points.append(cy+sar*r)
L = self._innerDrawLabel(strandLabels[(rowIdx,i)], r, cx, cy, angle, car, sar, labelClass=StrandLabel)
if L: labs.append(L)
sty = strands[(rowIdx,i)]
uSymbol = sty.symbol
# put in a marker, if it needs one
if uSymbol:
s_x = cx+car*r
s_y = cy+sar*r
s_fillColor = sty.fillColor
s_strokeColor = sty.strokeColor
s_strokeWidth = sty.strokeWidth
s_angle = 0
s_size = sty.symbolSize
if type(uSymbol) is type(''):
symbol = makeMarker(uSymbol,
size = s_size,
x = s_x,
y = s_y,
fillColor = s_fillColor,
strokeColor = s_strokeColor,
strokeWidth = s_strokeWidth,
angle = s_angle,
)
else:
symbol = uSymbol2Symbol(uSymbol,s_x,s_y,s_fillColor)
for k,v in (('size', s_size), ('fillColor', s_fillColor),
('x', s_x), ('y', s_y),
('strokeColor',s_strokeColor), ('strokeWidth',s_strokeWidth),
('angle',s_angle),):
if getattr(symbol,k,None) is None:
try:
setattr(symbol,k,v)
except:
pass
syms.append(symbol)
# make up the 'strand'
if rsty.fillColor:
strand = Polygon(points)
strand.fillColor = rsty.fillColor
strand.strokeColor = None
strand.strokeWidth = 0
STRANDAREAS.append(strand)
if rsty.strokeColor and rsty.strokeWidth:
strand = PolyLine(points)
strand.strokeColor = rsty.strokeColor
strand.strokeWidth = rsty.strokeWidth
strand.strokeDashArray = rsty.strokeDashArray
STRANDS.append(strand)
rowIdx += 1
for s in (STRANDAREAS+STRANDS+syms+S+labs): g.add(s)
return g
def sample1():
"Make a simple spider chart"
d = Drawing(400, 400)
sp = SpiderChart()
sp.x = 50
sp.y = 50
sp.width = 300
sp.height = 300
sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
sp.labels = ['a','b','c','d','e','f']
sp.strands[0].strokeColor = colors.cornsilk
sp.strands[1].strokeColor = colors.cyan
sp.strands[2].strokeColor = colors.palegreen
sp.strands[0].fillColor = colors.cornsilk
sp.strands[1].fillColor = colors.cyan
sp.strands[2].fillColor = colors.palegreen
sp.spokes.strokeDashArray = (2,2)
d.add(sp)
return d
def sample2():
"Make a spider chart with markers, but no fill"
d = Drawing(400, 400)
sp = SpiderChart()
sp.x = 50
sp.y = 50
sp.width = 300
sp.height = 300
sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
sp.labels = ['U','V','W','X','Y','Z']
sp.strands.strokeWidth = 1
sp.strands[0].fillColor = colors.pink
sp.strands[1].fillColor = colors.lightblue
sp.strands[2].fillColor = colors.palegreen
sp.strands[0].strokeColor = colors.red
sp.strands[1].strokeColor = colors.blue
sp.strands[2].strokeColor = colors.green
sp.strands.symbol = "FilledDiamond"
sp.strands[1].symbol = makeMarker("Circle")
sp.strands[1].symbol.strokeWidth = 0.5
sp.strands[1].symbol.fillColor = colors.yellow
sp.strands.symbolSize = 6
sp.strandLabels[0,3]._text = 'special'
sp.strandLabels[0,1]._text = 'one'
sp.strandLabels[0,0]._text = 'zero'
sp.strandLabels[1,0]._text = 'Earth'
sp.strandLabels[2,2]._text = 'Mars'
sp.strandLabels.format = 'values'
sp.strandLabels.dR = -5
d.add(sp)
return d
if __name__=='__main__':
d = sample1()
from reportlab.graphics.renderPDF import drawToFile
drawToFile(d, 'spider.pdf')
d = sample2()
drawToFile(d, 'spider2.pdf')
| gpl-2.0 |
kartikshah1/Test | venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 111 | 2031 | import distutils.command.install_scripts as orig
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
from setuptools.command.easy_install import get_script_args
from setuptools.command.easy_install import sys_executable
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0o777-mask)
| mit |
angr/angr | angr/flirt/build_sig.py | 1 | 10360 | # pylint:disable=consider-using-with
from typing import List, Dict
import json
import subprocess
import argparse
import tempfile
import os
import itertools
from collections import defaultdict
import angr
UNIQUE_STRING_COUNT = 20
# strings longer than MAX_UNIQUE_STRING_LEN will be truncated
MAX_UNIQUE_STRING_LEN = 70
def get_basic_info(ar_path: str) -> Dict[str,str]:
"""
Get basic information of the archive file.
"""
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
# Load arch and OS information from the first .o file
o_files = [ f for f in os.listdir(".") if f.endswith(".o") ]
if o_files:
proj = angr.Project(o_files[0], auto_load_libs=False)
arch_name = proj.arch.name.lower()
os_name = proj.simos.name.lower()
os.chdir(cwd)
return {
'arch': arch_name,
'platform': os_name,
}
def get_unique_strings(ar_path: str) -> List[str]:
"""
For Linux libraries, this method requires ar (from binutils), nm (from binutils), and strings.
"""
# get symbols
nm_output = subprocess.check_output(["nm", ar_path])
nm_lines = nm_output.decode("utf-8").split("\n")
symbols = set()
for nm_line in nm_lines:
symbol_types = "UuVvTtRrDdWwBbNn"
for symbol_type in symbol_types:
if f" {symbol_type} " in nm_line:
# parse it
symbol = nm_line[nm_line.find(f" {symbol_type}") + 3: ].strip(" ")
if "." in symbol:
symbols |= set(symbol.split("."))
else:
symbols.add(symbol)
break
# extract the archive file into a temporary directory
all_strings = set()
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
for filename in os.listdir("."):
if filename.endswith(".o"):
strings = subprocess.check_output(["strings", "-n", "8", filename])
strings = strings.decode("utf-8").split("\n")
non_symbol_strings = set()
for s in strings:
if s in symbols:
continue
if "." in s and any(subs in symbols for subs in s.split(".")):
continue
# C++ specific
if "::" in s:
continue
if "_" in s:
# make sure it's not a substring of any symbol
is_substring = False
for symbol in symbols:
if s in symbol:
is_substring = True
break
if is_substring:
continue
non_symbol_strings.add(s)
all_strings |= non_symbol_strings
os.chdir(cwd)
grouped_strings = defaultdict(set)
for s in all_strings:
grouped_strings[s[:5]].add(s)
sorted_strings = list(sorted(all_strings, key=len, reverse=True))
ctr = 0
picked = set()
unique_strings = [ ]
for s in sorted_strings:
if s[:5] in picked:
continue
unique_strings.append(s[:MAX_UNIQUE_STRING_LEN])
picked.add(s[:5])
ctr += 1
if ctr >= UNIQUE_STRING_COUNT:
break
return unique_strings
def run_pelf(pelf_path: str, ar_path: str, output_path: str):
subprocess.check_call([pelf_path, ar_path, output_path])
def run_sigmake(sigmake_path: str, sig_name: str, pat_path: str, sig_path: str):
if " " not in sig_name:
sig_name_arg = f"-n{sig_name}"
else:
sig_name_arg = f"-n\"{sig_name}\""
proc = subprocess.Popen([sigmake_path, sig_name_arg, pat_path, sig_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, stderr = proc.communicate()
if b"COLLISIONS:" in stderr:
return False
return True
def process_exc_file(exc_path: str):
"""
We are doing the stupidest thing possible: For each batch of conflicts, we pick the most likely
result based on a set of predefined rules.
TODO: Add caller-callee-based de-duplication.
"""
with open(exc_path, "r") as f:
data = f.read()
lines = data.split("\n")
# parse groups
ctr = itertools.count()
idx = 0
groups = defaultdict(dict)
for line in lines:
if line.startswith(";"):
continue
if not line:
idx = next(ctr)
else:
# parse the function name
func_name = line[:line.index("\t")].strip(" ")
groups[idx][func_name] = line
# for each group, decide the one to keep
for idx in list(groups.keys()):
g = groups[idx]
if len(g) == 1:
# don't pick anything. This is a weird case that I don't understand
continue
if all(func_name.endswith(".cold") for func_name in g):
# .cold functions. doesn't matter what we pick
continue
non_cold_names = [ ]
for func_name in g:
if func_name.endswith(".cold"):
continue
non_cold_names.append(func_name)
# sort it
non_cold_names = list(sorted(non_cold_names, key=len))
# pick the top one
the_chosen_one = non_cold_names[0]
line = g[the_chosen_one]
g[the_chosen_one] = "+" + line
# output
with open(exc_path, "w") as f:
for g in groups.values():
for line in g.values():
f.write(line + "\n")
f.write("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ar_path", help="Path of the .a file to build signatures for")
parser.add_argument("sig_name", help="Name of the signature (a string inside the signature file)")
parser.add_argument("sig_path", help="File name of the generated signature")
parser.add_argument("--compiler", help="Name of the compiler (e.g., gcc, clang). It will be stored in the meta "
"data file.")
parser.add_argument("--compiler_version", help="Version of the compiler (e.g., 6). It will be stored in the meta "
"data file.")
# parser.add_argument("--platform", help="Name of the platform (e.g., windows/linux/macos). It will be stored in
# the meta data file.")
parser.add_argument("--os", help="Name of the operating system (e.g., ubuntu/debian). It will be stored in the "
"meta data file.")
parser.add_argument("--os_version", help="Version of the operating system (e.g., 20.04). It will be stored in the "
"meta data file.")
parser.add_argument("--pelf_path", help="Path of pelf")
parser.add_argument("--sigmake_path", help="Path of sigmake")
args = parser.parse_args()
if args.pelf_path:
pelf_path = args.pelf_path
elif "pelf_path" in os.environ:
pelf_path = os.environ['pelf_path']
else:
raise ValueError("pelf_path must be specified.")
if args.sigmake_path:
sigmake_path = args.sigmake_path
elif "sigmake_path" in os.environ:
sigmake_path = os.environ['sigmake_path']
else:
raise ValueError("sigmake_path must be specified.")
compiler = args.compiler
if compiler:
compiler = compiler.lower()
compiler_version = args.compiler_version
if compiler_version:
compiler_version = compiler_version.lower()
os_name = args.os
if os_name:
os_name = os_name.lower()
os_version = args.os_version
if os_version:
os_version = os_version.lower()
# Get basic information
basic_info = get_basic_info(args.ar_path)
# Get unique strings from the library
unique_strings = get_unique_strings(args.ar_path)
# Build necessary file paths
sig_path_basename = os.path.basename(args.sig_path)
if "." in sig_path_basename:
sig_dir = os.path.dirname(args.sig_path)
filename = sig_path_basename[:sig_path_basename.rfind(".")]
exc_path = os.path.join(
sig_dir,
filename + ".exc"
)
meta_path = os.path.join(
sig_dir,
filename + ".meta"
)
else:
exc_path = args.sig_path + ".exc"
meta_path = args.sig_path + ".meta"
if os.path.isfile(exc_path):
# Remove existing exc files (if there is one)
os.remove(exc_path)
# Make a temporary directory
with tempfile.TemporaryDirectory() as tmpdirname:
ar_path = args.ar_path
basename = os.path.basename(ar_path)
# sanitize basename since otherwise sigmake is not happy with it
if basename.endswith(".a"):
basename = basename[:-2]
basename = basename.replace("+", "plus")
# sanitize signame as well
sig_name = args.sig_name
sig_name = sig_name.replace("+", "plus")
pat_path = os.path.join(tmpdirname, basename + ".pat")
run_pelf(pelf_path, ar_path, pat_path)
has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
if has_collision:
process_exc_file(exc_path)
# run sigmake again
has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
assert not has_collision
with open(meta_path, "w") as f:
metadata = {
'unique_strings': unique_strings,
}
metadata.update(basic_info)
if compiler_version:
metadata['compiler_version'] = compiler_version
if compiler:
metadata['compiler'] = compiler
if os_name:
metadata['os'] = os_name
if os_version:
metadata['os_version'] = os_version
f.write(json.dumps(metadata, indent=2))
if __name__ == "__main__":
main()
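# Example invocation (illustrative only; every path and name below is a
# placeholder, and pelf/sigmake are the IDA FLAIR utilities that may also be
# supplied through the pelf_path / sigmake_path environment variables handled
# in main() above):
#
#     python build_sig.py libfoo.a "libfoo 1.0" libfoo.sig \
#         --compiler gcc --compiler_version 9 --os ubuntu --os_version 20.04 \
#         --pelf_path /opt/flair/bin/pelf --sigmake_path /opt/flair/bin/sigmake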
| bsd-2-clause |
CydarLtd/ansible | test/runner/lib/cover.py | 26 | 7465 | """Code coverage utilities."""
from __future__ import absolute_import, print_function
import os
import re
from lib.target import (
walk_module_targets,
walk_compile_targets,
)
from lib.util import (
display,
ApplicationError,
EnvironmentConfig,
run_command,
common_environment,
)
from lib.executor import (
Delegate,
install_command_requirements,
)
COVERAGE_DIR = 'test/results/coverage'
COVERAGE_FILE = os.path.join(COVERAGE_DIR, 'coverage')
COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
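# Coverage data files under COVERAGE_DIR are expected to be named
# '<command>=<target>=<environment>=<version>=coverage.<suffix>' -- for example
# the purely hypothetical 'units=apt=docker=2.7=coverage.1234'. That layout is
# what command_coverage_combine() scans for and what get_coverage_group() below
# splits apart.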
def command_coverage_combine(args):
"""Patch paths in coverage files and merge into a single file.
:type args: CoverageConfig
:rtype: list[str]
"""
coverage = initialize_coverage(args)
modules = dict((t.module, t.path) for t in list(walk_module_targets()))
coverage_files = [os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR) if '=coverage.' in f]
ansible_path = os.path.abspath('lib/ansible/') + '/'
root_path = os.getcwd() + '/'
counter = 0
groups = {}
if args.all or args.stub:
sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
else:
sources = []
if args.stub:
groups['=stub'] = dict((source, set()) for source in sources)
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
original = coverage.CoverageData()
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
if os.path.getsize(coverage_file) == 0:
display.warning('Empty coverage file: %s' % coverage_file)
continue
try:
original.read_file(coverage_file)
except Exception as ex: # pylint: disable=locally-disabled, broad-except
display.error(str(ex))
continue
for filename in original.measured_files():
arcs = set(original.arcs(filename) or [])
if not arcs:
# This is most likely due to using an unsupported version of coverage.
display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
continue
if '/ansible_modlib.zip/ansible/' in filename:
new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif '/ansible_module_' in filename:
module = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
if module not in modules:
display.warning('Skipping coverage of unknown module: %s' % module)
continue
new_name = os.path.abspath(modules[module])
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search('^(/.*?)?/root/ansible/', filename):
new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
if group not in groups:
groups[group] = {}
arc_data = groups[group]
if filename not in arc_data:
arc_data[filename] = set()
arc_data[filename].update(arcs)
output_files = []
for group in sorted(groups):
arc_data = groups[group]
updated = coverage.CoverageData()
for filename in arc_data:
if not os.path.isfile(filename):
display.warning('Invalid coverage path: %s' % filename)
continue
updated.add_arcs({filename: list(arc_data[filename])})
if args.all:
updated.add_arcs(dict((source, []) for source in sources))
if not args.explain:
output_file = COVERAGE_FILE + group
updated.write_file(output_file)
output_files.append(output_file)
return sorted(output_files)
def command_coverage_report(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
if args.group_by or args.stub:
display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
run_command(args, env=env, cmd=['coverage', 'report'])
def command_coverage_html(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
dir_name = 'test/results/reports/%s' % os.path.basename(output_file)
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
run_command(args, env=env, cmd=['coverage', 'html', '-d', dir_name])
def command_coverage_xml(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
xml_name = 'test/results/reports/%s.xml' % os.path.basename(output_file)
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
run_command(args, env=env, cmd=['coverage', 'xml', '-o', xml_name])
def command_coverage_erase(args):
"""
:type args: CoverageConfig
"""
initialize_coverage(args)
for name in os.listdir(COVERAGE_DIR):
if not name.startswith('coverage') and '=coverage.' not in name:
continue
path = os.path.join(COVERAGE_DIR, name)
if not args.explain:
os.remove(path)
def initialize_coverage(args):
"""
:type args: CoverageConfig
:rtype: coverage
"""
if args.delegate:
raise Delegate()
if args.requirements:
install_command_requirements(args)
try:
import coverage
except ImportError:
coverage = None
if not coverage:
raise ApplicationError('You must install the "coverage" python module to use this command.')
return coverage
def get_coverage_group(args, coverage_file):
"""
:type args: CoverageConfig
:type coverage_file: str
:rtype: str
"""
parts = os.path.basename(coverage_file).split('=', 4)
if len(parts) != 5 or not parts[4].startswith('coverage.'):
return None
names = dict(
command=parts[0],
target=parts[1],
environment=parts[2],
version=parts[3],
)
group = ''
for part in COVERAGE_GROUPS:
if part in args.group_by:
group += '=%s' % names[part]
return group
class CoverageConfig(EnvironmentConfig):
"""Configuration for the coverage command."""
def __init__(self, args):
"""
:type args: any
"""
super(CoverageConfig, self).__init__(args, 'coverage')
self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: frozenset[str]
self.all = args.all if 'all' in args else False # type: bool
self.stub = args.stub if 'stub' in args else False # type: bool
| gpl-3.0 |
EdwardBeckett/fail2ban | fail2ban/client/jailsreader.py | 18 | 3062 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
from .configreader import ConfigReader
from .jailreader import JailReader
from ..helpers import getLogger
# Gets the instance of the logger.
logSys = getLogger(__name__)
class JailsReader(ConfigReader):
def __init__(self, force_enable=False, **kwargs):
"""
Parameters
----------
force_enable : bool, optional
Passed to JailReader to force enable the jails.
It is for internal use
"""
ConfigReader.__init__(self, **kwargs)
self.__jails = list()
self.__force_enable = force_enable
@property
def jails(self):
return self.__jails
def read(self):
self.__jails = list()
return ConfigReader.read(self, "jail")
def getOptions(self, section=None):
"""Reads configuration for jail(s) and adds enabled jails to __jails
"""
opts = []
self.__opts = ConfigReader.getOptions(self, "Definition", opts)
if section is None:
sections = self.sections()
else:
sections = [ section ]
# Get the options of all jails.
parse_status = True
for sec in sections:
if sec == 'INCLUDES':
continue
# use the cfg_share for filter/action caching and the same config for all
# jails (use_config=...), therefore don't read it here:
jail = JailReader(sec, force_enable=self.__force_enable,
share_config=self.share_config, use_config=self._cfg)
ret = jail.getOptions()
if ret:
if jail.isEnabled():
# We only add enabled jails
self.__jails.append(jail)
else:
logSys.error("Errors in jail %r. Skipping..." % sec)
parse_status = False
return parse_status
def convert(self, allow_no_files=False):
"""Convert read before __opts and jails to the commands stream
Parameters
----------
allow_missing : bool
Either to allow log files to be missing entirely. Primarily is
used for testing
"""
stream = list()
for opt in self.__opts:
if opt == "":
stream.append([])
# Convert jails
for jail in self.__jails:
stream.extend(jail.convert(allow_no_files=allow_no_files))
# Start jails
for jail in self.__jails:
stream.append(["start", jail.getName()])
return stream
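# Rough shape of the result (illustrative): for a single enabled jail named
# 'sshd' the stream ends with ['start', 'sshd']; everything before it comes
# from JailReader.convert() and configures that jail, so its exact contents
# depend on the jail's options and are not reproduced here.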
| gpl-2.0 |
sidsarasvati/googletest | test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
env_var_value: value of the GTEST_THROW_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
thinkopensolutions/geraldo | site/newsite/django_1_0/tests/regressiontests/mail/tests.py | 10 | 1404 | # coding: utf-8
r"""
# Tests for the django.core.mail.
>>> from django.core.mail import EmailMessage
>>> from django.utils.translation import ugettext_lazy
# Test normal ascii character case:
>>> email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
>>> message = email.message()
>>> message['Subject']
'Subject'
>>> message.get_payload()
'Content'
>>> message['From']
'[email protected]'
>>> message['To']
'[email protected]'
# Test multiple-recipient case
>>> email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]','[email protected]'])
>>> message = email.message()
>>> message['Subject']
'Subject'
>>> message.get_payload()
'Content'
>>> message['From']
'[email protected]'
>>> message['To']
'[email protected], [email protected]'
# Test for header injection
>>> email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
>>> message = email.message()
Traceback (most recent call last):
...
BadHeaderError: Header values can't contain newlines (got u'Subject\nInjection Test' for header 'Subject')
>>> email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]'])
>>> message = email.message()
Traceback (most recent call last):
...
BadHeaderError: Header values can't contain newlines (got u'Subject\nInjection Test' for header 'Subject')
"""
| lgpl-3.0 |
tudorbarascu/QGIS | python/pyplugin_installer/__init__.py | 45 | 1403 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Borys Jurgiel'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Borys Jurgiel'
# import functions for easier access
from . import installer
from .installer import initPluginInstaller # NOQA
def instance():
if not installer.pluginInstaller:
installer.initPluginInstaller()
return installer.pluginInstaller
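# Minimal usage sketch (hypothetical caller code):
#
#     from pyplugin_installer import instance
#     installer = instance() # lazily creates installer.pluginInstaller as coded above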
| gpl-2.0 |
tudorbarascu/QGIS | python/plugins/processing/algs/qgis/Relief.py | 15 | 6936 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Relief.py
---------------------
Date : December 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon, QColor
from qgis.analysis import QgsRelief
from qgis.core import (QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterFileDestination,
QgsRasterFileWriter,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ParameterReliefColors(QgsProcessingParameterDefinition):
def __init__(self, name='', description='', parent=None, optional=True):
super().__init__(name, description, None, optional)
self.parent = parent
self.setMetadata({'widget_wrapper': 'processing.algs.qgis.ui.ReliefColorsWidget.ReliefColorsWidgetWrapper'})
def type(self):
return 'relief_colors'
def clone(self):
return ParameterReliefColors(self.name(), self.description(), self.parent,
self.flags() & QgsProcessingParameterDefinition.FlagOptional)
@staticmethod
def valueToColors(value):
if value is None:
return None
if value == '':
return None
if isinstance(value, str):
return value.split(';')
else:
return ParameterReliefColors.colorsToString(value)
@staticmethod
def colorsToString(colors):
s = ''
for c in colors:
s += '{:f}, {:f}, {:d}, {:d}, {:d};'.format(c[0],
c[1],
c[2],
c[3],
c[4])
return s[:-1]
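# For illustration only: two classes (lower bound, upper bound, R, G, B) would
# serialise via colorsToString() to
#     '0.000000, 100.000000, 0, 128, 0;100.000000, 200.000000, 255, 255, 0'
# and valueToColors() splits such a string back apart on ';'.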
class Relief(QgisAlgorithm):
INPUT = 'INPUT'
Z_FACTOR = 'Z_FACTOR'
AUTO_COLORS = 'AUTO_COLORS'
COLORS = 'COLORS'
OUTPUT = 'OUTPUT'
FREQUENCY_DISTRIBUTION = 'FREQUENCY_DISTRIBUTION'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'dem.png'))
def group(self):
return self.tr('Raster terrain analysis')
def groupId(self):
return 'rasterterrainanalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Elevation layer')))
self.addParameter(QgsProcessingParameterNumber(self.Z_FACTOR,
self.tr('Z factor'), type=QgsProcessingParameterNumber.Double,
minValue=0.00, defaultValue=1.0))
self.addParameter(QgsProcessingParameterBoolean(self.AUTO_COLORS,
self.tr('Generate relief classes automatically'),
defaultValue=False))
self.addParameter(ParameterReliefColors(self.COLORS,
self.tr('Relief colors'),
self.INPUT,
True))
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Relief')))
self.addParameter(QgsProcessingParameterFileDestination(self.FREQUENCY_DISTRIBUTION,
self.tr('Frequency distribution'),
'CSV files (*.csv)',
optional=True,
createByDefault=False))
def name(self):
return 'relief'
def displayName(self):
return self.tr('Relief')
def processAlgorithm(self, parameters, context, feedback):
inputFile = self.parameterAsRasterLayer(parameters, self.INPUT, context).source()
zFactor = self.parameterAsDouble(parameters, self.Z_FACTOR, context)
automaticColors = self.parameterAsBoolean(parameters, self.AUTO_COLORS, context)
outputFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
frequencyDistribution = self.parameterAsFileOutput(parameters, self.FREQUENCY_DISTRIBUTION, context)
outputFormat = QgsRasterFileWriter.driverForExtension(os.path.splitext(outputFile)[1])
relief = QgsRelief(inputFile, outputFile, outputFormat)
if automaticColors:
reliefColors = relief.calculateOptimizedReliefClasses()
else:
colors = ParameterReliefColors.valueToColors(parameters[self.COLORS])
if colors is None or len(colors) == 0:
raise QgsProcessingException(
self.tr('Specify relief colors or activate "Generate relief classes automatically" option.'))
reliefColors = []
for c in colors:
v = c.split(',')
color = QgsRelief.ReliefColor(QColor(int(v[2]), int(v[3]), int(v[4])),
float(v[0]),
float(v[1]))
reliefColors.append(color)
relief.setReliefColors(reliefColors)
relief.setZFactor(zFactor)
if frequencyDistribution:
relief.exportFrequencyDistributionToCsv(frequencyDistribution)
relief.processRaster(feedback)
return {self.OUTPUT: outputFile, self.FREQUENCY_DISTRIBUTION: frequencyDistribution}
| gpl-2.0 |
drewp/tahoe-lafs | src/allmydata/util/abbreviate.py | 1 | 2005 |
import re
HOUR = 3600
DAY = 24*3600
WEEK = 7*DAY
MONTH = 30*DAY
YEAR = 365*DAY
def abbreviate_time(s):
def _plural(count, unit):
count = int(count)
if count == 1:
return "%d %s" % (count, unit)
return "%d %ss" % (count, unit)
if s is None:
return "unknown"
if s < 120:
return _plural(s, "second")
if s < 3*HOUR:
return _plural(s/60, "minute")
if s < 2*DAY:
return _plural(s/HOUR, "hour")
if s < 2*MONTH:
return _plural(s/DAY, "day")
if s < 4*YEAR:
return _plural(s/MONTH, "month")
return _plural(s/YEAR, "year")
def abbreviate_space(s, SI=True):
if s is None:
return "unknown"
if SI:
U = 1000.0
isuffix = "B"
else:
U = 1024.0
isuffix = "iB"
def r(count, suffix):
return "%.2f %s%s" % (count, suffix, isuffix)
if s < 1024: # 1000-1023 get emitted as bytes, even in SI mode
return "%d B" % s
if s < U*U:
return r(s/U, "k")
if s < U*U*U:
return r(s/(U*U), "M")
if s < U*U*U*U:
return r(s/(U*U*U), "G")
if s < U*U*U*U*U:
return r(s/(U*U*U*U), "T")
return r(s/(U*U*U*U*U), "P")
def abbreviate_space_both(s):
return "(%s, %s)" % (abbreviate_space(s, True),
abbreviate_space(s, False))
def parse_abbreviated_size(s):
if s is None or s == "":
return None
m = re.match(r"^(\d+)([kKmMgG]?[iB]?[bB]?)$", s)
if not m:
raise ValueError("unparseable value %s" % s)
number, suffix = m.groups()
suffix = suffix.upper()
if suffix.endswith("B"):
suffix = suffix[:-1]
multiplier = {"": 1,
"I": 1,
"K": 1000,
"M": 1000 * 1000,
"G": 1000 * 1000 * 1000,
"KI": 1024,
"MI": 1024*1024,
"GI": 1024*1024*1024,
}[suffix]
return int(number) * multiplier
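# A few illustrative values, derived from the functions above (not an official
# test suite for this module):
#
#     abbreviate_time(90) -> '90 seconds'
#     abbreviate_space(1000000) -> '1.00 MB' (SI) or '976.56 kiB' (SI=False)
#     parse_abbreviated_size('2MiB') -> 2097152
#     parse_abbreviated_size('10K') -> 10000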
| gpl-2.0 |
afandria/sky_engine | third_party/ply/yacc.py | 465 | 128492 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might
# be considered good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# Compatibility
try:
MAXINT = sys.maxint
except AttributeError:
MAXINT = sys.maxsize
# Python 2.x/3.0 compatibility.
def load_ply_lex():
if sys.version_info[0] < 3:
import lex
else:
import ply.lex as lex
return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def debug(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
info = debug
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
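# Illustrative sketch (not part of the original module): PlyLogger only formats
# messages onto a file-like object, so any object exposing debug/info/warning/
# error methods (for example a logging.Logger) can stand in for it.
def _example_ply_logger():
    log = PlyLogger(sys.stderr)              # sys is imported at the top of this module
    log.warning("unused token %s", "FOO")    # writes "WARNING: unused token FOO"
    log.info("wrote %d states", 12)          # info is an alias for debug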
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit]+" ..."
result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return "<%s @ 0x%x>" % (type(r).__name__,id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self): return self.type
def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser= None
def __getitem__(self,n):
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
def __setitem__(self,n,v):
self.slice[n].value = v
def __getslice__(self,i,j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
def set_lineno(self,n,lineno):
self.slice[n].lineno = lineno
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
return startline,endline
def lexpos(self,n):
return getattr(self.slice[n],"lexpos",0)
def lexspan(self,n):
startpos = getattr(self.slice[n],"lexpos",0)
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
def error(self):
raise SyntaxError
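# Illustrative sketch (not part of the original module): grammar rule functions
# in user code receive a YaccProduction as 'p'.  The rule below is hypothetical
# (it is never registered with a parser) and only shows how the indexing and
# position helpers described above are typically used.
def _example_rule_p_expr_plus(p):
    'expr : expr PLUS term'
    p[0] = p[1] + p[3]                    # item assignment sets the value of the result symbol
    startline, endline = p.linespan(1)    # line range covered by the first 'expr'
    pluspos = p.lexpos(2)                 # lexer position of the PLUS token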
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self,lrtab,errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
def errok(self):
self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug,int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# --! DEBUG
debug.info("PLY: PARSE DEBUG START")
# --! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = "$end"
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
# --! DEBUG
debug.debug('')
debug.debug('State : %s', state)
# --! DEBUG
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = "$end"
# --! DEBUG
debug.debug('Stack : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
# --! DEBUG
debug.debug("Action : Shift and goto state %s", t)
# --! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
# --! DEBUG
if plen:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
else:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
# --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n,"value",None)
# --! DEBUG
debug.info("Done : Returning %s", format_result(result))
debug.info("PLY: PARSE DEBUG END")
# --! DEBUG
return result
if t == None:
# --! DEBUG
debug.error('Error : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we also call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == "$end":
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != "$end":
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == "$end":
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we also call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we also call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
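# Illustrative sketch (not part of the original module): user code does not build
# LRParser directly.  It calls the module-level yacc() factory (defined later in
# this file), which returns an LRParser whose parse() method dispatches to the
# parsedebug()/parseopt*() variants above.
def _example_build_and_parse(data):
    parser = yacc()                       # build tables from the caller's p_* rule functions
    return parser.parse(data, debug=0)    # debug=1 (or a PlyLogger) selects parsedebug()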
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = [ ]
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
self.str = "%s -> <empty>" % self.name
def __str__(self):
return self.str
def __repr__(self):
return "Production("+str(self)+")"
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self,index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
p = LRItem(self,n)
# Precompute the list of productions immediately following. Hack. Remove later
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError,KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self,str,name,len,func,file,line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return "MiniProduction(%s)" % self.str
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here are
# the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self,p,n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = { }
self.prod.insert(n,".")
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = "%s -> %s" % (self.name," ".join(self.prod))
else:
s = "%s -> <empty>" % self.name
return s
def __repr__(self):
return "LRItem("+str(self)+")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
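# Illustrative sketch (not part of the original module): with hypothetical
# terminals PLUS and NUMBER, the rightmost terminal of a rule body is what
# drives its default precedence in add_production() below.
def _example_rightmost_terminal():
    terminals = {'PLUS': [], 'NUMBER': []}
    assert rightmost_terminal(['expr', 'PLUS', 'term'], terminals) == 'PLUS'
    assert rightmost_terminal(['expr', 'term'], terminals) is None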
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError): pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = { } # A dictionary of precomputed FIRST(x) symbols
self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = { } # Precedence rules that were actually used by the grammer.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self,prodname,syms,func=None,file='',line=0):
if prodname in self.Terminals:
raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
if prodname == 'error':
raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
if not _is_identifier.match(prodname):
raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
# Look for literal tokens
for n,s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
if not c in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
if syms[-2] != '%prec':
raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
precname = syms[-1]
prodprec = self.Precedence.get(precname,None)
if not prodprec:
raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
else:
self.UsedPrecedence[precname] = 1
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms,self.Terminals)
prodprec = self.Precedence.get(precname,('right',0))
# See if the rule is already in the rulemap
map = "%s -> %s" % (prodname,syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
"Previous definition at %s:%d" % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if not prodname in self.Nonterminals:
self.Nonterminals[prodname] = [ ]
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if not t in self.Nonterminals:
self.Nonterminals[t] = [ ]
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber,prodname,syms,prodprec,func,file,line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [ p ]
return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self,start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError("start symbol %s undefined" % start)
self.Productions[0] = Production(0,"S'",[start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if reachable[s]:
# We've already reached symbol s.
return
reachable[s] = 1
for p in self.Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r)
reachable = { }
for s in list(self.Terminals) + list(self.Nonterminals):
reachable[s] = 0
mark_reachable_from( self.Productions[0].prod[0] )
return [s for s in list(self.Nonterminals)
if not reachable[s]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = 1
terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s,term) in terminates.items():
if not term:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p: continue
for s in p.prod:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
result.append((s,p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s,v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s,v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname,self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append( f )
some_change = 1
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = [ ]
if not start:
start = self.Productions[1].name
self.Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in self.Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if not didadd: break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while 1:
if i > len(p):
lri = None
else:
lri = LRItem(p,i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError,KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri: break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
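# Illustrative sketch (not part of the original module): driving the Grammar
# class by hand.  Normal PLY use goes through the yacc() entry point, which
# performs these same steps; the token and rule names below are hypothetical.
def _example_grammar_sets():
    g = Grammar(['NUMBER', 'PLUS'])
    g.add_production('expr', ['expr', 'PLUS', 'term'])
    g.add_production('expr', ['term'])
    g.add_production('term', ['NUMBER'])
    g.set_start('expr')
    g.build_lritems()
    first = g.compute_first()       # e.g. first['expr'] == ['NUMBER']
    follow = g.compute_follow()     # e.g. 'PLUS' and '$end' end up in follow['expr']
    return first, follow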
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError): pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self,module):
if isinstance(module,types.ModuleType):
parsetab = module
else:
if sys.version_info[0] < 3:
exec("import %s as parsetab" % module)
else:
env = { }
exec("import %s as parsetab" % module, env, env)
parsetab = env['parsetab']
if parsetab._tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self,filename):
try:
import cPickle as pickle
except ImportError:
import pickle
in_f = open(filename,"rb")
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self,pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
N = { }
for x in X:
N[x] = 0
stack = []
F = { }
for x in X:
if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
return F
def traverse(x,N,stack,F,X,R,FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y,N,stack,F,X,R,FP)
N[x] = min(N[x],N[y])
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
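# Illustrative sketch (not part of the original module): digraph() computes
# F(x) = F'(x) U union{ F(y) | x R y } over a small hypothetical relation.
def _example_digraph():
    X = ['a', 'b', 'c']
    R = lambda x: {'a': ['b'], 'b': ['c'], 'c': []}[x]    # a R b, b R c
    FP = lambda x: {'a': [1], 'b': [2], 'c': [3]}[x]      # the base function F'(x)
    F = digraph(X, R, FP)
    assert sorted(F['a']) == [1, 2, 3]                    # F(a) picks up F'(b) and F'(c)
    assert sorted(F['b']) == [2, 3]
    assert F['c'] == [3]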
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self,grammar,method='LALR',log=None):
if method not in ['SLR','LALR']:
raise LALRError("Unsupported method %s" % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self,I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = 1
while didadd:
didadd = 0
for j in J:
for x in j.lr_after:
if getattr(x,"lr0_added",0) == self._add_count: continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = 1
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I),x),None)
if g: return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x,None)
if not s:
s = { }
self.lr_goto_cache[x] = s
gs = [ ]
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n),None)
if not s1:
s1 = { }
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end',None)
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I),x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I,x)
if not g: continue
if id(g) in self.lr0_cidhash: continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
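#
# Illustrative example (hypothetical grammar, not part of PLY itself): given
#
#     empty :
#     optsign : empty
#             | PLUS
#             | MINUS
#
# 'empty' is marked nullable because it has a zero-length production, and then
# 'optsign' is marked nullable because its 'empty' alternative consists only of
# nullable symbols. The loop repeats until no new nullable non-terminals appear.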
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
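#
# Illustrative example (hypothetical items, for clarification only): if state 3
# contains the item "stmt -> IF . expr THEN stmt", the symbol after the dot is
# the non-terminal 'expr', so (3,'expr') is added to the result. An item such
# as "expr -> expr . PLUS term" contributes nothing here because PLUS is a
# terminal.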
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
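#
# Illustrative example (hypothetical grammar): for the transition (p,'expr'),
# goto(C[p],'expr') is computed and every terminal appearing immediately after
# the dot in the resulting items is collected. If that goto set contains
# "stmt -> expr . SEMI" and "expr -> expr . PLUS term", then DR(p,expr)
# includes SEMI and PLUS.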
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
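# Illustrative example (hypothetical grammar): with the productions
#
#     stmt : A expr opt
#     opt :
#
# 'opt' is nullable. Starting from the item "stmt -> . A expr opt" in state p',
# the forward walk reaches the state j where the dot sits in front of 'expr';
# since the remaining suffix 'opt' derives the empty string, the relation
# (j,expr) INCLUDES (p',stmt) is recorded -- exactly the check performed by the
# inner while/else loop in the code below.
#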
# -----------------------------------------------------------------------------
def compute_lookback_includes(self,C,trans,nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state,N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N: continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j,t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals: break # No, forget it
if not p.prod[li] in nullable: break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j,t))
g = self.lr0_goto(C[j],t) # Go to next set
j = self.lr0_cidhash.get(id(g),-1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name: continue
if r.len != p.len: continue
i = 0
# This loop compares a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]: break
i = i + 1
else:
lookb.append((j,r))
for i in includes:
if not i in includedict: includedict[i] = []
includedict[i].append((state,N))
lookdict[(state,N)] = lookb
return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
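#
# Note (sketch of how the equation above is solved): the same digraph()
# fixed-point routine used for the read sets is reused here. FP seeds each
# transition with its previously computed Read set, and R links (p,A) to every
# (p',B) such that (p,A) INCLUDES (p',B), so Follow(p,A) absorbs Follow(p',B)
# for all of its INCLUDES targets.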
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self,C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
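#
# Note on the action encoding used below: for a given state and lookahead
# token, st_action[a] > 0 means "shift and go to that state",
# st_action[a] < 0 means "reduce using rule number -st_action[a]",
# st_action[a] == 0 means "accept", and None marks an error entry
# (used when resolving nonassoc conflicts).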
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = { } # Action production array (temporary)
log.info("Parsing method: %s", self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [ ] # List of actions
st_action = { }
st_actionp = { }
st_goto = { }
log.info("")
log.info("state %d", st)
log.info("")
for p in I:
log.info(" (%d) %s", p.number, str(p))
log.info("")
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action["$end"] = 0
st_actionp["$end"] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec,slevel = Productions[st_actionp[a].number].prec
rprec,rlevel = Precedence.get(a,('right',0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp,rejectp = pp,oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp,rejectp = oldp,pp
self.rr_conflicts.append((st,chosenp,rejectp))
log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I,a)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
# We are in a shift state
actlist.append((a,p,"shift and go to state %d" % j))
r = st_action.get(a,None)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError("Shift/shift conflict in state %d" % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec,rlevel = Productions[st_actionp[a].number].prec
sprec,slevel = Precedence.get(a,('right',0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = { }
for a,p,m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(" %-15s %s",a,m)
_actprint[(a,m)] = 1
log.info("")
# Print the actions that were not used. (debugging)
not_used = 0
for a,p,m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a,m) in _actprint:
log.debug(" ! %-15s [ %s ]",a,m)
not_used = 1
_actprint[(a,m)] = 1
if not_used:
log.debug("")
# Construct the goto table for this state
nkeys = { }
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I,n)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
st_goto[n] = j
log.info(" %-30s shift and go to state %d",n,j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write_table()
#
# This function writes the LR parsing tables to a file
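#
# Rough sketch of the generated module (names and values below are purely
# illustrative, not taken from a real grammar):
#
#     _tabversion = '3.2'
#     _lr_method = 'LALR'
#     _lr_signature = '...'
#     _lr_action_items = {'NUMBER':([0,3,],[4,4,]),'$end':([1,4,],[0,-1,]),}
#     _lr_goto_items = {'expression':([0,],[1,]),}
#     _lr_productions = [
#         ("S' -> expression","S'",1,None,None,None),
#         ('expression -> NUMBER','expression',1,'p_expression','calc.py',12),
#     ]
#
# The small loops emitted below rebuild the nested _lr_action/_lr_goto
# dictionaries from these flattened item lists when the table module is imported.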
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in self.lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in self.lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in self.lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in self.lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in self.lr_productions:
if p.func:
f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
else:
f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
f.write("]\n")
f.close()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to create '%s'\n" % filename)
sys.stderr.write(str(e)+"\n")
return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
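#
# For example, yacc() below calls get_caller_module_dict(2) so that, when
# yacc() is invoked at module level with no explicit module= argument, the
# returned dictionary contains that module's 'tokens', 'precedence' and
# p_* rule functions.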
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
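#
# Illustrative example (hypothetical rule): a docstring such as
#
#     expression : expression PLUS term
#                | term
#
# is split line by line and yields tuples of the form
# (file, line, 'expression', ['expression','PLUS','term']) and
# (file, line, 'expression', ['term']); the '|' form reuses the previous
# production name.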
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p: continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
grammar.append((file,dline,prodname,syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self,pdict,log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.files = {}
self.grammar = []
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_files()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(" ".join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError,ValueError):
pass
return sig.digest()
# -----------------------------------------------------------------------------
# validate_files()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_files(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for filename in self.files.keys():
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
continue
counthash = { }
for linen,l in enumerate(lines):
linen += 1
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start,str):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func,types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = 1
return
eline = func_code(self.error_func).co_firstlineno
efile = func_code(self.error_func).co_filename
self.files[efile] = 1
if (func_code(self.error_func).co_argcount != 1+ismethod):
self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
self.error = 1
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = 1
return
terminals = {}
for n in self.tokens:
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get("precedence",None)
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec,(list,tuple)):
self.log.error("precedence must be a list or tuple")
self.error = 1
return
for level,p in enumerate(self.prec):
if not isinstance(p,(list,tuple)):
self.log.error("Bad precedence table")
self.error = 1
return
if len(p) < 2:
self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
self.error = 1
return
assoc = p[0]
if not isinstance(assoc,str):
self.log.error("precedence associativity must be a string")
self.error = 1
return
for term in p[1:]:
if not isinstance(term,str):
self.log.error("precedence items must be strings")
self.error = 1
return
preclist.append((term,assoc,level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if name[:2] != 'p_': continue
if name == 'p_error': continue
if isinstance(item,(types.FunctionType,types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line,file,name,item.__doc__))
# Sort all of the actions by line number
p_functions.sort()
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error("no rules of the form p_rulename are defined")
self.error = 1
return
for line, file, name, doc in self.pfuncs:
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func_code(func).co_argcount > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
self.error = 1
elif func_code(func).co_argcount < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
self.error = 1
elif not func.__doc__:
self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
else:
try:
parsed_g = parse_grammar(doc,file,line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError:
e = sys.exc_info()[1]
self.log.error(str(e))
self.error = 1
# Looks like a valid grammar rule
# Mark the file in which defined.
self.files[file] = 1
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n,v in self.pdict.items():
if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_' and n != 'p_error':
self.log.warning("'%s' not defined as a function", n)
if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
(isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
func_code(v).co_filename, func_code(v).co_firstlineno,n)
except Exception:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
debuglog=None, errorlog = None, picklefile=None):
global parse # Reference to the parsing method of the last built parser
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
pdict = dict(_items)
else:
pdict = get_caller_module_dict(2)
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict,log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError("Unable to build parser")
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
except Exception:
e = sys.exc_info()[1]
errorlog.warning("There was a problem loading the table file: %s", repr(e))
except VersionError:
e = sys.exc_info()
errorlog.warning(str(e))
except Exception:
pass
if debuglog is None:
if debug:
debuglog = PlyLogger(open(debugfile,"w"))
else:
debuglog = NullLogger()
debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
errors = 0
# Validate the parser information
if pinfo.validate_all():
raise YaccError("Unable to build parser")
if not pinfo.error_func:
errorlog.warning("no p_error() function is defined")
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term,assoc,level)
except GrammarError:
e = sys.exc_info()[1]
errorlog.warning("%s",str(e))
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname,syms,funcname,file,line)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error("%s",str(e))
errors = 1
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error(str(e))
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
errors = 1
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info("")
debuglog.info("Unused terminals:")
debuglog.info("")
for term in unused_terminals:
errorlog.warning("Token '%s' defined, but not used", term)
debuglog.info(" %s", term)
# Print out all productions to the debug log
if debug:
debuglog.info("")
debuglog.info("Grammar")
debuglog.info("")
for n,p in enumerate(grammar.Productions):
debuglog.info("Rule %-5d %s", n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning("There is 1 unused token")
if len(unused_terminals) > 1:
errorlog.warning("There are %d unused tokens", len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning("There is 1 unused rule")
if len(unused_rules) > 1:
errorlog.warning("There are %d unused rules", len(unused_rules))
if debug:
debuglog.info("")
debuglog.info("Terminals, with rules where they appear")
debuglog.info("")
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
debuglog.info("")
debuglog.info("Nonterminals, with rules where they appear")
debuglog.info("")
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info("")
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning("Symbol '%s' is unreachable",u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error("Infinite recursion detected for symbol '%s'", inf)
errors = 1
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug("Generating %s tables", method)
lr = LRGeneratedTable(grammar,method,debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning("1 shift/reduce conflict")
elif num_sr > 1:
errorlog.warning("%d shift/reduce conflicts", num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning("1 reduce/reduce conflict")
elif num_rr > 1:
errorlog.warning("%d reduce/reduce conflicts", num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning("")
debuglog.warning("Conflicts:")
debuglog.warning("")
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
already_reported = {}
for state, rule, rejected in lr.rr_conflicts:
if (state,id(rule),id(rejected)) in already_reported:
continue
debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
debuglog.warning("rejected rule (%s) in state %d", rejected,state)
errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
errorlog.warning("rejected rule (%s) in state %d", rejected, state)
already_reported[state,id(rule),id(rejected)] = 1
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning("Rule (%s) is never reduced", rejected)
errorlog.warning("Rule (%s) is never reduced", rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
lr.write_table(tabmodule,outputdir,signature)
# Write a pickled version of the tables
if picklefile:
lr.pickle_table(picklefile,signature)
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
| bsd-3-clause |
09zwcbupt/undergrad_thesis | ext/poxdesk/qx/tool/pylib/graph/algorithms/heuristics/Euclidean.py | 4 | 3477 | # Copyright (c) 2008-2009 Pedro Matiello <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
A* heuristic for euclidean graphs.
"""
# Imports
import warnings
class euclidean(object):
"""
A* heuristic for Euclidean graphs.
This heuristic has three requirements:
1. All nodes should have the attribute 'position';
2. The weight of all edges should be the euclidean distance between the nodes it links;
3. The C{optimize()} method should be called before the heuristic search.
A small example for clarification:
>>> g = graph.graph()
>>> g.add_nodes(['A','B','C'])
>>> g.add_node_attribute('A', ('position',(0,0)))
>>> g.add_node_attribute('B', ('position',(1,1)))
>>> g.add_node_attribute('C', ('position',(0,2)))
>>> g.add_edge('A','B', wt=2)
>>> g.add_edge('B','C', wt=2)
>>> g.add_edge('A','C', wt=4)
>>> h = graph.heuristics.euclidean()
>>> h.optimize(g)
>>> g.heuristic_search('A', 'C', h)
"""
def __init__(self):
"""
Initialize the heuristic object.
"""
self.distances = {}
def optimize(self, graph):
"""
Build a dictionary mapping each pair of nodes to a number (the distance between them).
@type graph: graph
@param graph: Graph.
"""
for start in graph.nodes():
for end in graph.nodes():
for each in graph.get_node_attributes(start):
if (each[0] == 'position'):
start_attr = each[1]
break
for each in graph.get_node_attributes(end):
if (each[0] == 'position'):
end_attr = each[1]
break
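# Note: the loop below accumulates the squared Euclidean distance
# (no square root is taken before storing it in self.distances).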
dist = 0
for i in xrange(len(start_attr)):
dist = dist + (float(start_attr[i]) - float(end_attr[i]))**2
self.distances[(start,end)] = dist
def __call__(self, start, end):
"""
Estimate how far start is from end.
@type start: node
@param start: Start node.
@type end: node
@param end: End node.
"""
assert len(self.distances.keys()) > 0, "You need to optimize this heuristic for your graph before it can be used to estimate."
return self.distances[(start,end)] | gpl-3.0 |
MichaelTong/cassandra-rapid | doc/source/_util/cql.py | 64 | 6877 | # -*- coding: utf-8 -*-
"""
CQL pygments lexer
~~~~~~~~~~~~~~~~~~
Lexer for the Cassandra Query Language (CQL).
This is heavily inspired by the pygments SQL lexer (and the Postgres one in particular) but adapted to CQL
keywords and specificities.
TODO: This has been hacked quickly, but once it's more tested, we could submit it upstream.
In particular, we have a lot of keywords whose meaning depends on the context and we could potentially improve
their handling. For instance, SET is a keyword, but also a type name (that's why currently we also consider
map and list as keywords, not types; we could disambiguate by looking if there is a '<' afterwards). Or things
like USERS, which is used in some documentation examples as a table name but is a keyword too (we could
only consider it a keyword if it comes after LIST, for instance). Similarly, type names are not reserved, so they
are sometimes used as column identifiers (also, timestamp is both a type and a keyword). I "think" we can
somewhat disambiguate through "states", but it is unclear how far it's worth going.
We could also add the predefined functions?
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words
from pygments.token import Punctuation, Whitespace, Error, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic, Literal
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.util import iteritems
__all__ = [ 'CQLLexer' ]
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
KEYWORDS = (
'SELECT',
'FROM',
'AS',
'WHERE',
'AND',
'KEY',
'KEYS',
'ENTRIES',
'FULL',
'INSERT',
'UPDATE',
'WITH',
'LIMIT',
'PER',
'PARTITION',
'USING',
'USE',
'DISTINCT',
'COUNT',
'SET',
'BEGIN',
'UNLOGGED',
'BATCH',
'APPLY',
'TRUNCATE',
'DELETE',
'IN',
'CREATE',
'KEYSPACE',
'SCHEMA',
'KEYSPACES',
'COLUMNFAMILY',
'TABLE',
'MATERIALIZED',
'VIEW',
'INDEX',
'CUSTOM',
'ON',
'TO',
'DROP',
'PRIMARY',
'INTO',
'VALUES',
'TIMESTAMP',
'TTL',
'CAST',
'ALTER',
'RENAME',
'ADD',
'TYPE',
'COMPACT',
'STORAGE',
'ORDER',
'BY',
'ASC',
'DESC',
'ALLOW',
'FILTERING',
'IF',
'IS',
'CONTAINS',
'GRANT',
'ALL',
'PERMISSION',
'PERMISSIONS',
'OF',
'REVOKE',
'MODIFY',
'AUTHORIZE',
'DESCRIBE',
'EXECUTE',
'NORECURSIVE',
'MBEAN',
'MBEANS',
'USER',
'USERS',
'ROLE',
'ROLES',
'SUPERUSER',
'NOSUPERUSER',
'PASSWORD',
'LOGIN',
'NOLOGIN',
'OPTIONS',
'CLUSTERING',
'TOKEN',
'WRITETIME',
'NULL',
'NOT',
'EXISTS',
'MAP',
'LIST',
'NAN',
'INFINITY',
'TUPLE',
'TRIGGER',
'STATIC',
'FROZEN',
'FUNCTION',
'FUNCTIONS',
'AGGREGATE',
'SFUNC',
'STYPE',
'FINALFUNC',
'INITCOND',
'RETURNS',
'CALLED',
'INPUT',
'LANGUAGE',
'OR',
'REPLACE',
'JSON',
'LIKE',
)
DATATYPES = (
'ASCII',
'BIGINT',
'BLOB',
'BOOLEAN',
'COUNTER',
'DATE',
'DECIMAL',
'DOUBLE',
'EMPTY',
'FLOAT',
'INET',
'INT',
'SMALLINT',
'TEXT',
'TIME',
'TIMESTAMP',
'TIMEUUID',
'TINYINT',
'UUID',
'VARCHAR',
'VARINT',
)
def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
The lexer is chosen by looking for a nearby LANGUAGE declaration, or assumed
to be java if no LANGUAGE has been found.
"""
l = None
m = language_re.match(lexer.text[max(0, match.start()-100):match.start()])
if m is not None:
l = lexer._get_lexer(m.group(1))
else:
l = lexer._get_lexer('java')
# 1 = $, 2 = delimiter, 3 = $
yield (match.start(1), String, match.group(1))
yield (match.start(2), String.Delimiter, match.group(2))
yield (match.start(3), String, match.group(3))
# 4 = string contents
if l:
for x in l.get_tokens_unprocessed(match.group(4)):
yield x
else:
yield (match.start(4), String, match.group(4))
# 5 = $, 6 = delimiter, 7 = $
yield (match.start(5), String, match.group(5))
yield (match.start(6), String.Delimiter, match.group(6))
yield (match.start(7), String, match.group(7))
class CQLLexer(RegexLexer):
"""
Lexer for the Cassandra Query Language.
"""
name = 'Cassandra Query Language'
aliases = ['cql']
filenames = ['*.cql']
mimetypes = ['text/x-cql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*\n?', Comment.Single),
(r'//.*\n?', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(' + '|'.join(s.replace(" ", r"\s+")
for s in DATATYPES)
+ r')\b', Name.Builtin),
(words(KEYWORDS, suffix=r'\b'), Keyword),
(r'[+*/<>=~!@#%^&|`?-]+', Operator),
(r'\$\d+', Name.Variable),
# Using Number instead of the more accurate Literal because the latter doesn't seem to be highlighted in most
# styles
(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', Number), # UUIDs
(r'0x[0-9a-fA-F]+', Number), # Blobs
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+', Number.Integer),
(r"((?:E|U&)?)(')", bygroups(String.Affix, String.Single), 'string'),
# quoted identifier
(r'((?:U&)?)(")', bygroups(String.Affix, String.Name), 'quoted-ident'),
(r'(?s)(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', language_callback),
(r'[a-z_]\w*', Name),
(r'[;:()\[\]{},.]', Punctuation),
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
],
'string': [
(r"[^']+", String.Single),
(r"''", String.Single),
(r"'", String.Single, '#pop'),
],
'quoted-ident': [
(r'[^"]+', String.Name),
(r'""', String.Name),
(r'"', String.Name, '#pop'),
],
}
def get_tokens_unprocessed(self, text, *args):
# Have a copy of the entire text to be used by `language_callback`.
self.text = text
for x in RegexLexer.get_tokens_unprocessed(self, text, *args):
yield x
def _get_lexer(self, lang):
return get_lexer_by_name(lang, **self.options)
| apache-2.0 |
code4futuredotorg/reeborg_tw | src/libraries/brython_old/Lib/unittest/test/testmock/testcallable.py | 739 | 4234 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import unittest
from unittest.test.testmock.support import is_instance, X, SomeClass
from unittest.mock import (
Mock, MagicMock, NonCallableMagicMock,
NonCallableMock, patch, create_autospec,
CallableMixin
)
class TestCallable(unittest.TestCase):
def assertNotCallable(self, mock):
self.assertTrue(is_instance(mock, NonCallableMagicMock))
self.assertFalse(is_instance(mock, CallableMixin))
def test_non_callable(self):
for mock in NonCallableMagicMock(), NonCallableMock():
self.assertRaises(TypeError, mock)
self.assertFalse(hasattr(mock, '__call__'))
self.assertIn(mock.__class__.__name__, repr(mock))
def test_heirarchy(self):
self.assertTrue(issubclass(MagicMock, Mock))
self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock))
def test_attributes(self):
one = NonCallableMock()
self.assertTrue(issubclass(type(one.one), Mock))
two = NonCallableMagicMock()
self.assertTrue(issubclass(type(two.two), MagicMock))
def test_subclasses(self):
class MockSub(Mock):
pass
one = MockSub()
self.assertTrue(issubclass(type(one.one), MockSub))
class MagicSub(MagicMock):
pass
two = MagicSub()
self.assertTrue(issubclass(type(two.two), MagicSub))
def test_patch_spec(self):
patcher = patch('%s.X' % __name__, spec=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertNotCallable(instance)
self.assertRaises(TypeError, instance)
def test_patch_spec_set(self):
patcher = patch('%s.X' % __name__, spec_set=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertNotCallable(instance)
self.assertRaises(TypeError, instance)
def test_patch_spec_instance(self):
patcher = patch('%s.X' % __name__, spec=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertNotCallable(mock)
self.assertRaises(TypeError, mock)
def test_patch_spec_set_instance(self):
patcher = patch('%s.X' % __name__, spec_set=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertNotCallable(mock)
self.assertRaises(TypeError, mock)
def test_patch_spec_callable_class(self):
class CallableX(X):
def __call__(self):
pass
class Sub(CallableX):
pass
class Multi(SomeClass, Sub):
pass
for arg in 'spec', 'spec_set':
for Klass in CallableX, Sub, Multi:
with patch('%s.X' % __name__, **{arg: Klass}) as mock:
instance = mock()
mock.assert_called_once_with()
self.assertTrue(is_instance(instance, MagicMock))
# inherited spec
self.assertRaises(AttributeError, getattr, instance,
'foobarbaz')
result = instance()
# instance is callable, result has no spec
instance.assert_called_once_with()
result(3, 2, 1)
result.assert_called_once_with(3, 2, 1)
result.foo(3, 2, 1)
result.foo.assert_called_once_with(3, 2, 1)
def test_create_autopsec(self):
mock = create_autospec(X)
instance = mock()
self.assertRaises(TypeError, instance)
mock = create_autospec(X())
self.assertRaises(TypeError, mock)
def test_create_autospec_instance(self):
mock = create_autospec(SomeClass, instance=True)
self.assertRaises(TypeError, mock)
mock.wibble()
mock.wibble.assert_called_once_with()
self.assertRaises(TypeError, mock.wibble, 'some', 'args')
| agpl-3.0 |
zerlgi/zcswebapp | zcswebapp-1.0/lib/scudcloud.py | 1 | 14669 | #!/usr/bin/env python3
import sys, os
from cookiejar import PersistentCookieJar
from leftpane import LeftPane
from notifier import Notifier
from resources import Resources
from systray import Systray
from wrapper import Wrapper
from os.path import expanduser
from PyQt4 import QtCore, QtGui, QtWebKit
from PyQt4.Qt import QApplication, QKeySequence
from PyQt4.QtCore import QUrl, QSettings
from PyQt4.QtWebKit import QWebSettings
# Auto-detection of Unity and Dbusmenu in gi repository
try:
from gi.repository import Unity, Dbusmenu
except ImportError:
Unity = None
Dbusmenu = None
from launcher import DummyLauncher
class zcswebapp(QtGui.QMainWindow):
plugins = True
debug = False
forceClose = False
messages = 0
def __init__(self, parent = None, settings_path = ""):
super(zcswebapp, self).__init__(parent)
self.setWindowTitle('zcswebapp')
self.settings_path = settings_path
self.notifier = Notifier(Resources.APP_NAME, Resources.get_path('zcswebapp.png'))
self.settings = QSettings(self.settings_path + '/zcswebapp.cfg', QSettings.IniFormat)
self.identifier = self.settings.value("Domain")
if Unity is not None:
self.launcher = Unity.LauncherEntry.get_for_desktop_id("zcswebapp.desktop")
else:
self.launcher = DummyLauncher(self)
self.webSettings()
self.leftPane = LeftPane(self)
webView = Wrapper(self)
webView.page().networkAccessManager().setCookieJar(self.cookiesjar)
self.stackedWidget = QtGui.QStackedWidget()
self.stackedWidget.addWidget(webView)
centralWidget = QtGui.QWidget(self)
layout = QtGui.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self.leftPane)
layout.addWidget(self.stackedWidget)
centralWidget.setLayout(layout)
self.setCentralWidget(centralWidget)
self.addMenu()
self.tray = Systray(self)
self.systray(zcswebapp.minimized)
self.installEventFilter(self)
if self.identifier is None:
webView.load(QtCore.QUrl(Resources.SIGNIN_URL))
else:
webView.load(QtCore.QUrl(self.domain()))
webView.show()
def webSettings(self):
self.cookiesjar = PersistentCookieJar(self)
self.zoom = self.readZoom()
# Required by Youtube videos (HTML5 video support only on Qt5)
QWebSettings.globalSettings().setAttribute(QWebSettings.PluginsEnabled, self.plugins)
# We don't want Java
QWebSettings.globalSettings().setAttribute(QWebSettings.JavaEnabled, False)
# We don't need History
QWebSettings.globalSettings().setAttribute(QWebSettings.PrivateBrowsingEnabled, True)
# Required for copy and paste clipboard integration
QWebSettings.globalSettings().setAttribute(QWebSettings.JavascriptCanAccessClipboard, True)
# Enabling the Web Inspector only when --debug=True (requires more CPU usage)
QWebSettings.globalSettings().setAttribute(QWebSettings.DeveloperExtrasEnabled, self.debug)
def toggleFullScreen(self):
if self.isFullScreen():
self.showMaximized()
else:
self.showFullScreen()
def restore(self):
geometry = self.settings.value("geometry")
if geometry is not None:
self.restoreGeometry(geometry)
windowState = self.settings.value("windowState")
if windowState is not None:
self.restoreState(windowState)
else:
self.showMaximized()
def systray(self, show=None):
if show is None:
show = self.settings.value("Systray") == "True"
if show:
self.tray.show()
self.menus["file"]["close"].setEnabled(True)
self.settings.setValue("Systray", "True")
else:
self.tray.setVisible(False)
self.menus["file"]["close"].setEnabled(False)
self.settings.setValue("Systray", "False")
def readZoom(self):
default = 1
if self.settings.value("Zoom") is not None:
default = float(self.settings.value("Zoom"))
return default
def setZoom(self, factor=1):
if factor > 0:
for i in range(0, self.stackedWidget.count()):
widget = self.stackedWidget.widget(i)
widget.setZoomFactor(factor)
self.settings.setValue("Zoom", factor)
def zoomIn(self):
self.setZoom(self.current().zoomFactor() + 0.1)
def zoomOut(self):
self.setZoom(self.current().zoomFactor() - 0.1)
def zoomReset(self):
self.setZoom()
def addMenu(self):
self.menus = {
"file": {
"preferences": self.createAction("Preferences", self.current().preferences),
"systray": self.createAction("Close to Tray", self.systray, None, True),
"addTeam": self.createAction("Sign in to Another Team", self.current().addTeam),
"signout": self.createAction("Signout", self.current().logout),
"close": self.createAction("Close", self.close, QKeySequence.Close),
"exit": self.createAction("Quit", self.exit, QKeySequence.Quit)
},
"edit": {
"undo": self.current().pageAction(QtWebKit.QWebPage.Undo),
"redo": self.current().pageAction(QtWebKit.QWebPage.Redo),
"cut": self.current().pageAction(QtWebKit.QWebPage.Cut),
"copy": self.current().pageAction(QtWebKit.QWebPage.Copy),
"paste": self.current().pageAction(QtWebKit.QWebPage.Paste),
"back": self.current().pageAction(QtWebKit.QWebPage.Back),
"forward": self.current().pageAction(QtWebKit.QWebPage.Forward),
"reload": self.current().pageAction(QtWebKit.QWebPage.Reload)
},
"view": {
"zoomin": self.createAction("Zoom In", self.zoomIn, QKeySequence.ZoomIn),
"zoomout": self.createAction("Zoom Out", self.zoomOut, QKeySequence.ZoomOut),
"reset": self.createAction("Reset", self.zoomReset, QtCore.Qt.CTRL + QtCore.Qt.Key_0),
"fullscreen": self.createAction("Toggle Full Screen", self.toggleFullScreen, QtCore.Qt.Key_F11)
},
"help": {
"help": self.createAction("Help and Feedback", self.current().help, QKeySequence.HelpContents),
"center": self.createAction("Slack Help Center", self.current().helpCenter),
"about": self.createAction("About", self.current().about)
}
}
menu = self.menuBar()
fileMenu = menu.addMenu("&File")
fileMenu.addAction(self.menus["file"]["preferences"])
fileMenu.addAction(self.menus["file"]["systray"])
fileMenu.addSeparator()
fileMenu.addAction(self.menus["file"]["addTeam"])
fileMenu.addAction(self.menus["file"]["signout"])
fileMenu.addSeparator()
fileMenu.addAction(self.menus["file"]["close"])
fileMenu.addAction(self.menus["file"]["exit"])
editMenu = menu.addMenu("&Edit")
editMenu.addAction(self.menus["edit"]["undo"])
editMenu.addAction(self.menus["edit"]["redo"])
editMenu.addSeparator()
editMenu.addAction(self.menus["edit"]["cut"])
editMenu.addAction(self.menus["edit"]["copy"])
editMenu.addAction(self.menus["edit"]["paste"])
editMenu.addSeparator()
editMenu.addAction(self.menus["edit"]["back"])
editMenu.addAction(self.menus["edit"]["forward"])
editMenu.addAction(self.menus["edit"]["reload"])
viewMenu = menu.addMenu("&View")
viewMenu.addAction(self.menus["view"]["zoomin"])
viewMenu.addAction(self.menus["view"]["zoomout"])
viewMenu.addAction(self.menus["view"]["reset"])
viewMenu.addSeparator()
viewMenu.addAction(self.menus["view"]["fullscreen"])
helpMenu = menu.addMenu("&Help")
helpMenu.addAction(self.menus["help"]["help"])
helpMenu.addAction(self.menus["help"]["center"])
helpMenu.addSeparator()
helpMenu.addAction(self.menus["help"]["about"])
self.enableMenus(False)
showSystray = self.settings.value("Systray") == "True"
self.menus["file"]["systray"].setChecked(showSystray)
self.menus["file"]["close"].setEnabled(showSystray)
def enableMenus(self, enabled):
self.menus["file"]["preferences"].setEnabled(enabled == True)
self.menus["file"]["addTeam"].setEnabled(enabled == True)
self.menus["file"]["signout"].setEnabled(enabled == True)
self.menus["help"]["help"].setEnabled(enabled == True)
def createAction(self, text, slot, shortcut=None, checkable=False):
action = QtGui.QAction(text, self)
if shortcut is not None:
action.setShortcut(shortcut)
action.triggered.connect(slot)
if checkable:
action.setCheckable(True)
return action
def domain(self):
if self.identifier.endswith(".slack.com"):
return self.identifier
else:
return "https://"+self.identifier+".slack.com"
def current(self):
return self.stackedWidget.currentWidget()
def teams(self, teams):
if teams is not None and len(teams) > 1:
self.leftPane.show()
for t in teams:
try:
self.leftPane.addTeam(t['id'], t['team_name'], t['team_url'], t['team_icon']['image_88'], t == teams[0])
except:
self.leftPane.addTeam(t['id'], t['team_name'], t['team_url'], '', t == teams[0])
def switchTo(self, url):
qUrl = QtCore.QUrl(url)
index = -1
for i in range(0, self.stackedWidget.count()):
if self.stackedWidget.widget(i).url().toString().startswith(url):
index = i
break
if index != -1:
self.stackedWidget.setCurrentIndex(index)
else:
webView = Wrapper(self)
webView.page().networkAccessManager().setCookieJar(self.cookiesjar)
webView.load(qUrl)
webView.show()
self.stackedWidget.addWidget(webView)
self.stackedWidget.setCurrentWidget(webView)
self.quicklist(self.current().listChannels())
self.enableMenus(self.current().isConnected())
# Save the last used team as default
self.settings.setValue("Domain", 'https://'+qUrl.host())
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.ActivationChange and self.isActiveWindow():
self.focusInEvent(event)
if event.type() == QtCore.QEvent.KeyPress:
# Ctrl + <n>
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_1: self.leftPane.click(0)
elif event.key() == QtCore.Qt.Key_2: self.leftPane.click(1)
elif event.key() == QtCore.Qt.Key_3: self.leftPane.click(2)
elif event.key() == QtCore.Qt.Key_4: self.leftPane.click(3)
elif event.key() == QtCore.Qt.Key_5: self.leftPane.click(4)
elif event.key() == QtCore.Qt.Key_6: self.leftPane.click(5)
elif event.key() == QtCore.Qt.Key_7: self.leftPane.click(6)
elif event.key() == QtCore.Qt.Key_8: self.leftPane.click(7)
elif event.key() == QtCore.Qt.Key_9: self.leftPane.click(8)
# Ctrl + Shift + <key>
if (QtGui.QApplication.keyboardModifiers() & QtCore.Qt.ControlModifier) and (QtGui.QApplication.keyboardModifiers() & QtCore.Qt.ShiftModifier):
if event.key() == QtCore.Qt.Key_V: self.current().createSnippet()
return QtGui.QMainWindow.eventFilter(self, obj, event);
def focusInEvent(self, event):
self.launcher.set_property("urgent", False)
self.tray.stopAlert()
def titleChanged(self):
self.setWindowTitle(self.current().title())
def closeEvent(self, event):
if not self.forceClose and self.settings.value("Systray") == "True":
self.hide()
event.ignore()
else:
self.cookiesjar.save()
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("windowState", self.saveState())
def show(self):
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
self.activateWindow()
self.setVisible(True)
def exit(self):
self.forceClose = True
self.close()
def quicklist(self, channels):
if Dbusmenu is not None:
ql = Dbusmenu.Menuitem.new()
self.launcher.set_property("quicklist", ql)
if channels is not None:
for c in channels:
if c['is_member']:
item = Dbusmenu.Menuitem.new ()
item.property_set (Dbusmenu.MENUITEM_PROP_LABEL, "#"+c['name'])
item.property_set ("id", c['name'])
item.property_set_bool (Dbusmenu.MENUITEM_PROP_VISIBLE, True)
item.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.current().openChannel)
ql.child_append(item)
self.launcher.set_property("quicklist", ql)
def notify(self, title, message):
self.notifier.notify(title, message)
self.alert()
def alert(self):
if not self.isActiveWindow():
self.launcher.set_property("urgent", True)
self.tray.alert()
def count(self):
total = 0
for i in range(0, self.stackedWidget.count()):
widget = self.stackedWidget.widget(i)
if widget.messages == 0:
self.leftPane.stopAlert(widget.team())
else:
self.leftPane.alert(widget.team())
total+=widget.messages
if total > self.messages:
self.alert()
if 0 == total:
self.launcher.set_property("count_visible", False)
self.tray.setCounter(0)
else:
self.tray.setCounter(total)
self.launcher.set_property("count", total)
self.launcher.set_property("count_visible", True)
self.messages = total
| mit |
onceuponatimeforever/oh-mainline | mysite/search/migrations/0020_remove_project_icon_field.py | 17 | 3867 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'Project.icon'
db.delete_column('search_project', 'icon')
def backwards(self, orm):
# Adding field 'Project.icon'
db.add_column('search_project', 'icon', orm['search.project:icon'])
models = {
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.project': {
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['search']
| agpl-3.0 |
Jgarcia-IAS/ReporsitorioVacioOdoo | openerp/addons/base_setup/res_config.py | 261 | 5089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
_name = 'base.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_multi_company': fields.boolean('Manage multiple companies',
help='Work in multi-company environments, with appropriate security access between companies.\n'
'-This installs the module multi_company.'),
'module_share': fields.boolean('Allow documents sharing',
help="""Share or embbed any screen of Odoo."""),
'module_portal': fields.boolean('Activate the customer portal',
help="""Give your customers access to their documents."""),
'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
'module_base_import': fields.boolean("Allow users to import data from CSV files"),
'module_google_drive': fields.boolean('Attach Google documents to any record',
help="""This installs the module google_docs."""),
'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
help="""This installs the module google_calendar."""),
'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
}
_defaults = {
'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
}
def open_company(self, cr, uid, ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
return {
'type': 'ir.actions.act_window',
'name': 'Your Company',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'res.company',
'res_id': user.company_id.id,
'target': 'current',
}
def _change_header(self, header, font):
""" Replace the default fontName used in the header and setFont tags """
default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"' % font, header)
return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>' % font, default_para)
def set_base_defaults(self, cr, uid, ids, context=None):
ir_model_data = self.pool.get('ir.model.data')
wizard = self.browse(cr, uid, ids, context)[0]
if wizard.font:
user = self.pool.get('res.users').browse(cr, uid, uid, context)
font_name = wizard.font.name
user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
return {}
def act_discover_fonts(self, cr, uid, ids, context=None):
return self.pool.get("res.font").font_scan(cr, uid, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm.
class sale_config_settings(osv.osv_memory):
_name = 'sale.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_web_linkedin': fields.boolean('Get contacts automatically from LinkedIn',
help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
'module_crm': fields.boolean('CRM'),
'module_sale': fields.boolean('SALE'),
'module_mass_mailing': fields.boolean(
'Manage mass mailing campaigns',
help='Get access to statistics with your mass mailing, manage campaigns.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
GaussDing/django | tests/template_tests/filter_tests/test_autoescape.py | 513 | 1342 | from django.test import SimpleTestCase
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeStringfilterTests(SimpleTestCase):
"""
Filters decorated with stringfilter still respect is_safe.
"""
@setup({'autoescape-stringfilter01': '{{ unsafe|capfirst }}'})
def test_autoescape_stringfilter01(self):
output = self.engine.render_to_string('autoescape-stringfilter01', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You &amp; me')
@setup({'autoescape-stringfilter02': '{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter02(self):
output = self.engine.render_to_string('autoescape-stringfilter02', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter03': '{{ safe|capfirst }}'})
def test_autoescape_stringfilter03(self):
output = self.engine.render_to_string('autoescape-stringfilter03', {'safe': SafeClass()})
self.assertEqual(output, 'You &gt; me')
@setup({'autoescape-stringfilter04': '{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter04(self):
output = self.engine.render_to_string('autoescape-stringfilter04', {'safe': SafeClass()})
self.assertEqual(output, 'You &gt; me')
| bsd-3-clause |
vipul-sharma20/oh-mainline | vendor/packages/kombu/kombu/transport/redis.py | 15 | 33278 | """
kombu.transport.redis
=====================
Redis transport.
"""
from __future__ import absolute_import
import numbers
import socket
from bisect import bisect
from collections import namedtuple
from contextlib import contextmanager
from time import time
from amqp import promise
from anyjson import loads, dumps
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, values, string_t
from kombu.log import get_logger
from kombu.utils import cached_property, uuid
from kombu.utils.eventio import poll, READ, ERR
from kombu.utils.encoding import bytes_to_str
from kombu.utils.url import _parse_url
NO_ROUTE_ERROR = """
Cannot route message for exchange {0!r}: Table empty or key no longer exists.
Probably the key ({1!r}) has been removed from the Redis database.
"""
try:
from billiard.util import register_after_fork
except ImportError: # pragma: no cover
try:
from multiprocessing.util import register_after_fork # noqa
except ImportError:
def register_after_fork(*args, **kwargs): # noqa
pass
try:
import redis
except ImportError: # pragma: no cover
redis = None # noqa
from . import virtual
logger = get_logger('kombu.transport.redis')
crit, warn = logger.critical, logger.warn
DEFAULT_PORT = 6379
DEFAULT_DB = 0
PRIORITY_STEPS = [0, 3, 6, 9]
error_classes_t = namedtuple('error_classes_t', (
'connection_errors', 'channel_errors',
))
# This implementation may seem overly complex, but I assure you there is
# a good reason for doing it this way.
#
# Consuming from several connections enables us to emulate channels,
# which means we can have different service guarantees for individual
# channels.
#
# So we need to consume messages from multiple connections simultaneously,
# and using epoll means we don't have to do so using multiple threads.
#
# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout
# exchanges (broadcast), as an alternative to pushing messages to fanout-bound
# queues manually.
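# For example, a message published to a fanout exchange named 'pidbox' is sent
# with PUBLISH to a pub/sub channel: the plain exchange name by default, or
# something like '/0.pidbox' when the ``fanout_prefix`` transport option is
# enabled (see Channel.keyprefix_fanout and Channel._get_publish_topic below).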
def get_redis_error_classes():
from redis import exceptions
# This exception suddenly changed name between redis-py versions
if hasattr(exceptions, 'InvalidData'):
DataError = exceptions.InvalidData
else:
DataError = exceptions.DataError
return error_classes_t(
(virtual.Transport.connection_errors + (
InconsistencyError,
socket.error,
IOError,
OSError,
exceptions.ConnectionError,
exceptions.AuthenticationError)),
(virtual.Transport.channel_errors + (
DataError,
exceptions.InvalidResponse,
exceptions.ResponseError)),
)
class MutexHeld(Exception):
pass
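# Best-effort distributed lock built from SETNX + EXPIRE: the winner sets the
# key to a unique id and yields; everyone else gets MutexHeld.  On release the
# stored id is re-checked under WATCH so an expired-and-reacquired lock is not
# deleted by the original holder.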
@contextmanager
def Mutex(client, name, expire):
lock_id = uuid()
i_won = client.setnx(name, lock_id)
try:
if i_won:
client.expire(name, expire)
yield
else:
if not client.ttl(name):
client.expire(name, expire)
raise MutexHeld()
finally:
if i_won:
pipe = client.pipeline(True)
try:
pipe.watch(name)
if pipe.get(name) == lock_id:
pipe.multi()
pipe.delete(name)
pipe.execute()
pipe.unwatch()
except redis.WatchError:
pass
class QoS(virtual.QoS):
restore_at_shutdown = True
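# Ack emulation: each delivered message is mirrored into the ``unacked_key``
# hash and the ``unacked_index_key`` sorted set (scored by delivery time), so
# anything still unacked after ``visibility_timeout`` can be pushed back onto
# its queue (see restore_visible/restore_by_tag below).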
def __init__(self, *args, **kwargs):
super(QoS, self).__init__(*args, **kwargs)
self._vrestore_count = 0
def append(self, message, delivery_tag):
delivery = message.delivery_info
EX, RK = delivery['exchange'], delivery['routing_key']
with self.pipe_or_acquire() as pipe:
pipe.zadd(self.unacked_index_key, delivery_tag, time()) \
.hset(self.unacked_key, delivery_tag,
dumps([message._raw, EX, RK])) \
.execute()
super(QoS, self).append(message, delivery_tag)
def restore_unacked(self):
for tag in self._delivered:
self.restore_by_tag(tag)
self._delivered.clear()
def ack(self, delivery_tag):
self._remove_from_indices(delivery_tag).execute()
super(QoS, self).ack(delivery_tag)
def reject(self, delivery_tag, requeue=False):
if requeue:
self.restore_by_tag(delivery_tag, leftmost=True)
self.ack(delivery_tag)
@contextmanager
def pipe_or_acquire(self, pipe=None):
if pipe:
yield pipe
else:
with self.channel.conn_or_acquire() as client:
yield client.pipeline()
def _remove_from_indices(self, delivery_tag, pipe=None):
with self.pipe_or_acquire(pipe) as pipe:
return pipe.zrem(self.unacked_index_key, delivery_tag) \
.hdel(self.unacked_key, delivery_tag)
def restore_visible(self, start=0, num=10, interval=10):
self._vrestore_count += 1
if (self._vrestore_count - 1) % interval:
return
with self.channel.conn_or_acquire() as client:
ceil = time() - self.visibility_timeout
try:
with Mutex(client, self.unacked_mutex_key,
self.unacked_mutex_expire):
visible = client.zrevrangebyscore(
self.unacked_index_key, ceil, 0,
start=num and start, num=num, withscores=True)
for tag, score in visible or []:
self.restore_by_tag(tag, client)
except MutexHeld:
pass
def restore_by_tag(self, tag, client=None, leftmost=False):
with self.channel.conn_or_acquire(client) as client:
p, _, _ = self._remove_from_indices(
tag, client.pipeline().hget(self.unacked_key, tag)).execute()
if p:
M, EX, RK = loads(bytes_to_str(p)) # json is unicode
self.channel._do_restore_message(M, EX, RK, client, leftmost)
@cached_property
def unacked_key(self):
return self.channel.unacked_key
@cached_property
def unacked_index_key(self):
return self.channel.unacked_index_key
@cached_property
def unacked_mutex_key(self):
return self.channel.unacked_mutex_key
@cached_property
def unacked_mutex_expire(self):
return self.channel.unacked_mutex_expire
@cached_property
def visibility_timeout(self):
return self.channel.visibility_timeout
class MultiChannelPoller(object):
eventflags = READ | ERR
#: Set by :meth:`get` while reading from the socket.
_in_protected_read = False
#: Set of one-shot callbacks to call after reading from socket.
after_read = None
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map.
self._fd_to_chan = {}
# channel -> socket map
self._chan_to_sock = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
# one-shot callbacks called after reading from socket.
self.after_read = set()
def close(self):
for fd in values(self._chan_to_sock):
try:
self.poller.unregister(fd)
except (KeyError, ValueError):
pass
self._channels.clear()
self._fd_to_chan.clear()
self._chan_to_sock.clear()
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
def _on_connection_disconnect(self, connection):
sock = getattr(connection, '_sock', None)
if sock is not None:
self.poller.unregister(sock)
def _register(self, channel, client, type):
if (channel, client, type) in self._chan_to_sock:
self._unregister(channel, client, type)
if client.connection._sock is None: # not connected yet.
client.connection.connect()
sock = client.connection._sock
self._fd_to_chan[sock.fileno()] = (channel, type)
self._chan_to_sock[(channel, client, type)] = sock
self.poller.register(sock, self.eventflags)
def _unregister(self, channel, client, type):
self.poller.unregister(self._chan_to_sock[(channel, client, type)])
def _register_BRPOP(self, channel):
"""enable BRPOP mode for channel."""
ident = channel, channel.client, 'BRPOP'
if channel.client.connection._sock is None or \
ident not in self._chan_to_sock:
channel._in_poll = False
self._register(*ident)
if not channel._in_poll: # send BRPOP
channel._brpop_start()
def _register_LISTEN(self, channel):
"""enable LISTEN mode for channel."""
if channel.subclient.connection._sock is None:
channel._in_listen = False
self._register(channel, channel.subclient, 'LISTEN')
if not channel._in_listen:
channel._subscribe() # send SUBSCRIBE
def on_poll_start(self):
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
def on_poll_init(self, poller):
self.poller = poller
for channel in self._channels:
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def maybe_restore_messages(self):
for channel in self._channels:
if channel.active_queues:
# only need to do this once, as they are not local to channel.
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def on_readable(self, fileno):
chan, type = self._fd_to_chan[fileno]
if chan.qos.can_consume():
return chan.handlers[type]()
def handle_event(self, fileno, event):
if event & READ:
return self.on_readable(fileno), self
elif event & ERR:
chan, type = self._fd_to_chan[fileno]
chan._poll_error(type)
def get(self, timeout=None):
self._in_protected_read = True
try:
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
events = self.poller.poll(timeout)
for fileno, event in events or []:
ret = self.handle_event(fileno, event)
if ret:
return ret
# - no new data, so try to restore messages.
# - reset active redis commands.
self.maybe_restore_messages()
raise Empty()
finally:
self._in_protected_read = False
while self.after_read:
try:
fun = self.after_read.pop()
except KeyError:
break
else:
fun()
@property
def fds(self):
return self._fd_to_chan
class Channel(virtual.Channel):
QoS = QoS
_client = None
_subclient = None
supports_fanout = True
keyprefix_queue = '_kombu.binding.%s'
keyprefix_fanout = '/{db}.'
sep = '\x06\x16'
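# Queue bindings live in the Redis set ``keyprefix_queue % exchange`` as
# sep-joined 'routing_key<sep>pattern<sep>queue' members (see _queue_bind
# and get_table below).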
_in_poll = False
_in_listen = False
_fanout_queues = {}
ack_emulation = True
unacked_key = 'unacked'
unacked_index_key = 'unacked_index'
unacked_mutex_key = 'unacked_mutex'
unacked_mutex_expire = 300 # 5 minutes
unacked_restore_limit = None
visibility_timeout = 3600 # 1 hour
priority_steps = PRIORITY_STEPS
socket_timeout = None
max_connections = 10
#: Transport option to enable/disable the fanout keyprefix.
#: Should be enabled by default, but that is not
#: backwards compatible. Can also be string, in which
#: case it changes the default prefix ('/{db}.') into something
#: else. The prefix must include a leading slash and a trailing dot.
fanout_prefix = False
#: If enabled the fanout exchange will support patterns in routing
#: and binding keys (like a topic exchange but using PUB/SUB).
#: This will be enabled by default in a future version.
fanout_patterns = False
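# A sketch of turning both options on from application code (option names come
# from ``from_transport_options`` below; the URL is illustrative only):
#
#     from kombu import Connection
#     conn = Connection('redis://localhost:6379/0', transport_options={
#         'fanout_prefix': True,
#         'fanout_patterns': True,
#     })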
_pool = None
from_transport_options = (
virtual.Channel.from_transport_options +
('ack_emulation',
'unacked_key',
'unacked_index_key',
'unacked_mutex_key',
'unacked_mutex_expire',
'visibility_timeout',
'unacked_restore_limit',
'fanout_prefix',
'fanout_patterns',
'socket_timeout',
'max_connections',
'priority_steps') # <-- do not add comma here!
)
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
if not self.ack_emulation: # disable visibility timeout
self.QoS = virtual.QoS
self._queue_cycle = []
self.Client = self._get_client()
self.ResponseError = self._get_response_error()
self.active_fanout_queues = set()
self.auto_delete_queues = set()
self._fanout_to_queue = {}
self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}
if self.fanout_prefix:
if isinstance(self.fanout_prefix, string_t):
self.keyprefix_fanout = self.fanout_prefix
else:
# previous versions did not set a fanout, so cannot enable
# by default.
self.keyprefix_fanout = ''
# Evaluate connection.
try:
self.client.info()
except Exception:
if self._pool:
self._pool.disconnect()
raise
self.connection.cycle.add(self) # add to channel poller.
# copy errors, in case channel closed but threads still
# are still waiting for data.
self.connection_errors = self.connection.connection_errors
register_after_fork(self, self._after_fork)
def _after_fork(self):
if self._pool is not None:
self._pool.disconnect()
def _on_connection_disconnect(self, connection):
if self.connection and self.connection.cycle:
self.connection.cycle._on_connection_disconnect(connection)
def _do_restore_message(self, payload, exchange, routing_key,
client=None, leftmost=False):
with self.conn_or_acquire(client) as client:
try:
try:
payload['headers']['redelivered'] = True
except KeyError:
pass
for queue in self._lookup(exchange, routing_key):
(client.lpush if leftmost else client.rpush)(
queue, dumps(payload),
)
except Exception:
crit('Could not restore message: %r', payload, exc_info=True)
def _restore(self, message, leftmost=False):
if not self.ack_emulation:
return super(Channel, self)._restore(message)
tag = message.delivery_tag
with self.conn_or_acquire() as client:
P, _ = client.pipeline() \
.hget(self.unacked_key, tag) \
.hdel(self.unacked_key, tag) \
.execute()
if P:
M, EX, RK = loads(bytes_to_str(P)) # json is unicode
self._do_restore_message(M, EX, RK, client, leftmost)
def _restore_at_beginning(self, message):
return self._restore(message, leftmost=True)
def basic_consume(self, queue, *args, **kwargs):
if queue in self._fanout_queues:
exchange, _ = self._fanout_queues[queue]
self.active_fanout_queues.add(queue)
self._fanout_to_queue[exchange] = queue
ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
self._update_cycle()
return ret
def basic_cancel(self, consumer_tag):
# If we are busy reading messages we may experience
# a race condition where a message is consumed after
# cancelling, so we must delay this operation until reading
# is complete (Issue celery/celery#1773).
connection = self.connection
if connection:
if connection.cycle._in_protected_read:
return connection.cycle.after_read.add(
promise(self._basic_cancel, (consumer_tag, )),
)
return self._basic_cancel(consumer_tag)
def _basic_cancel(self, consumer_tag):
try:
queue = self._tag_to_queue[consumer_tag]
except KeyError:
return
try:
self.active_fanout_queues.remove(queue)
except KeyError:
pass
else:
self._unsubscribe_from(queue)
try:
exchange, _ = self._fanout_queues[queue]
self._fanout_to_queue.pop(exchange)
except KeyError:
pass
ret = super(Channel, self).basic_cancel(consumer_tag)
self._update_cycle()
return ret
def _get_publish_topic(self, exchange, routing_key):
if routing_key and self.fanout_patterns:
return ''.join([self.keyprefix_fanout, exchange, '/', routing_key])
return ''.join([self.keyprefix_fanout, exchange])
def _get_subscribe_topic(self, queue):
exchange, routing_key = self._fanout_queues[queue]
return self._get_publish_topic(exchange, routing_key)
def _subscribe(self):
keys = [self._get_subscribe_topic(queue)
for queue in self.active_fanout_queues]
if not keys:
return
c = self.subclient
if c.connection._sock is None:
c.connection.connect()
self._in_listen = True
c.psubscribe(keys)
def _unsubscribe_from(self, queue):
topic = self._get_subscribe_topic(queue)
c = self.subclient
should_disconnect = False
if c.connection._sock is None:
c.connection.connect()
should_disconnect = True
try:
c.unsubscribe([topic])
finally:
if should_disconnect and c.connection:
c.connection.disconnect()
def _handle_message(self, client, r):
if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0:
client.subscribed = False
elif bytes_to_str(r[0]) == 'pmessage':
return {'type': r[0], 'pattern': r[1],
'channel': r[2], 'data': r[3]}
else:
return {'type': r[0], 'pattern': None,
'channel': r[1], 'data': r[2]}
def _receive(self):
c = self.subclient
response = None
try:
response = c.parse_response()
except self.connection_errors:
self._in_listen = False
raise Empty()
if response is not None:
payload = self._handle_message(c, response)
if bytes_to_str(payload['type']).endswith('message'):
channel = bytes_to_str(payload['channel'])
if payload['data']:
if channel[0] == '/':
_, _, channel = channel.partition('.')
try:
message = loads(bytes_to_str(payload['data']))
except (TypeError, ValueError):
warn('Cannot process event on channel %r: %s',
channel, repr(payload)[:4096], exc_info=1)
raise Empty()
exchange = channel.split('/', 1)[0]
return message, self._fanout_to_queue[exchange]
raise Empty()
def _brpop_start(self, timeout=1):
queues = self._consume_cycle()
if not queues:
return
keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS
for queue in queues] + [timeout or 0]
self._in_poll = True
self.client.connection.send_command('BRPOP', *keys)
def _brpop_read(self, **options):
try:
try:
dest__item = self.client.parse_response(self.client.connection,
'BRPOP',
**options)
except self.connection_errors:
# if there's a ConnectionError, disconnect so the next
# iteration will reconnect automatically.
self.client.connection.disconnect()
raise Empty()
if dest__item:
dest, item = dest__item
dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
self._rotate_cycle(dest)
return loads(bytes_to_str(item)), dest
else:
raise Empty()
finally:
self._in_poll = False
def _poll_error(self, type, **options):
if type == 'LISTEN':
self.subclient.parse_response()
else:
self.client.parse_response(self.client.connection, type)
def _get(self, queue):
with self.conn_or_acquire() as client:
for pri in PRIORITY_STEPS:
item = client.rpop(self._q_for_pri(queue, pri))
if item:
return loads(bytes_to_str(item))
raise Empty()
def _size(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.llen(self._q_for_pri(queue, pri))
sizes = cmds.execute()
return sum(size for size in sizes
if isinstance(size, numbers.Integral))
def _q_for_pri(self, queue, pri):
pri = self.priority(pri)
return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', ''))
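# e.g. _q_for_pri('celery', 3) -> 'celery\x06\x163'; priority 0 maps to the
# plain queue name 'celery'.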
def priority(self, n):
steps = self.priority_steps
return steps[bisect(steps, n) - 1]
def _put(self, queue, message, **kwargs):
"""Deliver message."""
try:
pri = max(min(int(
message['properties']['delivery_info']['priority']), 9), 0)
except (TypeError, ValueError, KeyError):
pri = 0
with self.conn_or_acquire() as client:
client.lpush(self._q_for_pri(queue, pri), dumps(message))
def _put_fanout(self, exchange, message, routing_key, **kwargs):
"""Deliver fanout message."""
with self.conn_or_acquire() as client:
client.publish(
self._get_publish_topic(exchange, routing_key),
dumps(message),
)
def _new_queue(self, queue, auto_delete=False, **kwargs):
if auto_delete:
self.auto_delete_queues.add(queue)
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
# Mark exchange as fanout.
self._fanout_queues[queue] = (
exchange, routing_key.replace('#', '*'),
)
with self.conn_or_acquire() as client:
client.sadd(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
def _delete(self, queue, exchange, routing_key, pattern, *args):
self.auto_delete_queues.discard(queue)
with self.conn_or_acquire() as client:
client.srem(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.delete(self._q_for_pri(queue, pri))
cmds.execute()
def _has_queue(self, queue, **kwargs):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.exists(self._q_for_pri(queue, pri))
return any(cmds.execute())
def get_table(self, exchange):
key = self.keyprefix_queue % exchange
with self.conn_or_acquire() as client:
values = client.smembers(key)
if not values:
raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key))
return [tuple(bytes_to_str(val).split(self.sep)) for val in values]
def _purge(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
priq = self._q_for_pri(queue, pri)
cmds = cmds.llen(priq).delete(priq)
sizes = cmds.execute()
return sum(sizes[::2])
def close(self):
if self._pool:
self._pool.disconnect()
if not self.closed:
# remove from channel poller.
self.connection.cycle.discard(self)
# delete fanout bindings
for queue in self._fanout_queues:
if queue in self.auto_delete_queues:
self.queue_delete(queue)
self._close_clients()
super(Channel, self).close()
def _close_clients(self):
# Close connections
for attr in 'client', 'subclient':
try:
self.__dict__[attr].connection.disconnect()
except (KeyError, AttributeError, self.ResponseError):
pass
def _prepare_virtual_host(self, vhost):
if not isinstance(vhost, numbers.Integral):
if not vhost or vhost == '/':
vhost = DEFAULT_DB
elif vhost.startswith('/'):
vhost = vhost[1:]
try:
vhost = int(vhost)
except ValueError:
raise ValueError(
'Database is int between 0 and limit - 1, not {0}'.format(
vhost,
))
return vhost
def _connparams(self):
conninfo = self.connection.client
connparams = {'host': conninfo.hostname or '127.0.0.1',
'port': conninfo.port or DEFAULT_PORT,
'virtual_host': conninfo.virtual_host,
'password': conninfo.password,
'max_connections': self.max_connections,
'socket_timeout': self.socket_timeout}
host = connparams['host']
if '://' in host:
scheme, _, _, _, _, path, query = _parse_url(host)
if scheme == 'socket':
connparams.update({
'connection_class': redis.UnixDomainSocketConnection,
'path': '/' + path}, **query)
connparams.pop('host', None)
connparams.pop('port', None)
connparams['db'] = self._prepare_virtual_host(
connparams.pop('virtual_host', None))
channel = self
connection_cls = (
connparams.get('connection_class') or
redis.Connection
)
class Connection(connection_cls):
def disconnect(self):
channel._on_connection_disconnect(self)
super(Connection, self).disconnect()
connparams['connection_class'] = Connection
return connparams
def _create_client(self):
return self.Client(connection_pool=self.pool)
def _get_pool(self):
params = self._connparams()
self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
return redis.ConnectionPool(**params)
def _get_client(self):
if redis.VERSION < (2, 4, 4):
raise VersionMismatch(
'Redis transport requires redis-py versions 2.4.4 or later. '
'You have {0.__version__}'.format(redis))
# KombuRedis maintains a connection attribute on its instance and
# uses that when executing commands
# This was added after redis-py was changed.
class KombuRedis(redis.Redis): # pragma: no cover
def __init__(self, *args, **kwargs):
super(KombuRedis, self).__init__(*args, **kwargs)
self.connection = self.connection_pool.get_connection('_')
return KombuRedis
@contextmanager
def conn_or_acquire(self, client=None):
if client:
yield client
else:
if self._in_poll:
client = self._create_client()
try:
yield client
finally:
self.pool.release(client.connection)
else:
yield self.client
@property
def pool(self):
if self._pool is None:
self._pool = self._get_pool()
return self._pool
@cached_property
def client(self):
"""Client used to publish messages, BRPOP etc."""
return self._create_client()
@cached_property
def subclient(self):
"""Pub/Sub connection used to consume fanout queues."""
client = self._create_client()
pubsub = client.pubsub()
pool = pubsub.connection_pool
pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint)
return pubsub
def _update_cycle(self):
"""Update fair cycle between queues.
We cycle between queues fairly to make sure that
each queue is equally likely to be consumed from,
so that a very busy queue will not block others.
This works by using Redis's `BRPOP` command and
by rotating the most recently used queue to the
end of the list. See Kombu github issue #166 for
more discussion of this method.
"""
self._queue_cycle = list(self.active_queues)
def _consume_cycle(self):
"""Get a fresh list of queues from the queue cycle."""
active = len(self.active_queues)
return self._queue_cycle[0:active]
def _rotate_cycle(self, used):
"""Move most recently used queue to end of list."""
cycle = self._queue_cycle
try:
cycle.append(cycle.pop(cycle.index(used)))
except ValueError:
pass
def _get_response_error(self):
from redis import exceptions
return exceptions.ResponseError
@property
def active_queues(self):
"""Set of queues being consumed from (excluding fanout queues)."""
return set(queue for queue in self._active_queues
if queue not in self.active_fanout_queues)
class Transport(virtual.Transport):
Channel = Channel
polling_interval = None # disable sleep between unsuccessful polls.
default_port = DEFAULT_PORT
supports_ev = True
driver_type = 'redis'
driver_name = 'redis'
def __init__(self, *args, **kwargs):
if redis is None:
raise ImportError('Missing redis library (pip install redis)')
super(Transport, self).__init__(*args, **kwargs)
# Get redis-py exceptions.
self.connection_errors, self.channel_errors = self._get_errors()
# All channels share the same poller.
self.cycle = MultiChannelPoller()
def driver_version(self):
return redis.__version__
def register_with_event_loop(self, connection, loop):
cycle = self.cycle
cycle.on_poll_init(loop.poller)
cycle_poll_start = cycle.on_poll_start
add_reader = loop.add_reader
on_readable = self.on_readable
def _on_disconnect(connection):
if connection._sock:
loop.remove(connection._sock)
cycle._on_connection_disconnect = _on_disconnect
def on_poll_start():
cycle_poll_start()
[add_reader(fd, on_readable, fd) for fd in cycle.fds]
loop.on_tick.add(on_poll_start)
loop.call_repeatedly(10, cycle.maybe_restore_messages)
def on_readable(self, fileno):
"""Handle AIO event for one of our file descriptors."""
item = self.cycle.on_readable(fileno)
if item:
message, queue = item
if not queue or queue not in self._callbacks:
raise KeyError(
'Message for queue {0!r} without consumers: {1}'.format(
queue, message))
self._callbacks[queue](message)
def _get_errors(self):
"""Utility to import redis-py's exceptions at runtime."""
return get_redis_error_classes()
| agpl-3.0 |
ABaldwinHunter/django-clone | tests/forms_tests/tests/test_media.py | 76 | 23851 | # -*- coding: utf-8 -*-
from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils.encoding import force_text
@override_settings(
STATIC_URL='http://media.example.com/static/',
)
class FormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(
css={'all': ('path/to/css1', '/path/to/css2')},
js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'),
)
self.assertEqual(
str(m),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(
str(m3),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Media objects can be interrogated by media type
self.assertEqual(
str(w1.media['css']),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />"""
)
self.assertEqual(
str(w1.media['js']),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(
str(w1.media + w2.media + w3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Check that media addition hasn't affected the original objects
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(
str(w6.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(
str(w7.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(
str(w8.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(
str(w9.media),
"""<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(
str(w11.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(
str(w12.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(
str(multimedia.media),
"""<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(
str(mymulti.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(
str(f1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(
str(f1.media + f2.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(
str(f3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
)
# Media works in templates
self.assertEqual(
Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />"""
)
def test_html_safe(self):
media = Media(css={'all': ['/path/to/css']}, js=['/path/to/js'])
self.assertTrue(hasattr(Media, '__html__'))
self.assertEqual(force_text(media), media.__html__())
| bsd-3-clause |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/src/twisted/trial/_dist/test/test_workertrial.py | 12 | 5153 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.workertrial}.
"""
import errno
import sys
import os
from twisted.protocols.amp import AMP
from twisted.python.compat import _PY3, NativeStringIO as StringIO
from twisted.test.proto_helpers import StringTransport
from twisted.trial.unittest import TestCase
from twisted.trial._dist.workertrial import WorkerLogObserver, main, _setupPath
from twisted.trial._dist import (
workertrial, _WORKER_AMP_STDIN, _WORKER_AMP_STDOUT, workercommands,
managercommands)
class FakeAMP(AMP):
"""
A fake amp protocol.
"""
class WorkerLogObserverTests(TestCase):
"""
Tests for L{WorkerLogObserver}.
"""
def test_emit(self):
"""
L{WorkerLogObserver} forwards data to L{managercommands.TestWrite}.
"""
calls = []
class FakeClient(object):
def callRemote(self, method, **kwargs):
calls.append((method, kwargs))
observer = WorkerLogObserver(FakeClient())
observer.emit({'message': ['Some log']})
self.assertEqual(
calls, [(managercommands.TestWrite, {'out': 'Some log'})])
class MainTests(TestCase):
"""
Tests for L{main}.
"""
def setUp(self):
self.readStream = StringIO()
self.writeStream = StringIO()
self.patch(workertrial, 'startLoggingWithObserver',
self.startLoggingWithObserver)
self.addCleanup(setattr, sys, "argv", sys.argv)
sys.argv = ["trial"]
def fdopen(self, fd, mode=None):
"""
Fake C{os.fdopen} implementation which returns C{self.readStream} for
the stdin fd and C{self.writeStream} for the stdout fd.
"""
if fd == _WORKER_AMP_STDIN:
self.assertIdentical(None, mode)
return self.readStream
elif fd == _WORKER_AMP_STDOUT:
self.assertEqual('w', mode)
return self.writeStream
else:
raise AssertionError("Unexpected fd %r" % (fd,))
def startLoggingWithObserver(self, emit, setStdout):
"""
Override C{startLoggingWithObserver} for not starting logging.
"""
self.assertFalse(setStdout)
def test_empty(self):
"""
If no data is ever written, L{main} exits without writing data out.
"""
main(self.fdopen)
self.assertEqual('', self.writeStream.getvalue())
def test_forwardCommand(self):
"""
L{main} forwards data from its input stream to a L{WorkerProtocol}
instance which writes data to the output stream.
"""
client = FakeAMP()
clientTransport = StringTransport()
client.makeConnection(clientTransport)
client.callRemote(workercommands.Run, testCase=b"doesntexist")
self.readStream = clientTransport.io
self.readStream.seek(0, 0)
main(self.fdopen)
self.assertIn(
"No module named 'doesntexist'", self.writeStream.getvalue())
if _PY3:
test_forwardCommand.skip = "Does not work on Python 3 (https://tm.tl/8944)"
def test_readInterrupted(self):
"""
If reading the input stream fails with a C{IOError} with errno
C{EINTR}, L{main} ignores it and continues reading.
"""
excInfos = []
class FakeStream(object):
count = 0
def read(oself, size):
oself.count += 1
if oself.count == 1:
raise IOError(errno.EINTR)
else:
excInfos.append(sys.exc_info())
return ''
self.readStream = FakeStream()
main(self.fdopen)
self.assertEqual('', self.writeStream.getvalue())
self.assertEqual([(None, None, None)], excInfos)
def test_otherReadError(self):
"""
L{main} only ignores C{IOError} with C{EINTR} errno: otherwise, the
error pops out.
"""
class FakeStream(object):
count = 0
def read(oself, size):
oself.count += 1
if oself.count == 1:
raise IOError("Something else")
return ''
self.readStream = FakeStream()
self.assertRaises(IOError, main, self.fdopen)
class SetupPathTests(TestCase):
"""
Tests for L{_setupPath} C{sys.path} manipulation.
"""
def setUp(self):
self.addCleanup(setattr, sys, "path", sys.path[:])
def test_overridePath(self):
"""
L{_setupPath} overrides C{sys.path} if B{TRIAL_PYTHONPATH} is specified
in the environment.
"""
environ = {"TRIAL_PYTHONPATH": os.pathsep.join(["foo", "bar"])}
_setupPath(environ)
self.assertEqual(["foo", "bar"], sys.path)
def test_noVariable(self):
"""
L{_setupPath} doesn't change C{sys.path} if B{TRIAL_PYTHONPATH} is not
present in the environment.
"""
originalPath = sys.path[:]
_setupPath({})
self.assertEqual(originalPath, sys.path)
| mit |
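# Hypothetical sketch, not part of the file above: wiring a WorkerLogObserver
# to a client object, mirroring what test_emit checks. The recording client is
# a stand-in for a real AMP connection to the manager process.
from twisted.trial._dist.workertrial import WorkerLogObserver

class RecordingClient(object):
    def __init__(self):
        self.calls = []

    def callRemote(self, command, **kwargs):
        # The observer forwards each log dict as managercommands.TestWrite.
        self.calls.append((command, kwargs))

client = RecordingClient()
observer = WorkerLogObserver(client)
observer.emit({'message': ['worker started']})
print(client.calls)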
samtx/whatsmyrankine | venv/lib/python2.7/site-packages/flask/views.py | 782 | 5642 | # -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
    #: A list of methods this pluggable view can handle.
methods = None
    #: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view(). However, this moves parts of the logic
    #: from the class declaration to the place where it's hooked into the
    #: routing system.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = []
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# we attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key in d:
if key in http_method_funcs:
methods.add(key.upper())
# if we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the baseclass or another subclass of a base method view
# that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
class MethodView(with_metaclass(MethodViewType, View)):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
    :meth:`get` it means you will respond to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# if the request method is HEAD and we don't have a handler for it
# retry with GET
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
| mit |
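# Hypothetical sketch, not part of the file above: registering a MethodView
# subclass with as_view(), as described in the docstrings. Assumes Flask is
# installed; the module-level counter is illustrative only.
from flask import Flask, jsonify
from flask.views import MethodView

app = Flask(__name__)

class CounterAPI(MethodView):
    counter = 0

    def get(self):
        return jsonify(counter=CounterAPI.counter)

    def post(self):
        CounterAPI.counter += 1
        return 'OK'

app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))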
alxgu/ansible | lib/ansible/plugins/lookup/credstash.py | 43 | 4385 | # (c) 2015, Ensighten <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: credstash
version_added: "2.0"
short_description: retrieve secrets from Credstash on AWS
requirements:
- credstash (python library)
description:
- "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
options:
_terms:
      description: term or list of terms to look up in the credential store
type: list
required: True
table:
description: name of the credstash table to query
default: 'credential-store'
required: True
version:
description: Credstash version
region:
description: AWS region
profile_name:
description: AWS profile to use for authentication
env:
- name: AWS_PROFILE
aws_access_key_id:
description: AWS access key ID
env:
- name: AWS_ACCESS_KEY_ID
aws_secret_access_key:
      description: AWS secret access key
env:
- name: AWS_SECRET_ACCESS_KEY
aws_session_token:
description: AWS session token
env:
- name: AWS_SESSION_TOKEN
"""
EXAMPLES = """
- name: first use credstash to store your secrets
shell: credstash put my-github-password secure123
- name: "Test credstash lookup plugin -- get my github password"
debug: msg="Credstash lookup! {{ lookup('credstash', 'my-github-password') }}"
- name: "Test credstash lookup plugin -- get my other password from us-west-1"
debug: msg="Credstash lookup! {{ lookup('credstash', 'my-other-password', region='us-west-1') }}"
- name: "Test credstash lookup plugin -- get the company's github password"
debug: msg="Credstash lookup! {{ lookup('credstash', 'company-github-password', table='company-passwords') }}"
- name: Example play using the 'context' feature
hosts: localhost
vars:
context:
app: my_app
environment: production
tasks:
- name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
debug: msg="{{ lookup('credstash', 'some-password', context=context) }}"
- name: "Test credstash lookup plugin -- get the password with a context defined here"
debug: msg="{{ lookup('credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
"""
RETURN = """
_raw:
description:
- value(s) stored in Credstash
"""
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
CREDSTASH_INSTALLED = False
try:
import credstash
CREDSTASH_INSTALLED = True
except ImportError:
CREDSTASH_INSTALLED = False
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not CREDSTASH_INSTALLED:
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
ret = []
for term in terms:
try:
version = kwargs.pop('version', '')
region = kwargs.pop('region', None)
table = kwargs.pop('table', 'credential-store')
profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
except Exception as e:
raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
ret.append(val)
return ret
| gpl-3.0 |
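# Hypothetical sketch, not part of the plugin above: calling credstash directly
# from Python, mirroring the getSecret() call the lookup plugin makes. Assumes
# the credstash library and AWS credentials are configured; the secret name,
# region and table are placeholders.
import credstash

secret = credstash.getSecret(
    'my-github-password',
    version='',                 # empty string means "latest"
    region='us-west-1',
    table='credential-store',
)
print(secret)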
jgcaaprom/android_external_chromium_org | third_party/closure_linter/closure_linter/errors.py | 99 | 4184 | #!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
def ByName(name):
"""Get the error code for the given error name.
Args:
name: The name of the error
Returns:
The error code
"""
return globals()[name]
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
# Errors added after 2.3.9:
JSDOC_MISSING_VAR_ARGS_TYPE,
JSDOC_MISSING_VAR_ARGS_NAME,
# Errors added after 2.3.13:
])
| bsd-3-clause |
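# Hypothetical sketch, not part of the file above: resolving error codes by
# name with ByName(), assuming the module is importable as closure_linter.errors.
from closure_linter import errors

assert errors.ByName('LINE_TOO_LONG') == 110
assert errors.ByName('MISSING_SEMICOLON') == 10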
teltek/edx-platform | common/djangoapps/third_party_auth/migrations/0019_consolidate_slug.py | 14 | 2234 | # -*- coding: utf-8 -*-
"""
Custom migration script to add slug field to all ProviderConfig models.
"""
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.text import slugify
def fill_slug_field(apps, schema_editor):
"""
    Fill in the slug field for each ProviderConfig class for backwards compatibility.
"""
OAuth2ProviderConfig = apps.get_model('third_party_auth', 'OAuth2ProviderConfig')
SAMLProviderConfig = apps.get_model('third_party_auth', 'SAMLProviderConfig')
LTIProviderConfig = apps.get_model('third_party_auth', 'LTIProviderConfig')
for config in OAuth2ProviderConfig.objects.all():
config.slug = config.provider_slug
config.save()
for config in SAMLProviderConfig.objects.all():
config.slug = config.idp_slug
config.save()
for config in LTIProviderConfig.objects.all():
config.slug = slugify(config.lti_consumer_key)
config.save()
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0018_auto_20180327_1631'),
]
operations = [
migrations.AddField(
model_name='ltiproviderconfig',
name='slug',
field=models.SlugField(default=b'default', help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
),
migrations.AddField(
model_name='oauth2providerconfig',
name='slug',
field=models.SlugField(default=b'default', help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
),
migrations.AddField(
model_name='samlproviderconfig',
name='slug',
field=models.SlugField(default=b'default', help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
),
migrations.RunPython(fill_slug_field, reverse_code=migrations.RunPython.noop),
]
| agpl-3.0 |
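# Hypothetical sketch, not part of the migration above: how slugify() normalizes
# an LTI consumer key the way fill_slug_field does; the key value is a placeholder.
from django.utils.text import slugify

print(slugify('My LTI Consumer Key 01'))  # -> 'my-lti-consumer-key-01'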
Beauhurst/django | django/db/models/options.py | 6 | 34598 | import copy
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import Manager
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'indexes',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
if not field.remote_field.parent_link:
raise ImproperlyConfigured(
'Add parent_link=True to %s.' % field,
)
else:
auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (self.app_label, self.model_name)
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not private or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is the parent's
                # link.
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Return a list of fields associated with the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation. Always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if getattr(obj, 'parent_link', False) and obj.model != self.concrete_model:
continue
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.private_fields
)
# In order to avoid list manipulation. Always
# return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@property
def has_auto_field(self):
warnings.warn(
'Model._meta.has_auto_field is deprecated in favor of checking if '
'Model._meta.auto_field is not None.',
RemovedInDjango21Warning, stacklevel=2
)
return self.auto_field is not None
@has_auto_field.setter
def has_auto_field(self, value):
pass
@cached_property
def _property_names(self):
"""
Return a set of the names of the properties defined on the model.
Internal helper for model initialization.
"""
return frozenset({
attr for attr in
dir(self.model) if isinstance(getattr(self.model, attr), property)
})
| bsd-3-clause |
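# Hypothetical sketch, not part of the file above: the public side of the
# Options API (model._meta) it implements. Assumes a configured Django project
# with django.contrib.auth installed.
import django
django.setup()  # requires DJANGO_SETTINGS_MODULE to be set

from django.contrib.auth.models import User

opts = User._meta
print(opts.label)                  # 'auth.User'
print(opts.get_field('username'))  # forward field lookup
for field in opts.get_fields():    # forward and reverse fields, parents included
    print(field.name)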
melonproject/oyente | oyente/batch_run.py | 3 | 1356 | import json
import glob
from tqdm import tqdm
import os
import sys
import urllib2
contract_dir = 'contract_data'
cfiles = glob.glob(contract_dir+'/contract1.json')
cjson = {}
print "Loading contracts..."
for cfile in tqdm(cfiles):
cjson.update(json.loads(open(cfile).read()))
results = {}
missed = []
print "Running analysis..."
contracts = cjson.keys()
if os.path.isfile('results.json'):
old_res = json.loads(open('results.json').read())
old_res = old_res.keys()
contracts = [c for c in contracts if c not in old_res]
cores=0
job=0
if len(sys.argv)>=3:
cores = int(sys.argv[1])
job = int(sys.argv[2])
contracts = contracts[(len(contracts)/cores)*job:(len(contracts)/cores)*(job+1)]
print "Job %d: Running on %d contracts..." % (job, len(contracts))
for c in tqdm(contracts):
with open('tmp.evm','w') as of:
of.write(cjson[c][1][2:])
os.system('python oyente.py -ll 30 -s tmp.evm -j -b')
try:
results[c] = json.loads(open('tmp.evm.json').read())
except:
missed.append(c)
with open('results.json', 'w') as of:
of.write(json.dumps(results,indent=1))
with open('missed.json', 'w') as of:
of.write(json.dumps(missed,indent=1))
# urllib2.urlopen('https://dweet.io/dweet/for/oyente-%d-%d?completed=%d&missed=%d&remaining=%d' % (job,cores,len(results),len(missed),len(contracts)-len(results)-len(missed)))
print "Completed."
| gpl-3.0 |
xukunfeng/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/deps.py | 216 | 3993 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import genmsg.msg_loader
import genmsg
# pkg_name - string
# msg_file - string full path
# search_paths - dict of {'pkg':'msg_dir'}
def find_msg_dependencies_with_type(pkg_name, msg_file, search_paths):
# Read and parse the source msg file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
spec = genmsg.msg_loader.load_msg_from_file(msg_context, msg_file, full_type_name)
try:
genmsg.msg_loader.load_depends(msg_context, spec, search_paths)
except genmsg.InvalidMsgSpec as e:
raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(full_type_name, str(e)))
deps = set()
for dep_type_name in msg_context.get_all_depends(full_type_name):
deps.add((dep_type_name, msg_context.get_file(dep_type_name)))
return list(deps)
def find_msg_dependencies(pkg_name, msg_file, search_paths):
deps = find_msg_dependencies_with_type(pkg_name, msg_file, search_paths)
return [d[1] for d in deps]
def find_srv_dependencies_with_type(pkg_name, msg_file, search_paths):
# Read and parse the source msg file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
spec = genmsg.msg_loader.load_srv_from_file(msg_context, msg_file, full_type_name)
try:
genmsg.msg_loader.load_depends(msg_context, spec, search_paths)
except genmsg.InvalidMsgSpec as e:
raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(full_type_name, str(e)))
deps = set()
for dep_type_name in msg_context.get_all_depends(spec.request.full_name):
deps.add((dep_type_name, msg_context.get_file(dep_type_name)))
for dep_type_name in msg_context.get_all_depends(spec.response.full_name):
deps.add((dep_type_name, msg_context.get_file(dep_type_name)))
return list(deps)
def find_srv_dependencies(pkg_name, msg_file, search_paths):
deps = find_srv_dependencies_with_type(pkg_name, msg_file, search_paths)
return [d[1] for d in deps]
#paths = {'std_msgs':'/u/mkjargaard/repositories/mkjargaard/dist-sandbox/std_msgs/msg'}
#file = '/u/mkjargaard/repositories/mkjargaard/dist-sandbox/quux_msgs/msg/QuuxString.msg'
#find_msg_dependencies('quux_msgs', file, paths)
| gpl-3.0 |
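# Hypothetical sketch mirroring the commented-out example at the end of the
# file above; the package name, .msg path and search path are placeholders and
# follow the "dict of {'pkg':'msg_dir'}" convention documented in the comments.
from genmsg.deps import find_msg_dependencies

search_paths = {'std_msgs': '/opt/ros/share/std_msgs/msg'}
deps = find_msg_dependencies('quux_msgs', '/path/to/QuuxString.msg', search_paths)
print(deps)  # full paths of the .msg files QuuxString.msg depends on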
jeasoft/odoo | addons/account_followup/__openerp__.py | 261 | 2938 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payment Follow-up Management',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Module to automate letters for unpaid invoices, with multi-level recalls.
=========================================================================
You can define your multiple levels of recall through the menu:
---------------------------------------------------------------
Configuration / Follow-up / Follow-up Levels
Once it is defined, you can automatically print recalls every day by simply clicking on the menu:
------------------------------------------------------------------------------------------------------
Payment Follow-Up / Send Email and letters
It will generate a PDF / send emails / set manual actions according to the different levels
of recall defined. You can define different policies for different companies.
Note that if you want to check the follow-up level for a given partner/account entry, you can do so from the menu:
------------------------------------------------------------------------------------------------------------------
Reporting / Accounting / Follow-ups Analysis
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['account_accountant', 'mail'],
'data': [
'security/account_followup_security.xml',
'security/ir.model.access.csv',
'report/account_followup_report.xml',
'account_followup_data.xml',
'account_followup_view.xml',
'account_followup_customers.xml',
'wizard/account_followup_print_view.xml',
'res_config_view.xml',
'views/report_followup.xml',
'account_followup_reports.xml'
],
'demo': ['account_followup_demo.xml'],
'test': [
'test/account_followup.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xiandiancloud/ji | lms/djangoapps/bulk_email/models.py | 24 | 9630 | """
Models for bulk email
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration bulk_email --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/bulk_email/migrations/
"""
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, transaction
from html_to_text import html_to_text
from mail_utils import wrap_message
from xmodule_django.models import CourseKeyField
log = logging.getLogger(__name__)
# Bulk email to_options - the send to options that users can
# select from when they send email.
SEND_TO_MYSELF = 'myself'
SEND_TO_STAFF = 'staff'
SEND_TO_ALL = 'all'
TO_OPTIONS = [SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_ALL]
class Email(models.Model):
"""
Abstract base class for common information for an email.
"""
sender = models.ForeignKey(User, default=1, blank=True, null=True)
slug = models.CharField(max_length=128, db_index=True)
subject = models.CharField(max_length=128, blank=True)
html_message = models.TextField(null=True, blank=True)
text_message = models.TextField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta: # pylint: disable=C0111
abstract = True
class CourseEmail(Email):
"""
Stores information for an email to a course.
"""
# Three options for sending that we provide from the instructor dashboard:
# * Myself: This sends an email to the staff member that is composing the email.
#
# * Staff and instructors: This sends an email to anyone in the staff group and
# anyone in the instructor group
#
# * All: This sends an email to anyone enrolled in the course, with any role
# (student, staff, or instructor)
#
TO_OPTION_CHOICES = (
(SEND_TO_MYSELF, 'Myself'),
(SEND_TO_STAFF, 'Staff and instructors'),
(SEND_TO_ALL, 'All')
)
course_id = CourseKeyField(max_length=255, db_index=True)
to_option = models.CharField(max_length=64, choices=TO_OPTION_CHOICES, default=SEND_TO_MYSELF)
def __unicode__(self):
return self.subject
@classmethod
def create(cls, course_id, sender, to_option, subject, html_message, text_message=None):
"""
Create an instance of CourseEmail.
The CourseEmail.save_now method makes sure the CourseEmail entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# automatically generate the stripped version of the text from the HTML markup:
if text_message is None:
text_message = html_to_text(html_message)
# perform some validation here:
if to_option not in TO_OPTIONS:
fmt = 'Course email being sent to unrecognized to_option: "{to_option}" for "{course}", subject "{subject}"'
msg = fmt.format(to_option=to_option, course=course_id, subject=subject)
raise ValueError(msg)
# create the task, then save it immediately:
course_email = cls(
course_id=course_id,
sender=sender,
to_option=to_option,
subject=subject,
html_message=html_message,
text_message=text_message,
)
course_email.save_now()
return course_email
@transaction.autocommit
def save_now(self):
"""
Writes CourseEmail immediately, ensuring the transaction is committed.
Autocommit annotation makes sure the database entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, this autocommit here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
self.save()
class Optout(models.Model):
"""
Stores users that have opted out of receiving emails from a course.
"""
# Allowing null=True to support data migration from email->user.
# We need to first create the 'user' column with some sort of default in order to run the data migration,
# and given the unique index, 'null' is the best default value.
user = models.ForeignKey(User, db_index=True, null=True)
course_id = CourseKeyField(max_length=255, db_index=True)
class Meta: # pylint: disable=C0111
unique_together = ('user', 'course_id')
# Defines the tag that must appear in a template, to indicate
# the location where the email message body is to be inserted.
COURSE_EMAIL_MESSAGE_BODY_TAG = '{{message_body}}'
class CourseEmailTemplate(models.Model):
"""
Stores templates for all emails to a course to use.
This is expected to be a singleton, to be shared across all courses.
Initialization takes place in a migration that in turn loads a fixture.
The admin console interface disables add and delete operations.
Validation is handled in the CourseEmailTemplateForm class.
"""
html_template = models.TextField(null=True, blank=True)
plain_template = models.TextField(null=True, blank=True)
@staticmethod
def get_template():
"""
Fetch the current template
If one isn't stored, an exception is thrown.
"""
try:
return CourseEmailTemplate.objects.get()
except CourseEmailTemplate.DoesNotExist:
log.exception("Attempting to fetch a non-existent course email template")
raise
@staticmethod
def _render(format_string, message_body, context):
"""
Create a text message using a template, message body and context.
Convert message body (`message_body`) into an email message
using the provided template. The template is a format string,
which is rendered using format() with the provided `context` dict.
This doesn't insert the user's text into the template until such time
as we can properly handle errors caused by the message body
(e.g. due to the use of curly braces).
Instead, for now, we insert the message body *after* the substitutions
have been performed, so that anything in the message body that might
interfere will be innocently returned as-is.
Output is returned as a unicode string. It is not encoded as utf-8.
Such encoding is left to the email code, which will use the value
of settings.DEFAULT_CHARSET to encode the message.
"""
# If we wanted to support substitution, we'd call:
# format_string = format_string.replace(COURSE_EMAIL_MESSAGE_BODY_TAG, message_body)
result = format_string.format(**context)
# Note that the body tag in the template will now have been
# "formatted", so we need to do the same to the tag being
# searched for.
message_body_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()
result = result.replace(message_body_tag, message_body, 1)
# finally, return the result, after wrapping long lines and without converting to an encoded byte array.
return wrap_message(result)
def render_plaintext(self, plaintext, context):
"""
Create plain text message.
Convert plain text body (`plaintext`) into plaintext email message using the
stored plain template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.plain_template, plaintext, context)
def render_htmltext(self, htmltext, context):
"""
Create HTML text message.
Convert HTML text body (`htmltext`) into HTML email message using the
stored HTML template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.html_template, htmltext, context)
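# Illustrative sketch (not part of the original module) of how a caller might
# use the two render methods above; the 'course_title' context key is an
# assumption and depends entirely on the stored template:
#   template = CourseEmailTemplate.get_template()
#   text = template.render_plaintext("Hello", {'course_title': 'Demo'})
#   html = template.render_htmltext("<p>Hello</p>", {'course_title': 'Demo'})
# format() substitution is applied to the template first, and the message body
# then replaces the {{message_body}} tag, so braces in the body pass through
# untouched.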
class CourseAuthorization(models.Model):
"""
Enable the course email feature on a course-by-course basis.
"""
# The course that these features are attached to.
course_id = CourseKeyField(max_length=255, db_index=True, unique=True)
# Whether or not to enable instructor email
email_enabled = models.BooleanField(default=False)
@classmethod
def instructor_email_enabled(cls, course_id):
"""
Returns whether or not email is enabled for the given course id.
If email has not been explicitly enabled, returns False.
"""
# If settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] is
# set to False, then we enable email for every course.
if not settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH']:
return True
try:
record = cls.objects.get(course_id=course_id)
return record.email_enabled
except cls.DoesNotExist:
return False
def __unicode__(self):
not_en = "Not "
if self.email_enabled:
not_en = ""
# pylint: disable=no-member
return u"Course '{}': Instructor Email {}Enabled".format(self.course_id.to_deprecated_string(), not_en)
| agpl-3.0 |
yipenggao/moose | python/chigger/misc/AxisSource.py | 6 | 1994 | #pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
from .. import base
from .. import utils
class AxisSource(base.ChiggerFilterSourceBase):
"""
Creates an Axis source for use with the ColorBar.
"""
VTKACTOR_TYPE = vtk.vtkContextActor
@staticmethod
def getOptions():
opt = base.ChiggerFilterSourceBase.getOptions()
opt += utils.AxisOptions.get_options()
return opt
def __init__(self, **kwargs):
super(AxisSource, self).__init__(vtkactor_type=vtk.vtkContextActor, vtkmapper_type=None,
**kwargs)
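# Create the vtkAxis and register it with the context actor's 2D scene so it is drawn when the actor renders.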
self._vtksource = vtk.vtkAxis()
self._vtkactor.GetScene().AddItem(self._vtksource)
def getVTKSource(self):
"""
Return the vtkAxis object.
"""
return self._vtksource
def update(self, **kwargs):
"""
Update the vtkAxis with given settings. (override)
Inputs:
see ChiggerFilterSourceBase
"""
super(AxisSource, self).update(**kwargs)
utils.AxisOptions.set_options(self._vtksource, self._options)
self._vtksource.Update()
| lgpl-2.1 |
gfyoung/pandas | pandas/tests/indexes/common.py | 2 | 28221 | import gc
from typing import Type
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
isna,
)
import pandas._testing as tm
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
class Base:
""" base class for index sub-class tests """
_holder: Type[Index]
_compat_props = ["shape", "ndim", "size", "nbytes"]
def create_index(self) -> Index:
raise NotImplementedError("Method not implemented")
def test_pickle_compat_construction(self):
# need an object to create with
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)"
)
with pytest.raises(TypeError, match=msg):
self._holder()
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = (
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(idx).__name__}"
)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_constructor_name_unhashable(self):
# GH#29069 check that name is hashable
# See also same-named test in tests.series.test_constructors
idx = self.create_index()
with pytest.raises(TypeError, match="Index.name must be a hashable type"):
type(idx)(idx, name=[])
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = "foo"
result = Index(expected)
tm.assert_index_equal(result, expected)
result = Index(expected, name="bar")
expected.name = "bar"
tm.assert_index_equal(result, expected)
else:
expected.names = ["foo", "bar"]
result = Index(expected)
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["foo", "bar"],
),
)
result = Index(expected, names=["A", "B"])
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["A", "B"],
),
)
def test_numeric_compat(self):
idx = self.create_index()
# Check that this doesn't cover MultiIndex case, if/when it does,
# we can remove multi.test_compat.test_numeric_compat
assert not isinstance(idx, MultiIndex)
if type(idx) is Index:
return
typ = type(idx._data).__name__
lmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",
"cannot perform (__mul__|__truediv__|__floordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=lmsg):
idx * 1
rmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",
"cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=rmsg):
1 * idx
div_err = lmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = rmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
1 / idx
floordiv_err = lmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
idx // 1
floordiv_err = rmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform all"):
idx.all()
with pytest.raises(TypeError, match="cannot perform any"):
idx.any()
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match="Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_get_indexer_consistency(self, index):
# See GH 16819
if isinstance(index, IntervalIndex):
# requires index.is_non_overlapping
return
if index.is_unique:
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
def test_copy_name(self, index):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
if isinstance(index, MultiIndex):
return
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self, index):
# gh-35592
if isinstance(index, MultiIndex):
return
assert index.copy(name="mario").name == "mario"
with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
index.copy(name=["mario", "luigi"])
msg = f"{type(index).__name__}.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
index.copy(name=[["mario"]])
def test_copy_dtype_deprecated(self, index):
# GH35853
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
index.copy(dtype=object)
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs["freq"] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
return
index_type = type(index)
result = index_type(index.values, copy=True, **init_kwargs)
if is_datetime64tz_dtype(index.dtype):
result = result.tz_localize("UTC").tz_convert(index.tz)
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
index = index._with_freq(None)
tm.assert_index_equal(index, result)
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values, check_same="same")
def test_memory_usage(self, index):
index._engine.clear_mapping()
result = index.memory_usage()
if index.empty:
# we report 0 for no-length
assert result == 0
return
# non-zero length
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == "object":
assert result3 > result2
def test_argsort(self, request, index):
# separately tested
if isinstance(index, CategoricalIndex):
return
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self, index):
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas/core/indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(index), (CategoricalIndex, RangeIndex)):
# TODO: why type(index)?
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, kind="mergesort")
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
def test_repeat(self):
rep = 2
i = self.create_index()
expected = Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
if isinstance(i, (pd.DatetimeIndex, pd.TimedeltaIndex)):
# where does not preserve freq
i = i._with_freq(None)
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert_base(self, index):
result = index[1:4]
if not len(index):
return
# test 0th element
assert index[0:4].equals(result.insert(0, index[0]))
def test_delete_base(self, index):
if not len(index):
return
if isinstance(index, RangeIndex):
# tested in class
return
expected = index[1:]
result = index.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = index[:-1]
result = index.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
length = len(index)
msg = f"index {length} is out of bounds for axis 0 with size {length}"
with pytest.raises(IndexError, match=msg):
index.delete(length)
def test_equals(self, index):
if isinstance(index, IntervalIndex):
# IntervalIndex tested separately, the index.equals(index.astype(object))
# fails for IntervalIndex
return
assert index.equals(index)
assert index.equals(index.copy())
assert index.equals(index.astype(object))
assert not index.equals(list(index))
assert not index.equals(np.array(index))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(index, RangeIndex):
same_values = Index(index, dtype=object)
assert index.equals(same_values)
assert same_values.equals(index)
if index.nlevels == 1:
# do not test MultiIndex
assert not index.equals(Series(index))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
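# index_b drops the last element, index_c replaces it with a duplicate of the second-to-last, and index_d keeps only the first element.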
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
# For RangeIndex we can convert to Int64Index
tm.assert_series_equal(series_a == item, Series(expected3))
def test_format(self):
# GH35439
idx = self.create_index()
expected = [str(x) for x in idx]
assert idx.format() == expected
def test_format_empty(self):
# GH35712
empty_idx = self._holder([])
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
def test_hasnans_isnans(self, index):
# GH 11343, added tests for hasnans / isnans
if isinstance(index, MultiIndex):
return
# case where the index doesn't include NaN
idx = index.copy(deep=True)
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy(deep=True)
values = np.asarray(idx.values)
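# Inject a missing value suited to the index dtype: iNaT for datetime-like indexes, NaN otherwise; empty and integer indexes are skipped since they cannot hold NaN.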
if len(index) == 0:
return
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy(deep=True)
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self, index):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
if len(index) == 0:
tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
elif not index.hasnans:
tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
def test_join_self_unique(self, join_type):
index = self.create_index()
if index.is_unique:
joined = index.join(index, how=join_type)
assert (index == joined).all()
def test_map(self):
# callable
index = self.create_index()
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype("int64")
else:
expected = index
result = index.map(lambda x: x)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, mapper):
index = self.create_index()
if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip(f"skipping tests for {type(index)}")
identity = mapper(index.values, index)
# we don't infer to UInt64 for a dict
if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):
expected = index.astype("int64")
else:
expected = index
result = index.map(identity)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
# empty mappable
expected = Index([np.nan] * len(index))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
def test_map_str(self):
# GH 31202
index = self.create_index()
result = index.map(str)
expected = Index([str(x) for x in index], dtype=object)
tm.assert_index_equal(result, expected)
def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
fill = index[0]
msg = "putmask: mask and data must be the same size"
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) + 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) - 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask("foo", fill)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(self, copy, name, ordered):
# GH 18630
index = self.create_index()
if name:
index = index.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, ordered=ordered)
tm.assert_index_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, dtype=dtype)
tm.assert_index_equal(result, expected)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = index.astype("category", copy=copy)
expected = CategoricalIndex(index.values, name=name)
tm.assert_index_equal(result, expected)
def test_is_unique(self):
# initialize a unique index
index = self.create_index().drop_duplicates()
assert index.is_unique is True
# empty index should be unique
index_empty = index[:0]
assert index_empty.is_unique is True
# test basic dupes
index_dup = index.insert(0, index[0])
assert index_dup.is_unique is False
# single NA should be unique
index_na = index.insert(0, np.nan)
assert index_na.is_unique is True
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
@pytest.mark.arm_slow
def test_engine_reference_cycle(self):
# GH27585
index = self.create_index()
nrefs_pre = len(gc.get_referrers(index))
index._engine
assert len(gc.get_referrers(index)) == nrefs_pre
def test_getitem_2d_deprecated(self):
# GH#30588
idx = self.create_index()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
res = idx[:, None]
assert isinstance(res, np.ndarray), type(res)
def test_contains_requires_hashable_raises(self):
idx = self.create_index()
msg = "unhashable type: 'list'"
with pytest.raises(TypeError, match=msg):
[] in idx
msg = "|".join(
[
r"unhashable type: 'dict'",
r"must be real number, not dict",
r"an integer is required",
r"\{\}",
r"pandas\._libs\.interval\.IntervalTree' is not iterable",
]
)
with pytest.raises(TypeError, match=msg):
{} in idx._engine
def test_copy_shares_cache(self):
# GH32898, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
assert copy._cache is idx._cache
def test_shallow_copy_shares_cache(self):
# GH32669, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._shallow_copy()
assert shallow_copy._cache is idx._cache
shallow_copy = idx._shallow_copy(idx._data)
assert shallow_copy._cache is not idx._cache
assert shallow_copy._cache == {}
| bsd-3-clause |
hyqneuron/pylearn2-maxsom | pylearn2/scripts/papers/maxout/mytests/mytest4.py | 1 | 8009 | from pylearn2.models.mlp import MLP
from pylearn2.models.maxout import Maxout
from pylearn2.training_algorithms.sgd import SGD
import logging
import warnings
import sys
import numpy as np
from theano.compat import six
from theano import config
from theano import function
from theano.gof.op import get_debug_values
import theano.tensor as T
from pylearn2.compat import OrderedDict, first_key
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, NullSpace
from pylearn2.train_extensions import TrainExtension
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.learning_rule import MomentumAdjustor \
as LRMomentumAdjustor
from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size
from pylearn2.utils import py_integer_types, py_float_types
from pylearn2.utils import safe_zip
from pylearn2.utils import serial
from pylearn2.utils import sharedX
from pylearn2.utils import contains_nan
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.timing import log_timing
from pylearn2.utils.rng import make_np_rng
log = logging.getLogger(__name__)
class TestAlgo(SGD):
# this train function mainly to hack into weight tracking
def train(self, dataset):
"""
Runs one epoch of SGD training on the specified dataset.
Parameters
----------
dataset : Dataset
"""
self.first = False
rng = self.rng
if not is_stochastic(self.train_iteration_mode):
rng = None
data_specs = self.cost.get_data_specs(self.model)
# The iterator should be built from flat data specs, so it returns
# flat, non-redundent tuples of data.
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
if len(space_tuple) == 0:
# No data will be returned by the iterator, and it is impossible
# to know the size of the actual batch.
# It is not decided yet what the right thing to do should be.
raise NotImplementedError("Unable to train with SGD, because "
"the cost does not actually use data from the data set. "
"data_specs: %s" % str(data_specs))
flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
iterator = dataset.iterator(mode=self.train_iteration_mode,
batch_size=self.batch_size,
data_specs=flat_data_specs, return_tuple=True,
rng = rng, num_batches = self.batches_per_iter)
"""
if not hasattr(self, 'batch_count'):
self.batch_count=0
self.param_records=[]
print "Going into first batch"
param_init = self.model.get_param_values()
"""
on_load_batch = self.on_load_batch
for batch in iterator:
for callback in on_load_batch:
callback(*batch)
self.sgd_update(*batch)
# iterator might return a smaller batch if dataset size
# isn't divisible by batch_size
# Note: if data_specs[0] is a NullSpace, there is no way to know
# how many examples would actually have been in the batch,
# since it was empty, so actual_batch_size would be reported as 0.
actual_batch_size = flat_data_specs[0].np_batch_size(batch)
self.monitor.report_batch(actual_batch_size)
for callback in self.update_callbacks:
callback(self)
"""
param_first = self.model.get_param_values()
with log_timing(log, "Saving initial param and first param"):
serial.save("param_init_first.pkl", (param_init, param_first))
sys.exit(0)
# Now, we record the weights every 50 minibatches
# So 10 records per epoch
self.batch_count+=1
if self.batch_count%50==0:
self.param_records.append(self.model.get_param_values())
# for every 2 epochs, we save the param_records
if self.batch_count%(50*20)==0:
record_path = './mytest/'+str(self.batch_count)+'.pkl'
print "We are now about to same lots of param records"
with log_timing(log, 'Saving param records to'+record_path):
serial.save(record_path, self.param_records)
self.param_records=[]
"""
class SOMaxout(Maxout):
"""
A SOM-Maxout layer based on Maxout
Each maxout unit is a group, and units within the same group learn
"together" by copying each other's update in an SOM-like manner.
Usually, in a maxout group, if a unit is winning/maxing all the time, the
other units in its group will never be used, never get updated, and thus get
stuck forever. This wastes maxout's capacity.
SOM-Maxout solves this problem by asking units within the same somaxout
group to be each others' buddies. The winners will help their neighbours to
learn "together". That is, if the winner gets a delta w, it will ask its
neighbours to get a SOM_factor * delta w.
decay_rate
"""
def __init__(self, *args, **kwargs):
super(SOMaxout, self).__init__(*args, **kwargs)
print "initiating mytest4"
assert self.num_pieces==5, "This test only supports 5 pieces per group"
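# Neighbourhood weights for a 5-piece group: entry [i][j] is the fraction of unit i's gradient shared with unit j (1.0 on the diagonal, decaying to 0.0 for the most distant pair).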
matrix_value = np.asarray([[ 1. , 0.8, 0.5, 0.2, 0. ],
[ 0.8, 1. , 0.8, 0.5, 0.2],
[ 0.5, 0.8, 1. , 0.8, 0.5],
[ 0.2, 0.5, 0.8, 1. , 0.8],
[ 0. , 0.2, 0.5, 0.8, 1. ]])
self.SOM_copy_matrix = sharedX(matrix_value)
self.standardize_norm = True
print "SOM_copy_matrix established for layer "+self.layer_name
print matrix_value
def modify_grads(self, grads):
"""
W is an (n_input x num_units*num_pieces) weight matrix.
The objective of this function is to ask nearby units in the same SOM
group to learn from each other by copying each other's grads, weighted
by the SOM_copy_matrix; for a 2-piece group that matrix would look like
[1, 0.8]
[0.8, 1]
"""
W, = self.transformer.get_params()
grad_old = grads[W]
npi = self.num_pieces
# within each Maxout unit, we perform a within-group copy of grads.
# each within-group copy produces an input-size by num_pieces matrix.
grad_list= [ T.dot(grad_old[:, i*npi:(i+1)*npi ], self.SOM_copy_matrix)
for i in xrange(self.num_units)]
# we then concatenate all those matrices into an input-size by
# num_units*num_pieces matrix
grads[W] = T.concatenate(grad_list, axis=1)
print "Gradients for layer "+self.layer_name+" modified."
def _modify_updates(self, updates):
"""
At each update, make sure all units in the same somaxout group have an
equal norm.
"""
W, = self.transformer.get_params()
update_old = updates[W]
npi = self.num_pieces
if self.standardize_norm:
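# Compute each column's L2 norm, average the norms within each maxout group, and rescale every column so all pieces of a group share that (optionally clipped) norm.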
norms = T.sqrt(T.sum(T.sqr(update_old), axis=0))
norm_mean = norms.reshape([self.num_units, self.num_pieces]).mean(axis=1)
norm_desired=T.repeat(norm_mean, npi)
if self.max_col_norm is not None:
norm_desired = T.clip(norm_desired, 0, self.max_col_norm)
updates[W] = update_old * norm_desired / norms
print "Updates for layer "+self.layer_name+" modified with within-group norm standardization"
| bsd-3-clause |
Xperia-Nicki/android_platform_sony_nicki | external/webkit/Tools/Scripts/webkitpy/common/system/ospath_unittest.py | 15 | 2518 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for ospath.py."""
import os
import unittest
from webkitpy.common.system.ospath import relpath
# Make sure the tests in this class are platform independent.
class RelPathTest(unittest.TestCase):
"""Tests relpath()."""
os_path_abspath = lambda self, path: path
def _rel_path(self, path, abs_start_path):
return relpath(path, abs_start_path, self.os_path_abspath)
def test_same_path(self):
rel_path = self._rel_path("WebKit", "WebKit")
self.assertEquals(rel_path, "")
def test_long_rel_path(self):
start_path = "WebKit"
expected_rel_path = os.path.join("test", "Foo.txt")
path = os.path.join(start_path, expected_rel_path)
rel_path = self._rel_path(path, start_path)
self.assertEquals(expected_rel_path, rel_path)
def test_none_rel_path(self):
"""Test _rel_path() with None return value."""
start_path = "WebKit"
path = os.path.join("other_dir", "foo.txt")
rel_path = self._rel_path(path, start_path)
self.assertTrue(rel_path is None)
rel_path = self._rel_path("Tools", "WebKit")
self.assertTrue(rel_path is None)
| apache-2.0 |
troya2/pjsip | doc/pjsip-book/conf.py | 61 | 8065 | # -*- coding: utf-8 -*-
#
# The PJSIP Book documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 30 06:36:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ 'breathe', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PJSUA2 Documentation'
copyright = u'2014, Teluu Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0-alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PJSUA2Doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PJSUA2Doc.tex', u'PJSUA2 Documentation',
u'Sauw Ming Liong, Benny Prijono', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pjsua2doc', u'PJSUA2 Documentation',
[u'Sauw Ming Liong', u'Benny Prijono'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PJSUA2Doc', u'PJSUA2 Documentation',
u'Sauw Ming Liong@*Benny Prijono', 'ThePJSIPBook', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
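# Breathe (Doxygen bridge) configuration: point the "pjsua2" project at the
# generated Doxygen XML and at the pjsua2 C++ headers, and treat .hpp files as
# C++ domain sources.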
breathe_projects = {
"pjsua2": "xml/",
}
breathe_default_project = "pjsua2"
breathe_projects_source = {
"pjsua2":"../../pjsip/include/pjsua2"
}
breathe_domain_by_extension = {
"hpp":"cpp"
}
| gpl-2.0 |
ojake/django | django/contrib/gis/db/backends/postgis/models.py | 396 | 2158 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
"""
The 'geometry_columns' table from the PostGIS. See the PostGIS
documentation at Ch. 4.3.2.
On PostGIS 2, this is a view.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
| bsd-3-clause |