Column      Type            Range / values
repo_name   stringlengths   6 .. 100
path        stringlengths   4 .. 294
copies      stringclasses   981 values
size        stringlengths   4 .. 6
content     stringlengths   606 .. 896k
license     stringclasses   15 values
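The records that follow share this six-field schema; every field, including size, is stored as a string. As a minimal sketch of how such a dump can be consumed (assuming a Hugging Face `datasets`-style layout; the dataset identifier "example/code-corpus" below is hypothetical):

# Minimal sketch, assuming the dump follows the Hugging Face `datasets`
# layout implied by the schema above. The dataset id is hypothetical.
from datasets import load_dataset

ds = load_dataset("example/code-corpus", split="train")  # hypothetical id

for record in ds.select(range(3)):
    # All six fields are strings, so numeric fields need casting.
    print(record["repo_name"], record["path"], record["license"])
    print("  size:", int(record["size"]), "bytes in 'content'")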
whiteshield/EHScripter
EHScripter/netsparker.py
1
6143
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
import re
import string
from io import StringIO

from lxml import etree

try:
    from .util import *
except Exception as e:
    from util import *


class NetsparkerToMarkdown:
    def __init__(self, options):
        self.options=options
        self.template=string.Template(self.options['template'])
        if self.options['merge']:
            self.template=string.Template(self.options['merge_template'])
            self.merge_findinglist_template=string.Template(self.options['merge_findinglist_template'])
        self.process()

    def process(self):
        if not os.path.exists(self.options['output_dir']):
            os.makedirs(self.options['output_dir'])
        filelist=[]
        if os.path.isfile(self.options['load_file']):
            filelist.append(self.options['load_file'])
        elif os.path.isdir(self.options['load_file']):
            for name in os.listdir(self.options["load_file"]):
                if os.path.isfile(self.options['load_file']+'/'+name) and len(name)>11 and name[-11:]==".netsparker":
                    filelist.append(self.options["load_file"]+'/'+name)
        counter=1
        findings={}
        for processfile in filelist:
            content=open(processfile).read()
            fileparts=content.split('<!-- Vulnerability Details -->')
            vulns=fileparts[1].split('<h1')
            fullparser=etree.HTMLParser()
            fullhtml=etree.parse(StringIO(content), fullparser)
            Target=self.attrib(fullhtml.xpath("//span[@class='dashboard-url']/a"),'href','N/A')
            for vuln in vulns[1:]:
                vuln='<h1'+vuln
                parser=etree.HTMLParser()
                vulnobj=etree.parse(StringIO(vuln), parser)
                h1=self.value(vulnobj.xpath('//h1//text()'),'N/A')
                Vulnerability=re.sub(r'\d+\\\. ','',h1)
                Risk=self.value(vulnobj.xpath("//div[@class='vuln-block']/div[2]//text()"),'N/A').title()
                VulnDesc=self.value(vulnobj.xpath("//div[@class='vulndesc']//text()"),'N/A')
                if Risk=='Information':
                    Risk='Info'
                if Risk=='Important':
                    Risk='High'
                VulnDetails=vulnobj.xpath("//div[@class='vulnerability-detail']")
                for VulnDetail in VulnDetails:
                    h2=self.value(VulnDetail.xpath('./div/h2//text()'),'N/A')
                    SubVulnerability=re.sub(r'\d+\.\d+\. ','',h2)
                    Link=self.attrib(VulnDetail.xpath('./div/div[2]/a'),'href','N/A')
                    ParamTableRows=VulnDetail.xpath('./div/table//tr')
                    lines=0
                    ParamTable=''
                    for ParamTableRow in ParamTableRows:
                        ParamTableCells=ParamTableRow.xpath('./td')
                        cells=0
                        for ParamTableCell in ParamTableCells:
                            cell=self.value(ParamTableCell.xpath('.//text()'),'N/A').strip()
                            ParamTable+='| %s '%cell
                            cells+=1
                        ParamTable='%s|\n'%ParamTable
                        if lines==0:
                            sepstr=''
                            for i in range(0,cells):
                                sepstr+='| ------- '
                            sepstr='%s|\n'%sepstr
                            ParamTable+=sepstr
                        lines+=1
                    d={'Target':Target, 'Vulnerability':Vulnerability, 'Risk':Risk, 'VulnDesc':VulnDesc, 'SubVulnerability':SubVulnerability, 'Link':Link, 'ParamTable':ParamTable,'findinglist':''}
                    if not self.options['merge']:
                        dirname=slugify('%s-%s-%s-%04d-netsparker'%(Risk, Target, Vulnerability, counter))
                        if not os.path.exists(self.options['output_dir']+'/'+dirname):
                            os.makedirs(self.options['output_dir']+'/'+dirname)
                        counter+=1
                        temp=self.template
                        text=temp.substitute(d)
                        if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+dirname+'/document.md')):
                            tmpfile = open(self.options['output_dir']+'/'+dirname+'/document.md', 'w')
                            tmpfile.write(text)
                            tmpfile.close()
                    else:
                        slug=slugify('%s-%s-netsparker'%(Risk, Vulnerability))
                        if not findings.get(slug):
                            findings[slug]=[]
                        findings[slug].append(d)
        for key, values in findings.items():
            findinglist = ''
            for d in values:
                d['VulnDesc']=d['VulnDesc'].replace('$','$$')
                d['ParamTable']=d['ParamTable'].replace('$','$$')
                d['Link']=d['Link'].replace('$','$$')
                temp=self.merge_findinglist_template
                text=temp.substitute(d)
                findinglist+=text+"\n\n"
            d['findinglist']=findinglist
            filename=key+".md"
            temp=self.template
            text=temp.substitute(d)
            if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+filename)):
                tmpfile = open(self.options['output_dir']+'/'+filename, 'w')
                tmpfile.write(text)
                tmpfile.close()

    def value(self, x, default):
        try:
            #ret=x[0].strip()
            ret="\n".join([html2markdown(html2markdown(y.strip(), True)) for y in x])
        except Exception as e:
            try:
                ret=x.strip()
            except Exception as ee:
                ret=default
        return ret

    def attrib(self, x, attr, default):
        try:
            ret=x[0].attrib[attr]
        except Exception as e:
            try:
                ret=x.attrib[attr]
            except Exception as ee:
                ret=default
        return ret
gpl-2.0
OpenCanada/website
people/migrations/0003_create_list_page.py
2
1217
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django import VERSION as DJANGO_VERSION
from django.db import migrations, models


def create_page(apps, schema_editor):
    Page = apps.get_model("wagtailcore", "Page")
    ContributorListPage = apps.get_model("people", "ContributorListPage")
    home_page = Page.objects.get(slug="home")

    ContentType = apps.get_model("contenttypes", "ContentType")
    contributor_list_page_content_type, created = ContentType.objects.get_or_create(
        model='contributorlistpage',
        app_label='people',
        defaults={'name': 'contributorlistpage'} if DJANGO_VERSION < (1, 8) else {}
    )

    # Create features page
    features_page = ContributorListPage.objects.create(
        title="Contributors",
        slug='contributors',
        content_type_id=contributor_list_page_content_type.pk,
        path='000100010003',
        depth=3,
        numchild=0,
        url_path='/home/contributors/',
    )

    home_page.numchild += 1
    home_page.save()


class Migration(migrations.Migration):

    dependencies = [
        ('people', '0002_contributorlistpage'),
    ]

    operations = [
        migrations.RunPython(create_page),
    ]
mit
GinnyN/Team-Fortress-RPG-Generators
build/lib/django/contrib/staticfiles/management/commands/findstatic.py
244
1230
import os
from optparse import make_option

from django.core.management.base import LabelCommand
from django.utils.encoding import smart_str, smart_unicode
from django.contrib.staticfiles import finders


class Command(LabelCommand):
    help = "Finds the absolute paths for the given static file(s)."
    args = "[file ...]"
    label = 'static file'
    option_list = LabelCommand.option_list + (
        make_option('--first', action='store_false', dest='all', default=True,
                    help="Only return the first match for each static file."),
    )

    def handle_label(self, path, **options):
        verbosity = int(options.get('verbosity', 1))
        result = finders.find(path, all=options['all'])
        path = smart_unicode(path)
        if result:
            if not isinstance(result, (list, tuple)):
                result = [result]
            output = u'\n  '.join(
                (smart_unicode(os.path.realpath(path)) for path in result))
            self.stdout.write(
                smart_str(u"Found '%s' here:\n  %s\n" % (path, output)))
        else:
            if verbosity >= 1:
                self.stderr.write(
                    smart_str("No matching file found for '%s'.\n" % path))
bsd-3-clause
hryamzik/ansible
lib/ansible/modules/monitoring/grafana_plugin.py
14
7425
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'metadata_version': '1.1'
}

DOCUMENTATION = '''
---
module: grafana_plugin
author:
  - Thierry Sallé (@tsalle)
version_added: "2.5"
short_description: Manage Grafana plugins via grafana-cli
description:
  - Install and remove Grafana plugins.
options:
  name:
    description:
      - Name of the plugin.
    required: true
  version:
    description:
      - Version of the plugin to install.
      - Default to latest.
  grafana_plugins_dir:
    description:
      - Directory where Grafana plugin will be installed.
  grafana_repo:
    description:
      - Grafana repository. If not set, grafana-cli will use the default value C(https://grafana.net/api/plugins).
  grafana_plugin_url:
    description:
      - Custom Grafana plugin URL.
      - Requires grafana 4.6.x or later.
  state:
    description:
      - Status of the Grafana plugin.
      - If latest is set, the version parameter will be ignored.
    choices: [ absent, present ]
    default: present
'''

EXAMPLES = '''
---
- name: Install - update Grafana piechart panel plugin
  grafana_plugin:
    name: grafana-piechart-panel
    version: latest
    state: present
'''

RETURN = '''
---
version:
  description: version of the installed / removed plugin.
  type: string
  returned: always
'''

import base64
import json
import os

from ansible.module_utils.basic import AnsibleModule

__metaclass__ = type


class GrafanaCliException(Exception):
    pass


def grafana_cli_bin(params):
    '''
    Get the grafana-cli binary path with global options.
    Raise a GrafanaCliException if the grafana-cli is not present or not in PATH

    :param params: ansible module params. Used to fill grafana-cli global params.
    '''
    program = 'grafana-cli'
    grafana_cli = None

    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            grafana_cli = program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                grafana_cli = exe_file
                break

    if grafana_cli is None:
        raise GrafanaCliException('grafana-cli binary is not present or not in PATH')
    else:
        if 'grafana_plugin_url' in params and params['grafana_plugin_url']:
            grafana_cli = '{} {} {}'.format(grafana_cli, '--pluginUrl', params['grafana_plugin_url'])
        if 'grafana_plugins_dir' in params and params['grafana_plugins_dir']:
            grafana_cli = '{} {} {}'.format(grafana_cli, '--pluginsDir', params['grafana_plugins_dir'])
        if 'grafana_repo' in params and params['grafana_repo']:
            grafana_cli = '{} {} {}'.format(grafana_cli, '--repo', params['grafana_repo'])
        if 'validate_certs' in params and params['validate_certs'] is False:
            grafana_cli = '{} {}'.format(grafana_cli, '--insecure')
        return '{} {}'.format(grafana_cli, 'plugins')


def get_grafana_plugin_version(module, params):
    '''
    Fetch grafana installed plugin version. Return None if plugin is not installed.

    :param module: ansible module object. used to run system commands.
    :param params: ansible module params.
    '''
    grafana_cli = grafana_cli_bin(params)
    rc, stdout, stderr = module.run_command('{} ls'.format(grafana_cli))
    stdout_lines = stdout.split("\n")
    for line in stdout_lines:
        if line.find(' @ ') != -1:
            line = line.rstrip()
            plugin_name, plugin_version = line.split(' @ ')
            if plugin_name == params['name']:
                return plugin_version
    return None


def grafana_plugin(module, params):
    '''
    Install update or remove grafana plugin

    :param module: ansible module object. used to run system commands.
    :param params: ansible module params.
    '''
    grafana_cli = grafana_cli_bin(params)
    if params['state'] == 'present':
        grafana_plugin_version = get_grafana_plugin_version(module, params)
        if grafana_plugin_version is not None:
            if 'version' in params and params['version']:
                if params['version'] == grafana_plugin_version:
                    return {'msg': 'Grafana plugin already installed',
                            'changed': False,
                            'version': grafana_plugin_version}
                else:
                    if params['version'] == 'latest' or params['version'] is None:
                        cmd = '{} update {}'.format(grafana_cli, params['name'])
                    else:
                        cmd = '{} install {} {}'.format(grafana_cli, params['name'], params['version'])
            else:
                return {'msg': 'Grafana plugin already installed',
                        'changed': False,
                        'version': grafana_plugin_version}
        else:
            if 'version' in params:
                if params['version'] == 'latest' or params['version'] is None:
                    cmd = '{} install {}'.format(grafana_cli, params['name'])
                else:
                    cmd = '{} install {} {}'.format(grafana_cli, params['name'], params['version'])
            else:
                cmd = '{} install {}'.format(grafana_cli, params['name'])
    else:
        cmd = '{} uninstall {}'.format(grafana_cli, params['name'])

    rc, stdout, stderr = module.run_command(cmd)
    if rc == 0:
        stdout_lines = stdout.split("\n")
        for line in stdout_lines:
            if line.find(params['name']):
                if line.find(' @ ') != -1:
                    line = line.rstrip()
                    plugin_name, plugin_version = line.split(' @ ')
                else:
                    plugin_version = None
                return {'msg': 'Grafana plugin {} installed : {}'.format(params['name'], cmd),
                        'changed': True,
                        'version': plugin_version}
    else:
        raise GrafanaCliException("'{}' execution returned an error : [{}] {} {}".format(cmd, rc, stdout, stderr))


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str'),
            version=dict(type='str'),
            grafana_plugins_dir=dict(type='str'),
            grafana_repo=dict(type='str'),
            grafana_plugin_url=dict(type='str'),
            state=dict(choices=['present', 'absent'], default='present')
        ),
        supports_check_mode=False
    )

    try:
        result = grafana_plugin(module, module.params)
    except GrafanaCliException as e:
        module.fail_json(
            failed=True,
            msg="{}".format(e)
        )
        return
    except Exception as e:
        module.fail_json(
            failed=True,
            msg="{} : {} ".format(type(e), e)
        )
        return

    module.exit_json(
        failed=False,
        **result
    )
    return


if __name__ == '__main__':
    main()
gpl-3.0
leoc/home-assistant
homeassistant/components/sensor/rfxtrx.py
6
4781
""" Support for RFXtrx sensors. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.rfxtrx/ """ import logging import voluptuous as vol import homeassistant.components.rfxtrx as rfxtrx import homeassistant.helpers.config_validation as cv from homeassistant.const import CONF_PLATFORM from homeassistant.helpers.entity import Entity from homeassistant.util import slugify from homeassistant.components.rfxtrx import ( ATTR_AUTOMATIC_ADD, ATTR_NAME, ATTR_FIREEVENT, CONF_DEVICES, ATTR_DATA_TYPE, DATA_TYPES, ATTR_ENTITY_ID) DEPENDENCIES = ['rfxtrx'] _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = vol.Schema({ vol.Required(CONF_PLATFORM): rfxtrx.DOMAIN, vol.Optional(CONF_DEVICES, default={}): vol.All(dict, rfxtrx.valid_sensor), vol.Optional(ATTR_AUTOMATIC_ADD, default=False): cv.boolean, }, extra=vol.ALLOW_EXTRA) def setup_platform(hass, config, add_devices_callback, discovery_info=None): """Setup the RFXtrx platform.""" # pylint: disable=too-many-locals from RFXtrx import SensorEvent sensors = [] for packet_id, entity_info in config[CONF_DEVICES].items(): event = rfxtrx.get_rfx_object(packet_id) device_id = "sensor_" + slugify(event.device.id_string.lower()) if device_id in rfxtrx.RFX_DEVICES: continue _LOGGER.info("Add %s rfxtrx.sensor", entity_info[ATTR_NAME]) sub_sensors = {} data_types = entity_info[ATTR_DATA_TYPE] if len(data_types) == 0: data_types = [''] for data_type in DATA_TYPES: if data_type in event.values: data_types = [data_type] break for _data_type in data_types: new_sensor = RfxtrxSensor(None, entity_info[ATTR_NAME], _data_type, entity_info[ATTR_FIREEVENT]) sensors.append(new_sensor) sub_sensors[_data_type] = new_sensor rfxtrx.RFX_DEVICES[device_id] = sub_sensors add_devices_callback(sensors) def sensor_update(event): """Callback for sensor updates from the RFXtrx gateway.""" if not isinstance(event, SensorEvent): return device_id = "sensor_" + slugify(event.device.id_string.lower()) if device_id in rfxtrx.RFX_DEVICES: sensors = rfxtrx.RFX_DEVICES[device_id] for key in sensors: sensor = sensors[key] sensor.event = event # Fire event if sensors[key].should_fire_event: sensor.hass.bus.fire( "signal_received", { ATTR_ENTITY_ID: sensors[key].entity_id, } ) return # Add entity if not exist and the automatic_add is True if not config[ATTR_AUTOMATIC_ADD]: return pkt_id = "".join("{0:02x}".format(x) for x in event.data) _LOGGER.info("Automatic add rfxtrx.sensor: %s", pkt_id) data_type = '' for _data_type in DATA_TYPES: if _data_type in event.values: data_type = _data_type break new_sensor = RfxtrxSensor(event, pkt_id, data_type) sub_sensors = {} sub_sensors[new_sensor.data_type] = new_sensor rfxtrx.RFX_DEVICES[device_id] = sub_sensors add_devices_callback([new_sensor]) if sensor_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS: rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(sensor_update) class RfxtrxSensor(Entity): """Representation of a RFXtrx sensor.""" def __init__(self, event, name, data_type, should_fire_event=False): """Initialize the sensor.""" self.event = event self._name = name self.should_fire_event = should_fire_event self.data_type = data_type self._unit_of_measurement = DATA_TYPES.get(data_type, '') def __str__(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" if not self.event: return None return self.event.values.get(self.data_type) @property def name(self): """Get the name of the sensor.""" return self._name @property def 
device_state_attributes(self): """Return the state attributes.""" if not self.event: return None return self.event.values @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" return self._unit_of_measurement
mit
darktears/chromium-crosswalk
third_party/WebKit/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
56
2400
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import unittest

from mocktool import MockOptions


class MockOptionsTest(unittest.TestCase):
    # MockOptions() should implement the same semantics that
    # optparse.Values does.

    def test_get__set(self):
        # Test that we can still set options after we construct the
        # object.
        options = MockOptions()
        options.foo = 'bar'
        self.assertEqual(options.foo, 'bar')

    def test_get__unset(self):
        # Test that unset options raise an exception (regular Mock
        # objects return an object and hence are different from
        # optparse.Values()).
        options = MockOptions()
        self.assertRaises(AttributeError, lambda: options.foo)

    def test_kwarg__set(self):
        # Test that keyword arguments work in the constructor.
        options = MockOptions(foo='bar')
        self.assertEqual(options.foo, 'bar')
bsd-3-clause
ntuecon/server
pyenv/Lib/site-packages/twisted/web/domhelpers.py
12
8640
# -*- test-case-name: twisted.web.test.test_domhelpers -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
A library for performing interesting tasks with DOM objects.
"""

import StringIO

from twisted.web import microdom
from twisted.web.microdom import getElementsByTagName, escape, unescape
# These modules are imported here as a shortcut.
escape
getElementsByTagName


class NodeLookupError(Exception):
    pass


def substitute(request, node, subs):
    """
    Look through the given node's children for strings, and attempt to do
    string substitution with the given parameter.
    """
    for child in node.childNodes:
        if hasattr(child, 'nodeValue') and child.nodeValue:
            child.replaceData(0, len(child.nodeValue), child.nodeValue % subs)
        substitute(request, child, subs)


def _get(node, nodeId, nodeAttrs=('id','class','model','pattern')):
    """
    (internal) Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes.
    """
    if hasattr(node, 'hasAttributes') and node.hasAttributes():
        for nodeAttr in nodeAttrs:
            if (str(node.getAttribute(nodeAttr)) == nodeId):
                return node
    if node.hasChildNodes():
        if hasattr(node.childNodes, 'length'):
            length = node.childNodes.length
        else:
            length = len(node.childNodes)
        for childNum in range(length):
            result = _get(node.childNodes[childNum], nodeId)
            if result:
                return result


def get(node, nodeId):
    """
    Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes. If there is no such node, raise
    L{NodeLookupError}.
    """
    result = _get(node, nodeId)
    if result:
        return result
    raise NodeLookupError(nodeId)


def getIfExists(node, nodeId):
    """
    Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes. If there is no such node, return
    L{None}.
    """
    return _get(node, nodeId)


def getAndClear(node, nodeId):
    """Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes. If there is no such node, raise
    L{NodeLookupError}. Remove all child nodes before returning.
    """
    result = get(node, nodeId)
    if result:
        clearNode(result)
    return result


def clearNode(node):
    """
    Remove all children from the given node.
    """
    node.childNodes[:] = []


def locateNodes(nodeList, key, value, noNesting=1):
    """
    Find subnodes in the given node where the given attribute
    has the given value.
    """
    returnList = []
    if not isinstance(nodeList, type([])):
        return locateNodes(nodeList.childNodes, key, value, noNesting)
    for childNode in nodeList:
        if not hasattr(childNode, 'getAttribute'):
            continue
        if str(childNode.getAttribute(key)) == value:
            returnList.append(childNode)
            if noNesting:
                continue
        returnList.extend(locateNodes(childNode, key, value, noNesting))
    return returnList


def superSetAttribute(node, key, value):
    if not hasattr(node, 'setAttribute'):
        return
    node.setAttribute(key, value)
    if node.hasChildNodes():
        for child in node.childNodes:
            superSetAttribute(child, key, value)


def superPrependAttribute(node, key, value):
    if not hasattr(node, 'setAttribute'):
        return
    old = node.getAttribute(key)
    if old:
        node.setAttribute(key, value+'/'+old)
    else:
        node.setAttribute(key, value)
    if node.hasChildNodes():
        for child in node.childNodes:
            superPrependAttribute(child, key, value)


def superAppendAttribute(node, key, value):
    if not hasattr(node, 'setAttribute'):
        return
    old = node.getAttribute(key)
    if old:
        node.setAttribute(key, old + '/' + value)
    else:
        node.setAttribute(key, value)
    if node.hasChildNodes():
        for child in node.childNodes:
            superAppendAttribute(child, key, value)


def gatherTextNodes(iNode, dounescape=0, joinWith=""):
    """Visit each child node and collect its text data, if any, into a string.
    For example::

        >>> doc=microdom.parseString('<a>1<b>2<c>3</c>4</b></a>')
        >>> gatherTextNodes(doc.documentElement)
        '1234'

    With dounescape=1, also convert entities back into normal characters.

    @return: the gathered nodes as a single string
    @rtype: str
    """
    gathered=[]
    gathered_append=gathered.append
    slice=[iNode]
    while len(slice)>0:
        c=slice.pop(0)
        if hasattr(c, 'nodeValue') and c.nodeValue is not None:
            if dounescape:
                val=unescape(c.nodeValue)
            else:
                val=c.nodeValue
            gathered_append(val)
        slice[:0]=c.childNodes
    return joinWith.join(gathered)


class RawText(microdom.Text):
    """This is an evil and horrible speed hack. Basically, if you have a big
    chunk of XML that you want to insert into the DOM, but you don't want to
    incur the cost of parsing it, you can construct one of these and insert it
    into the DOM. This will most certainly only work with microdom as the API
    for converting nodes to xml is different in every DOM implementation.

    This could be improved by making this class a Lazy parser, so if you
    inserted this into the DOM and then later actually tried to mutate this
    node, it would be parsed then.
    """

    def writexml(self, writer, indent="", addindent="", newl="", strip=0,
                 nsprefixes=None, namespace=None):
        writer.write("%s%s%s" % (indent, self.data, newl))


def findNodes(parent, matcher, accum=None):
    if accum is None:
        accum = []
    if not parent.hasChildNodes():
        return accum
    for child in parent.childNodes:
        # print child, child.nodeType, child.nodeName
        if matcher(child):
            accum.append(child)
        findNodes(child, matcher, accum)
    return accum


def findNodesShallowOnMatch(parent, matcher, recurseMatcher, accum=None):
    if accum is None:
        accum = []
    if not parent.hasChildNodes():
        return accum
    for child in parent.childNodes:
        # print child, child.nodeType, child.nodeName
        if matcher(child):
            accum.append(child)
        if recurseMatcher(child):
            findNodesShallowOnMatch(child, matcher, recurseMatcher, accum)
    return accum


def findNodesShallow(parent, matcher, accum=None):
    if accum is None:
        accum = []
    if not parent.hasChildNodes():
        return accum
    for child in parent.childNodes:
        if matcher(child):
            accum.append(child)
        else:
            findNodes(child, matcher, accum)
    return accum


def findElementsWithAttributeShallow(parent, attribute):
    """
    Return an iterable of the elements which are direct children of C{parent}
    and which have the C{attribute} attribute.
    """
    return findNodesShallow(
        parent,
        lambda n: getattr(n, 'tagName', None) is not None and
                  n.hasAttribute(attribute))


def findElements(parent, matcher):
    """
    Return an iterable of the elements which are children of C{parent} for
    which the predicate C{matcher} returns true.
    """
    return findNodes(
        parent,
        lambda n, matcher=matcher: getattr(n, 'tagName', None) is not None and
                                   matcher(n))


def findElementsWithAttribute(parent, attribute, value=None):
    if value:
        return findElements(
            parent,
            lambda n, attribute=attribute, value=value:
                n.hasAttribute(attribute) and n.getAttribute(attribute) == value)
    else:
        return findElements(
            parent,
            lambda n, attribute=attribute: n.hasAttribute(attribute))


def findNodesNamed(parent, name):
    return findNodes(parent, lambda n, name=name: n.nodeName == name)


def writeNodeData(node, oldio):
    for subnode in node.childNodes:
        if hasattr(subnode, 'data'):
            oldio.write(subnode.data)
        else:
            writeNodeData(subnode, oldio)


def getNodeText(node):
    oldio = StringIO.StringIO()
    writeNodeData(node, oldio)
    return oldio.getvalue()


def getParents(node):
    l = []
    while node:
        l.append(node)
        node = node.parentNode
    return l


def namedChildren(parent, nodeName):
    """namedChildren(parent, nodeName) -> children (not descendants) of
    parent that have tagName == nodeName
    """
    return [n for n in parent.childNodes if getattr(n, 'tagName', '')==nodeName]
bsd-3-clause
BlindHunter/django
tests/template_tests/filter_tests/test_rjust.py
521
1030
from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe

from ..utils import setup


class RjustTests(SimpleTestCase):

    @setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
    def test_rjust01(self):
        output = self.engine.render_to_string('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&b. .  a&b.")

    @setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
    def test_rjust02(self):
        output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&amp;b. .  a&b.")


class FunctionTests(SimpleTestCase):

    def test_rjust(self):
        self.assertEqual(rjust('test', 10), '      test')

    def test_less_than_string_length(self):
        self.assertEqual(rjust('test', 3), 'test')

    def test_non_string_input(self):
        self.assertEqual(rjust(123, 4), ' 123')
bsd-3-clause
bistromath/gr-smartnet
src/python/logging_receiver.py
2
4743
#!/usr/bin/env python
from gnuradio import blks2, gr, gru
from grc_gnuradio import blks2 as grc_blks2
from gnuradio import smartnet
import string
import random
import time, datetime
import os


class logging_receiver(gr.hier_block2):
    def __init__(self, talkgroup, options):
        gr.hier_block2.__init__(self, "fsk_demod",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(0, 0, gr.sizeof_char)) # Output signature

        #print "Starting log_receiver init()"
        self.audiorate = options.audiorate
        self.rate = options.rate
        self.talkgroup = talkgroup
        self.directory = options.directory

        if options.squelch is None:
            options.squelch = 28
        if options.volume is None:
            options.volume = 3.0

        self.audiotaps = gr.firdes.low_pass(1, self.rate, 8000, 2000, gr.firdes.WIN_HANN)
        self.prefilter_decim = int(self.rate / self.audiorate)

        #the audio prefilter is a channel selection filter.
        self.audio_prefilter = gr.freq_xlating_fir_filter_ccf(self.prefilter_decim, #decimation
                                                              self.audiotaps, #taps
                                                              0, #freq offset
                                                              int(self.rate)) #sampling rate

        #on a trunked network where you know you will have good signal, a carrier power squelch works well. real FM receviers use a noise squelch, where
        #the received audio is high-passed above the cutoff and then fed to a reverse squelch. If the power is then BELOW a threshold, open the squelch.
        self.squelch = gr.pwr_squelch_cc(options.squelch, #squelch point
                                         alpha = 0.1, #wat
                                         ramp = 10, #wat
                                         gate = True) #gated so that the audio recording doesn't contain blank spaces between transmissions

        self.audiodemod = blks2.fm_demod_cf(self.rate/self.prefilter_decim, #rate
                                            1, #audio decimation
                                            4000, #deviation
                                            3000, #audio passband
                                            4000, #audio stopband
                                            options.volume, #gain
                                            75e-6) #deemphasis constant

        #the filtering removes FSK data woobling from the subaudible channel
        self.audiofilttaps = gr.firdes.high_pass(1, self.audiorate, 300, 50, gr.firdes.WIN_HANN)
        self.audiofilt = gr.fir_filter_fff(1, self.audiofilttaps)
        #self.audiogain = gr.multiply_const_ff(options.volume)

        #here we generate a random filename in the form /tmp/[random].wav, and then use it for the wavstamp block. this avoids collisions later on. remember to clean up these files when deallocating.
        self.tmpfilename = "/tmp/%s.wav" % ("".join([random.choice(string.letters+string.digits) for x in range(8)])) #if this looks glaringly different, it's because i totally cribbed it from a blog.

        self.valve = grc_blks2.valve(gr.sizeof_float, bool(1))
        #self.prefiltervalve = grc_blks2.valve(gr.sizeof_gr_complex, bool(1))

        #open the logfile for appending
        self.timestampfilename = "%s/%i.txt" % (self.directory, self.talkgroup)
        self.timestampfile = open(self.timestampfilename, 'a')

        self.filename = "%s/%i.wav" % (self.directory, self.talkgroup)
        self.audiosink = smartnet.wavsink(self.filename, 1, self.audiorate, 8) #this version allows appending to existing files.

        self.connect(self, self.audio_prefilter, self.squelch, self.audiodemod, self.valve, self.audiofilt, self.audiosink)

        self.timestamp = 0.0

        #print "Finishing logging receiver init()."
        self.mute() #start off muted.

    def __del__(self):
        #self.close()
        #self.audiosink.close()
        #os.system("rm %s" % self.tmpfilename) #remove the temp file you used for wav stamping
        self.timestampfile.close()

    def tuneoffset(self, target_freq, rffreq):
        self.audio_prefilter.set_center_freq(rffreq-target_freq*1e6)
        self.freq = target_freq

    def getfreq(self, rffreq):
        return self.freq

    def close(self):
        #close out and quit!
        self.mute() #make sure you aren't going to be writing
        self.audiosink.close() #if you write after this it's going to throw all the errors

    def mute(self):
        self.valve.set_open(bool(1))
#        self.prefiltervalve.set_open(bool(1))

    def unmute(self):
        self.valve.set_open(bool(0))
#        self.prefiltervalve.set_open(bool(0))
        if (self.timeout()) >= 3:
            self.stamp()
        self.timestamp = time.time()

    def timeout(self):
        return time.time() - self.timestamp

    def stamp(self):
        #print "Stamp says the current wavtime is %f" % self.audiosink.get_time()
        current_wavtime = self.audiosink.get_time() #gets the time in fractional seconds corresponding to the current position in the audio file
        current_timestring = time.strftime("%m/%d/%y %H:%M:%S")
        current_timestampstring = str(datetime.timedelta(seconds=current_wavtime)) + ": " + current_timestring + "\n"
        self.timestampfile.write(current_timestampstring)
        self.timestampfile.flush() #so you can follow along
gpl-3.0
rhdedgar/openshift-tools
openshift/installer/vendored/openshift-ansible-3.7.0/roles/lib_openshift/src/ansible/oc_obj.py
25
1253
# pylint: skip-file
# flake8: noqa


# pylint: disable=too-many-branches
def main():
    ''' ansible oc module for services '''

    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            all_namespaces=dict(default=False, type='bool'),
            name=dict(default=None, type='str'),
            files=dict(default=None, type='list'),
            kind=dict(required=True, type='str'),
            delete_after=dict(default=False, type='bool'),
            content=dict(default=None, type='dict'),
            force=dict(default=False, type='bool'),
            selector=dict(default=None, type='str'),
        ),
        mutually_exclusive=[["content", "files"], ["selector", "name"]],
        supports_check_mode=True,
    )

    rval = OCObject.run_ansible(module.params, module.check_mode)
    if 'failed' in rval:
        module.fail_json(**rval)

    module.exit_json(**rval)


if __name__ == '__main__':
    main()
apache-2.0
zulip/django
tests/string_lookup/models.py
281
1533
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models
from django.utils.encoding import python_2_unicode_compatible


@python_2_unicode_compatible
class Foo(models.Model):
    name = models.CharField(max_length=50)
    friend = models.CharField(max_length=50, blank=True)

    def __str__(self):
        return "Foo %s" % self.name


@python_2_unicode_compatible
class Bar(models.Model):
    name = models.CharField(max_length=50)
    normal = models.ForeignKey(Foo, models.CASCADE, related_name='normal_foo')
    fwd = models.ForeignKey("Whiz", models.CASCADE)
    back = models.ForeignKey("Foo", models.CASCADE)

    def __str__(self):
        return "Bar %s" % self.place.name


@python_2_unicode_compatible
class Whiz(models.Model):
    name = models.CharField(max_length=50)

    def __str__(self):
        return "Whiz %s" % self.name


@python_2_unicode_compatible
class Child(models.Model):
    parent = models.OneToOneField('Base', models.CASCADE)
    name = models.CharField(max_length=50)

    def __str__(self):
        return "Child %s" % self.name


@python_2_unicode_compatible
class Base(models.Model):
    name = models.CharField(max_length=50)

    def __str__(self):
        return "Base %s" % self.name


@python_2_unicode_compatible
class Article(models.Model):
    name = models.CharField(max_length=50)
    text = models.TextField()
    submitted_from = models.GenericIPAddressField(blank=True, null=True)

    def __str__(self):
        return "Article %s" % self.name
bsd-3-clause
swcurran/tfrs
backend/api/models/UserViewModel.py
1
1507
""" REST API Documentation for the NRS TFRS Credit Trading Application The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation. OpenAPI spec version: v1 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.db import models class UserViewModel(models.Model): given_name = models.CharField(max_length=255, blank=True, null=True) surname = models.CharField(max_length=255, blank=True, null=True) email = models.CharField(max_length=255, blank=True, null=True) active = models.BooleanField() sm_authorization_id = models.CharField(max_length=255, blank=True, null=True) user_roles = models.ManyToManyField('UserRole', related_name='UserViewModeluser_roles', blank=True) class Meta: abstract = True
apache-2.0
jbzdak/edx-platform
common/lib/capa/capa/tests/test_shuffle.py
196
13736
"""Tests the capa shuffle and name-masking.""" import unittest import textwrap from . import test_capa_system, new_loncapa_problem from capa.responsetypes import LoncapaProblemError class CapaShuffleTest(unittest.TestCase): """Capa problem tests for shuffling and choice-name masking.""" def setUp(self): super(CapaShuffleTest, self).setUp() self.system = test_capa_system() def test_shuffle_4_choices(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false">Apple</choice> <choice correct="false">Banana</choice> <choice correct="false">Chocolate</choice> <choice correct ="true">Donut</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) # shuffling 4 things with seed of 0 yields: B A C D # Check that the choices are shuffled the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>") # Check that choice name masking is enabled and that unmasking works response = problem.responders.values()[0] self.assertFalse(response.has_mask()) self.assertEqual(response.unmask_order(), ['choice_1', 'choice_0', 'choice_2', 'choice_3']) self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice') def test_shuffle_custom_names(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false" name="aaa">Apple</choice> <choice correct="false">Banana</choice> <choice correct="false">Chocolate</choice> <choice correct ="true" name="ddd">Donut</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) # B A C D # Check that the custom name= names come through response = problem.responders.values()[0] self.assertFalse(response.has_mask()) self.assertTrue(response.has_shuffle()) self.assertEqual(response.unmask_order(), ['choice_0', 'choice_aaa', 'choice_1', 'choice_ddd']) def test_shuffle_different_seed(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false">Apple</choice> <choice correct="false">Banana</choice> <choice correct="false">Chocolate</choice> <choice correct ="true">Donut</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=341) # yields D A B C the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Donut'.*'Apple'.*'Banana'.*'Chocolate'.*\].*</div>") def test_shuffle_1_choice(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="true">Apple</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Apple'.*\].*</div>") response = problem.responders.values()[0] self.assertFalse(response.has_mask()) self.assertTrue(response.has_shuffle()) self.assertEqual(response.unmask_order(), ['choice_0']) def test_shuffle_6_choices(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false">Apple</choice> <choice correct="false">Banana</choice> <choice correct="false">Chocolate</choice> <choice correct ="true">Zonut</choice> <choice correct ="false">Eggplant</choice> 
<choice correct ="false">Filet Mignon</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) # yields: C E A B D F # Donut -> Zonut to show that there is not some hidden alphabetic ordering going on the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Chocolate'.*'Eggplant'.*'Apple'.*'Banana'.*'Zonut'.*'Filet Mignon'.*\].*</div>") def test_shuffle_false(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="false"> <choice correct="false">Apple</choice> <choice correct="false">Banana</choice> <choice correct="false">Chocolate</choice> <choice correct ="true">Donut</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str) the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Apple'.*'Banana'.*'Chocolate'.*'Donut'.*\].*</div>") response = problem.responders.values()[0] self.assertFalse(response.has_mask()) self.assertFalse(response.has_shuffle()) def test_shuffle_fixed_head_end(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false" fixed="true">Alpha</choice> <choice correct="false" fixed="true">Beta</choice> <choice correct="false">A</choice> <choice correct="false">B</choice> <choice correct="false">C</choice> <choice correct ="true">D</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() # Alpha Beta held back from shuffle (head end) self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*\].*</div>") def test_shuffle_fixed_tail_end(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false">A</choice> <choice correct="false">B</choice> <choice correct="false">C</choice> <choice correct ="true">D</choice> <choice correct="false" fixed="true">Alpha</choice> <choice correct="false" fixed="true">Beta</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() # Alpha Beta held back from shuffle (tail end) self.assertRegexpMatches(the_html, r"<div>.*\[.*'B'.*'A'.*'C'.*'D'.*'Alpha'.*'Beta'.*\].*</div>") def test_shuffle_fixed_both_ends(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false" fixed="true">Alpha</choice> <choice correct="false" fixed="true">Beta</choice> <choice correct="false">A</choice> <choice correct="false">B</choice> <choice correct="false">C</choice> <choice correct ="true">D</choice> <choice correct="false" fixed="true">Psi</choice> <choice correct="false" fixed="true">Omega</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*'Psi'.*'Omega'.*\].*</div>") def test_shuffle_fixed_both_ends_thin(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false" fixed="true">Alpha</choice> <choice correct="false">A</choice> <choice correct="true" fixed="true">Omega</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = 
new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'A'.*'Omega'.*\].*</div>") def test_shuffle_fixed_all(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false" fixed="true">A</choice> <choice correct="false" fixed="true">B</choice> <choice correct="true" fixed="true">C</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'A'.*'B'.*'C'.*\].*</div>") def test_shuffle_island(self): """A fixed 'island' choice not at the head or tail end gets lumped into the tail end.""" xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false" fixed="true">A</choice> <choice correct="false">Mid</choice> <choice correct="true" fixed="true">C</choice> <choice correct="False">Mid</choice> <choice correct="false" fixed="true">D</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegexpMatches(the_html, r"<div>.*\[.*'A'.*'Mid'.*'Mid'.*'C'.*'D'.*\].*</div>") def test_multiple_shuffle_responses(self): xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false">Apple</choice> <choice correct="false">Banana</choice> <choice correct="false">Chocolate</choice> <choice correct ="true">Donut</choice> </choicegroup> </multiplechoiceresponse> <p>Here is some text</p> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true"> <choice correct="false">A</choice> <choice correct="false">B</choice> <choice correct="false">C</choice> <choice correct ="true">D</choice> </choicegroup> </multiplechoiceresponse> </problem> """) problem = new_loncapa_problem(xml_str, seed=0) orig_html = problem.get_html() self.assertEqual(orig_html, problem.get_html(), 'should be able to call get_html() twice') html = orig_html.replace('\n', ' ') # avoid headaches with .* matching print html self.assertRegexpMatches(html, r"<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>.*" + r"<div>.*\[.*'C'.*'A'.*'D'.*'B'.*\].*</div>") # Look at the responses in their authored order responses = sorted(problem.responders.values(), key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:])) self.assertFalse(responses[0].has_mask()) self.assertTrue(responses[0].has_shuffle()) self.assertTrue(responses[1].has_shuffle()) self.assertEqual(responses[0].unmask_order(), ['choice_1', 'choice_0', 'choice_2', 'choice_3']) self.assertEqual(responses[1].unmask_order(), ['choice_2', 'choice_0', 'choice_3', 'choice_1']) def test_shuffle_not_with_answerpool(self): """Raise error if shuffle and answer-pool are both used.""" xml_str = textwrap.dedent(""" <problem> <multiplechoiceresponse> <choicegroup type="MultipleChoice" shuffle="true" answer-pool="4"> <choice correct="false" fixed="true">A</choice> <choice correct="false">Mid</choice> <choice correct="true" fixed="true">C</choice> <choice correct="False">Mid</choice> <choice correct="false" fixed="true">D</choice> </choicegroup> </multiplechoiceresponse> </problem> """) with self.assertRaisesRegexp(LoncapaProblemError, "shuffle and answer-pool"): new_loncapa_problem(xml_str)
agpl-3.0
blois/AndroidSDKCloneMin
ndk/prebuilt/linux-x86_64/lib/python2.7/test/test_funcattrs.py
117
12749
from test import test_support
import types
import unittest


class FuncAttrsTest(unittest.TestCase):
    def setUp(self):
        class F:
            def a(self):
                pass
        def b():
            return 3
        self.f = F
        self.fi = F()
        self.b = b

    def cannot_set_attr(self, obj, name, value, exceptions):
        # Helper method for other tests.
        try:
            setattr(obj, name, value)
        except exceptions:
            pass
        else:
            self.fail("shouldn't be able to set %s to %r" % (name, value))
        try:
            delattr(obj, name)
        except exceptions:
            pass
        else:
            self.fail("shouldn't be able to del %s" % name)


class FunctionPropertiesTest(FuncAttrsTest):
    # Include the external setUp method that is common to all tests

    def test_module(self):
        self.assertEqual(self.b.__module__, __name__)

    def test_dir_includes_correct_attrs(self):
        self.b.known_attr = 7
        self.assertIn('known_attr', dir(self.b),
                      "set attributes not in dir listing of method")
        # Test on underlying function object of method
        self.f.a.im_func.known_attr = 7
        self.assertIn('known_attr', dir(self.f.a),
                      "set attribute on unbound method implementation in "
                      "class not in dir")
        self.assertIn('known_attr', dir(self.fi.a),
                      "set attribute on unbound method implementations, "
                      "should show up in next dir")

    def test_duplicate_function_equality(self):
        # Body of `duplicate' is the exact same as self.b
        def duplicate():
            'my docstring'
            return 3
        self.assertNotEqual(self.b, duplicate)

    def test_copying_func_code(self):
        def test(): pass
        self.assertEqual(test(), None)
        test.func_code = self.b.func_code
        self.assertEqual(test(), 3)  # self.b always returns 3, arbitrarily

    def test_func_globals(self):
        self.assertIs(self.b.func_globals, globals())
        self.cannot_set_attr(self.b, 'func_globals', 2, TypeError)

    def test_func_closure(self):
        a = 12
        def f(): print a
        c = f.func_closure
        self.assertIsInstance(c, tuple)
        self.assertEqual(len(c), 1)
        # don't have a type object handy
        self.assertEqual(c[0].__class__.__name__, "cell")
        self.cannot_set_attr(f, "func_closure", c, TypeError)

    def test_empty_cell(self):
        def f(): print a
        try:
            f.func_closure[0].cell_contents
        except ValueError:
            pass
        else:
            self.fail("shouldn't be able to read an empty cell")
        a = 12

    def test_func_name(self):
        self.assertEqual(self.b.__name__, 'b')
        self.assertEqual(self.b.func_name, 'b')
        self.b.__name__ = 'c'
        self.assertEqual(self.b.__name__, 'c')
        self.assertEqual(self.b.func_name, 'c')
        self.b.func_name = 'd'
        self.assertEqual(self.b.__name__, 'd')
        self.assertEqual(self.b.func_name, 'd')
        # __name__ and func_name must be a string
        self.cannot_set_attr(self.b, '__name__', 7, TypeError)
        self.cannot_set_attr(self.b, 'func_name', 7, TypeError)
        # __name__ must be available when in restricted mode. Exec will raise
        # AttributeError if __name__ is not available on f.
        s = """def f(): pass\nf.__name__"""
        exec s in {'__builtins__': {}}
        # Test on methods, too
        self.assertEqual(self.f.a.__name__, 'a')
        self.assertEqual(self.fi.a.__name__, 'a')
        self.cannot_set_attr(self.f.a, "__name__", 'a', AttributeError)
        self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)

    def test_func_code(self):
        num_one, num_two = 7, 8
        def a(): pass
        def b(): return 12
        def c(): return num_one
        def d(): return num_two
        def e(): return num_one, num_two
        for func in [a, b, c, d, e]:
            self.assertEqual(type(func.func_code), types.CodeType)
        self.assertEqual(c(), 7)
        self.assertEqual(d(), 8)
        d.func_code = c.func_code
        self.assertEqual(c.func_code, d.func_code)
        self.assertEqual(c(), 7)
        # self.assertEqual(d(), 7)
        try:
            b.func_code = c.func_code
        except ValueError:
            pass
        else:
            self.fail("func_code with different numbers of free vars should "
                      "not be possible")
        try:
            e.func_code = d.func_code
        except ValueError:
            pass
        else:
            self.fail("func_code with different numbers of free vars should "
                      "not be possible")

    def test_blank_func_defaults(self):
        self.assertEqual(self.b.func_defaults, None)
        del self.b.func_defaults
        self.assertEqual(self.b.func_defaults, None)

    def test_func_default_args(self):
        def first_func(a, b):
            return a+b
        def second_func(a=1, b=2):
            return a+b
        self.assertEqual(first_func.func_defaults, None)
        self.assertEqual(second_func.func_defaults, (1, 2))
        first_func.func_defaults = (1, 2)
        self.assertEqual(first_func.func_defaults, (1, 2))
        self.assertEqual(first_func(), 3)
        self.assertEqual(first_func(3), 5)
        self.assertEqual(first_func(3, 5), 8)
        del second_func.func_defaults
        self.assertEqual(second_func.func_defaults, None)
        try:
            second_func()
        except TypeError:
            pass
        else:
            self.fail("func_defaults does not update; deleting it does not "
                      "remove requirement")


class InstancemethodAttrTest(FuncAttrsTest):
    def test_im_class(self):
        self.assertEqual(self.f.a.im_class, self.f)
        self.assertEqual(self.fi.a.im_class, self.f)
        self.cannot_set_attr(self.f.a, "im_class", self.f, TypeError)
        self.cannot_set_attr(self.fi.a, "im_class", self.f, TypeError)

    def test_im_func(self):
        self.f.b = self.b
        self.assertEqual(self.f.b.im_func, self.b)
        self.assertEqual(self.fi.b.im_func, self.b)
        self.cannot_set_attr(self.f.b, "im_func", self.b, TypeError)
        self.cannot_set_attr(self.fi.b, "im_func", self.b, TypeError)

    def test_im_self(self):
        self.assertEqual(self.f.a.im_self, None)
        self.assertEqual(self.fi.a.im_self, self.fi)
        self.cannot_set_attr(self.f.a, "im_self", None, TypeError)
        self.cannot_set_attr(self.fi.a, "im_self", self.fi, TypeError)

    def test_im_func_non_method(self):
        # Behavior should be the same when a method is added via an attr
        # assignment
        self.f.id = types.MethodType(id, None, self.f)
        self.assertEqual(self.fi.id(), id(self.fi))
        self.assertNotEqual(self.fi.id(), id(self.f))
        # Test usage
        try:
            self.f.id.unknown_attr
        except AttributeError:
            pass
        else:
            self.fail("using unknown attributes should raise AttributeError")
        # Test assignment and deletion
        self.cannot_set_attr(self.f.id, 'unknown_attr', 2, AttributeError)
        self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError)

    def test_implicit_method_properties(self):
        self.f.a.im_func.known_attr = 7
        self.assertEqual(self.f.a.known_attr, 7)
        self.assertEqual(self.fi.a.known_attr, 7)


class ArbitraryFunctionAttrTest(FuncAttrsTest):
    def test_set_attr(self):
        # setting attributes only works on function objects
        self.b.known_attr = 7
        self.assertEqual(self.b.known_attr, 7)
        for func in [self.f.a, self.fi.a]:
            try:
                func.known_attr = 7
            except AttributeError:
                pass
            else:
                self.fail("setting attributes on methods should raise error")

    def test_delete_unknown_attr(self):
        try:
            del self.b.unknown_attr
        except AttributeError:
            pass
        else:
            self.fail("deleting unknown attribute should raise TypeError")

    def test_setting_attrs_duplicates(self):
        try:
            self.f.a.klass = self.f
        except AttributeError:
            pass
        else:
            self.fail("setting arbitrary attribute in unbound function "
                      " should raise AttributeError")
        self.f.a.im_func.klass = self.f
        for method in [self.f.a, self.fi.a, self.fi.a.im_func]:
            self.assertEqual(method.klass, self.f)

    def test_unset_attr(self):
        for func in [self.b, self.f.a, self.fi.a]:
            try:
                func.non_existent_attr
            except AttributeError:
                pass
            else:
                self.fail("using unknown attributes should raise "
                          "AttributeError")


class FunctionDictsTest(FuncAttrsTest):
    def test_setting_dict_to_invalid(self):
        self.cannot_set_attr(self.b, '__dict__', None, TypeError)
        self.cannot_set_attr(self.b, 'func_dict', None, TypeError)
        from UserDict import UserDict
        d = UserDict({'known_attr': 7})
        self.cannot_set_attr(self.f.a.im_func, '__dict__', d, TypeError)
        self.cannot_set_attr(self.fi.a.im_func, '__dict__', d, TypeError)

    def test_setting_dict_to_valid(self):
        d = {'known_attr': 7}
        self.b.__dict__ = d
        # Setting dict is only possible on the underlying function objects
        self.f.a.im_func.__dict__ = d
        # Test assignment
        self.assertIs(d, self.b.__dict__)
        self.assertIs(d, self.b.func_dict)
        # ... and on all the different ways of referencing the method's func
        self.assertIs(d, self.f.a.im_func.__dict__)
        self.assertIs(d, self.f.a.__dict__)
        self.assertIs(d, self.fi.a.im_func.__dict__)
        self.assertIs(d, self.fi.a.__dict__)
        # Test value
        self.assertEqual(self.b.known_attr, 7)
        self.assertEqual(self.b.__dict__['known_attr'], 7)
        self.assertEqual(self.b.func_dict['known_attr'], 7)
        # ... and again, on all the different method's names
        self.assertEqual(self.f.a.im_func.known_attr, 7)
        self.assertEqual(self.f.a.known_attr, 7)
        self.assertEqual(self.fi.a.im_func.known_attr, 7)
        self.assertEqual(self.fi.a.known_attr, 7)

    def test_delete_func_dict(self):
        try:
            del self.b.__dict__
        except TypeError:
            pass
        else:
            self.fail("deleting function dictionary should raise TypeError")
        try:
            del self.b.func_dict
        except TypeError:
            pass
        else:
            self.fail("deleting function dictionary should raise TypeError")

    def test_unassigned_dict(self):
        self.assertEqual(self.b.__dict__, {})

    def test_func_as_dict_key(self):
        value = "Some string"
        d = {}
        d[self.b] = value
        self.assertEqual(d[self.b], value)


class FunctionDocstringTest(FuncAttrsTest):
    def test_set_docstring_attr(self):
        self.assertEqual(self.b.__doc__, None)
        self.assertEqual(self.b.func_doc, None)
        docstr = "A test method that does nothing"
        self.b.__doc__ = self.f.a.im_func.__doc__ = docstr
        self.assertEqual(self.b.__doc__, docstr)
        self.assertEqual(self.b.func_doc, docstr)
        self.assertEqual(self.f.a.__doc__, docstr)
        self.assertEqual(self.fi.a.__doc__, docstr)
        self.cannot_set_attr(self.f.a, "__doc__", docstr, AttributeError)
        self.cannot_set_attr(self.fi.a, "__doc__", docstr, AttributeError)

    def test_delete_docstring(self):
        self.b.__doc__ = "The docstring"
        del self.b.__doc__
        self.assertEqual(self.b.__doc__, None)
        self.assertEqual(self.b.func_doc, None)
        self.b.func_doc = "The docstring"
        del self.b.func_doc
        self.assertEqual(self.b.__doc__, None)
        self.assertEqual(self.b.func_doc, None)


class StaticMethodAttrsTest(unittest.TestCase):
    def test_func_attribute(self):
        def f():
            pass

        c = classmethod(f)
        self.assertTrue(c.__func__ is f)

        s = staticmethod(f)
        self.assertTrue(s.__func__ is f)


def test_main():
    test_support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest,
                              ArbitraryFunctionAttrTest, FunctionDictsTest,
                              FunctionDocstringTest, StaticMethodAttrsTest)


if __name__ == "__main__":
    test_main()
apache-2.0
vanesa/kid-o
kido/admin/utils.py
1
3921
# -*- coding: utf-8 -*-

""" Flask-Admin utilities."""

from flask import abort, redirect, request, url_for
from flask_admin import AdminIndexView, expose
from flask_admin.base import MenuLink
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from functools import wraps

from kido import app
from kido.constants import PERMISSION_ADMIN


def admin_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        if not current_user.is_authenticated:
            return redirect(url_for("views.general.login", next=request.url))
        users_permissions = current_user.permissions
        if PERMISSION_ADMIN not in users_permissions:
            app.logger.debug("Not an admin")
            abort(404)
        return f(*args, **kwargs)

    return decorated


def permission_required(permissions):
    if not isinstance(permissions, (list, set, tuple)):
        permissions = [permissions]
    permissions = [x.upper() for x in permissions]

    def decorator(method):
        @wraps(method)
        def f(*args, **kwargs):
            if not current_user.is_authenticated:
                return redirect(url_for("views.general.login", next=request.url))
            users_permissions = current_user.permissions
            if PERMISSION_ADMIN not in users_permissions:
                for permission in permissions:
                    if permission not in users_permissions:
                        app.logger.debug("Missing permission: {0}".format(permission))
                        abort(404)
            return method(*args, **kwargs)

        return f

    return decorator


class AuthenticatedMenuLink(MenuLink):
    def is_accessible(self):
        return current_user.is_authenticated


class CustomAdminIndexView(AdminIndexView):
    extra_css = None
    extra_js = None

    @expose("/")
    @admin_required
    def index(self):
        if not current_user.is_authenticated:
            return redirect(url_for("views.general.login", next=request.url))
        return super(CustomAdminIndexView, self).index()

    @expose("/login/")
    def login_view(self):
        return redirect(url_for("views.general.login", next=request.url))

    @expose("/logout/")
    def logout_view(self):
        return redirect("/logout")


class CustomModelView(ModelView):
    page_size = 50
    extra_css = None
    extra_js = None

    action_template = "admin/action.html"
    edit_template = "admin/model/edit.html"
    create_template = "admin/model/create.html"
    list_template = "admin/model/custom_list.html"

    _include = None

    class_attributes = [
        "page_size",
        "can_create",
        "can_edit",
        "can_delete",
        "column_searchable_list",
        "column_filters",
        "column_exclude_list",
        "column_default_sort",
    ]

    def __init__(self, *args, **kwargs):
        if "exclude" in kwargs:
            self.form_excluded_columns = kwargs["exclude"]
            del kwargs["exclude"]
        if "include" in kwargs:
            self._include = kwargs["include"]
            del kwargs["include"]
        for item in self.class_attributes:
            if item in kwargs:
                setattr(self, item, kwargs[item])
                del kwargs[item]
        super(CustomModelView, self).__init__(*args, **kwargs)

    def get_list_columns(self):
        if self._include:
            return self.get_column_names(
                only_columns=self.scaffold_list_columns() + self._include,
                excluded_columns=self.column_exclude_list,
            )
        return super(CustomModelView, self).get_list_columns()

    def is_accessible(self):
        if not current_user.is_authenticated:
            return False
        users_permissions = current_user.permissions
        return PERMISSION_ADMIN in users_permissions

    def inaccessible_callback(self, name, **kwargs):
        return abort(404)
bsd-3-clause
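A hypothetical wiring sketch for the Flask-Admin utilities in the record above: it shows how CustomModelView's __init__ pops "exclude", "include" and the class_attributes entries out of kwargs before delegating to ModelView. The db handle and the kido.models.User import are assumptions for illustration, not taken from the record.

# Hypothetical wiring sketch; kido.models.User and db are assumed names.

from flask_admin import Admin

from kido import app
from kido.admin.utils import CustomAdminIndexView, CustomModelView

# Assumed application objects, for illustration only:
from kido import db                 # assumed SQLAlchemy handle
from kido.models import User        # hypothetical model

admin = Admin(app, index_view=CustomAdminIndexView(), template_mode="bootstrap3")

# "exclude" becomes form_excluded_columns and page_size overrides the
# class default of 50; both are consumed before ModelView.__init__ runs.
admin.add_view(
    CustomModelView(
        User,
        db.session,
        name="Users",
        exclude=["password_hash"],
        page_size=25,
    )
)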
rickshawman/twitter
venv/lib/python2.7/site-packages/pip/pep425tags.py
79
11853
"""Generate and work with PEP 425 Compatibility Tags.""" from __future__ import absolute_import import re import sys import warnings import platform import logging import ctypes try: import sysconfig except ImportError: # pragma nocover # Python < 2.7 import distutils.sysconfig as sysconfig import distutils.util from pip.compat import OrderedDict logger = logging.getLogger(__name__) _osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') def get_config_var(var): try: return sysconfig.get_config_var(var) except IOError as e: # Issue #1074 warnings.warn("{0}".format(e), RuntimeWarning) return None def get_abbr_impl(): """Return abbreviated implementation name.""" if hasattr(sys, 'pypy_version_info'): pyimpl = 'pp' elif sys.platform.startswith('java'): pyimpl = 'jy' elif sys.platform == 'cli': pyimpl = 'ip' else: pyimpl = 'cp' return pyimpl def get_impl_ver(): """Return implementation version.""" impl_ver = get_config_var("py_version_nodot") if not impl_ver or get_abbr_impl() == 'pp': impl_ver = ''.join(map(str, get_impl_version_info())) return impl_ver def get_impl_version_info(): """Return sys.version_info-like tuple for use in decrementing the minor version.""" if get_abbr_impl() == 'pp': # as per https://github.com/pypa/pip/issues/2882 return (sys.version_info[0], sys.pypy_version_info.major, sys.pypy_version_info.minor) else: return sys.version_info[0], sys.version_info[1] def get_impl_tag(): """ Returns the Tag for this specific implementation. """ return "{0}{1}".format(get_abbr_impl(), get_impl_ver()) def get_flag(var, fallback, expected=True, warn=True): """Use a fallback method for determining SOABI flags if the needed config var is unset or unavailable.""" val = get_config_var(var) if val is None: if warn: logger.debug("Config variable '%s' is unset, Python ABI tag may " "be incorrect", var) return fallback() return val == expected def get_abi_tag(): """Return the ABI tag based on SOABI (if available) or emulate SOABI (CPython 2, PyPy).""" soabi = get_config_var('SOABI') impl = get_abbr_impl() if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'): d = '' m = '' u = '' if get_flag('Py_DEBUG', lambda: hasattr(sys, 'gettotalrefcount'), warn=(impl == 'cp')): d = 'd' if get_flag('WITH_PYMALLOC', lambda: impl == 'cp', warn=(impl == 'cp')): m = 'm' if get_flag('Py_UNICODE_SIZE', lambda: sys.maxunicode == 0x10ffff, expected=4, warn=(impl == 'cp' and sys.version_info < (3, 3))) \ and sys.version_info < (3, 3): u = 'u' abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) elif soabi and soabi.startswith('cpython-'): abi = 'cp' + soabi.split('-')[1] elif soabi: abi = soabi.replace('.', '_').replace('-', '_') else: abi = None return abi def _is_running_32bit(): return sys.maxsize == 2147483647 def get_platform(): """Return our platform name 'win32', 'linux_x86_64'""" if sys.platform == 'darwin': # distutils.util.get_platform() returns the release based on the value # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may # be signficantly older than the user's current machine. 
release, _, machine = platform.mac_ver() split_ver = release.split('.') if machine == "x86_64" and _is_running_32bit(): machine = "i386" elif machine == "ppc64" and _is_running_32bit(): machine = "ppc" return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine) # XXX remove distutils dependency result = distutils.util.get_platform().replace('.', '_').replace('-', '_') if result == "linux_x86_64" and _is_running_32bit(): # 32 bit Python program (running on a 64 bit Linux): pip should only # install and run 32 bit compiled extensions in that case. result = "linux_i686" return result def is_manylinux1_compatible(): # Only Linux, and only x86-64 / i686 if get_platform() not in ("linux_x86_64", "linux_i686"): return False # Check for presence of _manylinux module try: import _manylinux return bool(_manylinux.manylinux1_compatible) except (ImportError, AttributeError): # Fall through to heuristic check below pass # Check glibc version. CentOS 5 uses glibc 2.5. return have_compatible_glibc(2, 5) # Separated out from have_compatible_glibc for easier unit testing def check_glibc_version(version_str, needed_major, needed_minor): # Parse string and check against requested version. # # We use a regexp instead of str.split because we want to discard any # random junk that might come after the minor version -- this might happen # in patched/forked versions of glibc (e.g. Linaro's version of glibc # uses version strings like "2.20-2014.11"). See gh-3588. m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) if not m: warnings.warn("Expected glibc version with 2 components major.minor," " got: %s" % version_str, RuntimeWarning) return False return (int(m.group("major")) == needed_major and int(m.group("minor")) >= needed_minor) def have_compatible_glibc(major, minimum_minor): # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. process_namespace = ctypes.CDLL(None) try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. return False # Call gnu_get_libc_version, which returns a string like "2.5". gnu_get_libc_version.restype = ctypes.c_char_p version_str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") return check_glibc_version(version_str, major, minimum_minor) def get_darwin_arches(major, minor, machine): """Return a list of supported arches (including group arches) for the given major, minor and machine architecture of an OS X machine. """ arches = [] def _supports_arch(major, minor, arch): # Looking at the application support for OS X versions in the chart # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears # our timeline looks roughly like: # # 10.0 - Introduces ppc support. # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 # and x86_64 support is CLI only, and cannot be used for GUI # applications. # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. # 10.6 - Drops support for ppc64 # 10.7 - Drops support for ppc # # Given that we do not know if we're installing a CLI or a GUI # application, we must be conservative and assume it might be a GUI # application and behave as if ppc64 and x86_64 support did not occur # until 10.5. 
# # Note: The above information is taken from the "Application support" # column in the chart not the "Processor support" since I believe # that we care about what instruction sets an application can use # not which processors the OS supports. if arch == 'ppc': return (major, minor) <= (10, 5) if arch == 'ppc64': return (major, minor) == (10, 5) if arch == 'i386': return (major, minor) >= (10, 4) if arch == 'x86_64': return (major, minor) >= (10, 5) if arch in groups: for garch in groups[arch]: if _supports_arch(major, minor, garch): return True return False groups = OrderedDict([ ("fat", ("i386", "ppc")), ("intel", ("x86_64", "i386")), ("fat64", ("x86_64", "ppc64")), ("fat32", ("x86_64", "i386", "ppc")), ]) if _supports_arch(major, minor, machine): arches.append(machine) for garch in groups: if machine in groups[garch] and _supports_arch(major, minor, garch): arches.append(garch) arches.append('universal') return arches def get_supported(versions=None, noarch=False): """Return a list of supported tags for each version specified in `versions`. :param versions: a list of string versions, of the form ["33", "32"], or None. The first version will be assumed to support our ABI. """ supported = [] # Versions must be given with respect to the preference if versions is None: versions = [] version_info = get_impl_version_info() major = version_info[:-1] # Support all previous minor Python versions. for minor in range(version_info[-1], -1, -1): versions.append(''.join(map(str, major + (minor,)))) impl = get_abbr_impl() abis = [] abi = get_abi_tag() if abi: abis[0:0] = [abi] abi3s = set() import imp for suffix in imp.get_suffixes(): if suffix[0].startswith('.abi'): abi3s.add(suffix[0].split('.', 2)[1]) abis.extend(sorted(list(abi3s))) abis.append('none') if not noarch: arch = get_platform() if sys.platform == 'darwin': # support macosx-10.6-intel on macosx-10.9-x86_64 match = _osx_arch_pat.match(arch) if match: name, major, minor, actual_arch = match.groups() tpl = '{0}_{1}_%i_%s'.format(name, major) arches = [] for m in reversed(range(int(minor) + 1)): for a in get_darwin_arches(int(major), m, actual_arch): arches.append(tpl % (m, a)) else: # arch pattern didn't match (?!) arches = [arch] elif is_manylinux1_compatible(): arches = [arch.replace('linux', 'manylinux1'), arch] else: arches = [arch] # Current version, current API (built specifically for our Python): for abi in abis: for arch in arches: supported.append(('%s%s' % (impl, versions[0]), abi, arch)) # Has binaries, does not use the Python API: for arch in arches: supported.append(('py%s' % (versions[0][0]), 'none', arch)) # No abi / arch, but requires our implementation: supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) # Tagged specifically as being cross-version compatible # (with just the major version specified) supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) # No abi / arch, generic Python for i, version in enumerate(versions): supported.append(('py%s' % (version,), 'none', 'any')) if i == 0: supported.append(('py%s' % (version[0]), 'none', 'any')) return supported supported_tags = get_supported() supported_tags_noarch = get_supported(noarch=True) implementation_tag = get_impl_tag()
gpl-3.0
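A usage sketch for the tag machinery in the record above, showing how the (implementation, abi, platform) triples from get_supported() are typically consumed when deciding whether a wheel is installable. It assumes the module is importable as pep425tags (inside pip it lives at pip.pep425tags) and deliberately simplifies wheel filename parsing.

# Usage sketch; assumes the module is importable as pep425tags.

import pep425tags

def wheel_supported(filename):
    """Return True if any tag encoded in the wheel filename is supported."""
    # e.g. 'foo-1.0-py2.py3-none-any.whl' encodes dot-separated tag sets
    parts = filename[:-len(".whl")].split("-")
    pyvers, abis, plats = (p.split(".") for p in parts[-3:])
    tags = {(py, abi, plat) for py in pyvers for abi in abis for plat in plats}
    return not tags.isdisjoint(pep425tags.get_supported())

print(pep425tags.implementation_tag)      # e.g. 'cp27'
print(pep425tags.supported_tags[:3])      # most-preferred tags come first
print(wheel_supported("foo-1.0-py2.py3-none-any.whl"))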
wdwvt1/qiime
scripts/filter_fasta.py
12
7969
#!/usr/bin/env python # File created on 18 May 2010 from __future__ import division __author__ = "Greg Caporaso" __copyright__ = "Copyright 2011, The QIIME Project" __credits__ = ["Greg Caporaso", "Jens Reeder", "Yoshiki Vazquez Baeza"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "Greg Caporaso" __email__ = "[email protected]" from qiime.util import make_option from qiime.util import parse_command_line_parameters, get_options_lookup from qiime.parse import fields_to_dict from qiime.filter import (filter_fasta, filter_fastq, get_seqs_to_keep_lookup_from_seq_id_file, get_seqs_to_keep_lookup_from_fasta_file, sample_ids_from_metadata_description, get_seqs_to_keep_lookup_from_biom) options_lookup = get_options_lookup() script_info = {} script_info[ 'brief_description'] = "This script can be applied to remove sequences from a fasta or fastq file based on input criteria." script_info['script_description'] = "" script_info['script_usage'] = [] script_info[ 'script_usage'].append(("OTU map-based filtering", "Keep all sequences that show up in an OTU map.", "%prog -f inseqs.fasta -o otu_map_filtered_seqs.fasta -m otu_map.txt")) script_info[ 'script_usage'].append(("Chimeric sequence filtering", "Discard all sequences that show up in chimera checking output. NOTE: It is very important to pass -n here as this tells the script to negate the request, or discard all sequences that are listed via -s. This is necessary to remove the identified chimeras from inseqs.fasta.", "%prog -f inseqs.fasta -o non_chimeric_seqs.fasta -s chimeric_seqs.txt -n")) script_info[ 'script_usage'].append(("Sequence list filtering", "Keep all sequences from as fasta file that are listed in a text file.", "%prog -f inseqs.fasta -o list_filtered_seqs.fasta -s seqs_to_keep.txt")) script_info[ 'script_usage'].append(("biom-based filtering", "Keep all sequences that are listed as observations in a biom file.", "%prog -f inseqs.fastq -o biom_filtered_seqs.fastq -b otu_table.biom")) script_info[ 'script_usage'].append(("fastq filtering", "Keep all sequences from a fastq file that are listed in a text file (note: file name must end with .fastq to support fastq filtering).", "%prog -f inseqs.fastq -o list_filtered_seqs.fastq -s seqs_to_keep.txt")) script_info[ 'script_usage'].append(("sample id list filtering", "Keep all sequences from a fasta file where the sample id portion of the sequence identifier is listed in a text file (sequence identifiers in fasta file must be in post-split libraries format: sampleID_seqID).", "%prog -f sl_inseqs.fasta -o sample_id_list_filtered_seqs.fasta --sample_id_fp map.txt")) script_info['output_description'] = "" script_info['required_options'] = [ options_lookup['input_fasta'], make_option( '-o', '--output_fasta_fp', type='new_filepath', help='the output fasta filepath') ] script_info['optional_options'] = [ make_option('-m', '--otu_map', type='existing_filepath', help="An OTU map where sequences ids are those which should be " "retained."), make_option('-s', '--seq_id_fp', type='existing_filepath', help="A list of sequence identifiers (or tab-delimited lines " "with a seq identifier in the first field) which should " "be retained."), make_option('-b', '--biom_fp', type='existing_filepath', help='A biom file where otu identifiers should be retained.'), make_option('-a', '--subject_fasta_fp', type='existing_filepath', help='A fasta file where the seq ids should be retained.'), make_option('-p', '--seq_id_prefix', type='string', help='Keep seqs where seq_id starts with this prefix.'), 
make_option('--sample_id_fp', type='existing_filepath', help="Keep seqs where seq_id starts with a sample id listed in " "this file. Must be newline delimited and may not contain " "a header."), make_option('-n', '--negate', help='Discard passed seq ids rather than' ' keep passed seq ids. [default: %default]', default=False, action='store_true'), make_option('--mapping_fp', type='existing_filepath', help="Mapping file path (for use with --valid_states). " "[default: %default]"), make_option('--valid_states', type='string', help="Description of sample ids to retain (for use with " "--mapping_fp). [default: %default]") ] script_info['version'] = __version__ def get_seqs_to_keep_lookup_from_otu_map(seqs_to_keep_f): """Generate a lookup dictionary from an OTU map""" otu_map = fields_to_dict(seqs_to_keep_f) seqs_to_keep = [] for seq_ids in otu_map.values(): seqs_to_keep += seq_ids return {}.fromkeys(seqs_to_keep) def get_seqs_to_keep_lookup_from_sample_ids(sample_ids): sample_ids = set(sample_ids) return sample_ids def get_seqs_to_keep_lookup_from_mapping_file(mapping_f, valid_states): sample_ids = set(sample_ids_from_metadata_description(mapping_f, valid_states)) return sample_ids def main(): option_parser, opts, args =\ parse_command_line_parameters(**script_info) negate = opts.negate error_msg = "Must pass exactly one of -a, -b, -s, -p, -m, or --valid_states and --mapping_fp." if 1 != sum(map(bool, [opts.otu_map, opts.seq_id_fp, opts.subject_fasta_fp, opts.seq_id_prefix, opts.biom_fp, opts.sample_id_fp, opts.mapping_fp and opts.valid_states])): option_parser.error(error_msg) seqid_f = None if opts.otu_map: seqs_to_keep_lookup =\ get_seqs_to_keep_lookup_from_otu_map( open(opts.otu_map, 'U')) elif opts.seq_id_fp: seqs_to_keep_lookup =\ get_seqs_to_keep_lookup_from_seq_id_file( open(opts.seq_id_fp, 'U')) elif opts.subject_fasta_fp: seqs_to_keep_lookup =\ get_seqs_to_keep_lookup_from_fasta_file( open(opts.subject_fasta_fp, 'U')) elif opts.seq_id_prefix: seqs_to_keep_lookup = None seqid_f = lambda x: x.startswith(opts.seq_id_prefix) elif opts.mapping_fp and opts.valid_states: seqs_to_keep_lookup =\ get_seqs_to_keep_lookup_from_mapping_file( open(opts.mapping_fp, 'U'), opts.valid_states) seqid_f = lambda x: x.split()[0].rsplit('_')[0] in seqs_to_keep_lookup elif opts.biom_fp: seqs_to_keep_lookup = \ get_seqs_to_keep_lookup_from_biom(opts.biom_fp) elif opts.sample_id_fp: sample_ids = set([e.strip().split()[0] for e in open(opts.sample_id_fp, 'U')]) seqs_to_keep_lookup = \ get_seqs_to_keep_lookup_from_sample_ids(sample_ids) seqid_f = lambda x: x.split()[0].rsplit('_')[0] in seqs_to_keep_lookup else: option_parser.error(error_msg) if opts.input_fasta_fp.endswith('.fastq'): filter_fp_f = filter_fastq else: filter_fp_f = filter_fasta input_fasta_f = open(opts.input_fasta_fp, 'U') output_fasta_f = open(opts.output_fasta_fp, 'w') filter_fp_f(input_fasta_f, output_fasta_f, seqs_to_keep_lookup, negate, seqid_f=seqid_f) if __name__ == "__main__": main()
gpl-2.0
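A QIIME-independent sketch of the keep/negate selection that filter_fasta.py drives through its -s and -n options; this illustrates the logic only and is not the qiime.filter implementation itself.

# Standalone illustration of keep/negate fasta filtering.

def filter_fasta_lines(in_lines, ids_to_keep, negate=False):
    """Yield fasta lines for records whose id matches the keep/negate rule."""
    keep = False
    for line in in_lines:
        if line.startswith(">"):
            seq_id = line[1:].split()[0]
            # != acts as XOR: keep listed ids, or discard them when negated
            keep = (seq_id in ids_to_keep) != negate
        if keep:
            yield line

fasta = [">seq1 sample=a", "ACGT", ">seq2", "GGCC", ">seq3", "TTAA"]
print(list(filter_fasta_lines(fasta, {"seq1", "seq3"})))                # keep listed
print(list(filter_fasta_lines(fasta, {"seq1", "seq3"}, negate=True)))   # discard listed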
flavour/eden
modules/feedparser5213.py
9
159822
"""Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit https://code.google.com/p/feedparser/ for the latest version Visit http://packages.python.org/feedparser/ for the latest documentation Required: Python 2.4 or later Recommended: iconv_codec <http://cjkpython.i18n.org/> """ __version__ = "5.2.1" __license__ = """ Copyright 2010-2015 Kurt McKee <[email protected]> Copyright 2002-2008 Mark Pilgrim All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>", "Sam Ruby <http://intertwingly.net/>", "Ade Oshineye <http://blog.oshineye.com/>", "Martin Pool <http://sourcefrog.net/>", "Kurt McKee <http://kurtmckee.org/>", "Bernd Schlapsi <https://github.com/brot>",] # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically resolve all relative URIs, set this # to 1. RESOLVE_RELATIVE_URIS = 1 # If you want feedparser to automatically sanitize all potentially unsafe # HTML content, set this to 1. 
SANITIZE_HTML = 1 # ---------- Python 3 modules (make it work if possible) ---------- try: import rfc822 except ImportError: from email import _parseaddr as rfc822 try: # Python 3.1 introduces bytes.maketrans and simultaneously # deprecates string.maketrans; use bytes.maketrans if possible _maketrans = bytes.maketrans except (NameError, AttributeError): import string _maketrans = string.maketrans # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except ImportError: base64 = binascii = None else: # Python 3.1 deprecates decodestring in favor of decodebytes _base64decode = getattr(base64, 'decodebytes', base64.decodestring) # _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3 # _l2bytes: convert a list of ints to bytes if the interpreter is Python 3 try: if bytes is str: # In Python 2.5 and below, bytes doesn't exist (NameError) # In Python 2.6 and above, bytes and str are the same type raise NameError except NameError: # Python 2 def _s2bytes(s): return s def _l2bytes(l): return ''.join(map(chr, l)) else: # Python 3 def _s2bytes(s): return bytes(s, 'utf8') def _l2bytes(l): return bytes(l) # If you want feedparser to allow all URL schemes, set this to () # List culled from Python's urlparse documentation at: # http://docs.python.org/library/urlparse.html # as well as from "URI scheme" at Wikipedia: # https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme # Many more will likely need to be added! ACCEPTABLE_URI_SCHEMES = ( 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet', 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', # Additional common-but-unofficial schemes 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', ) #ACCEPTABLE_URI_SCHEMES = () # ---------- required modules (should come with any Python distribution) ---------- import cgi import codecs import copy import datetime import itertools import re import struct import time import types import urllib.request, urllib.parse, urllib.error import urllib.request, urllib.error, urllib.parse import urllib.parse import warnings from html.entities import name2codepoint, codepoint2name, entitydefs try: from io import BytesIO as _StringIO except ImportError: try: from io import StringIO as _StringIO except ImportError: from io import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except ImportError: gzip = None try: import zlib except ImportError: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
try: import xml.sax from xml.sax.saxutils import escape as _xmlescape except ImportError: _XML_AVAILABLE = 0 def _xmlescape(data,entities={}): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') for char, entity in entities: data = data.replace(char, entity) return data else: try: xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers except xml.sax.SAXReaderNotAvailable: _XML_AVAILABLE = 0 else: _XML_AVAILABLE = 1 # sgmllib is not available by default in Python 3; if the end user doesn't have # it available then we'll lose illformed XML parsing and content santizing try: import sgmllib except ImportError: # This is probably Python 3, which doesn't include sgmllib anymore _SGML_AVAILABLE = 0 # Mock sgmllib enough to allow subclassing later on class sgmllib(object): class SGMLParser(object): def goahead(self, i): pass def parse_starttag(self, i): pass else: _SGML_AVAILABLE = 1 # sgmllib defines a number of module-level regular expressions that are # insufficient for the XML parsing feedparser needs. Rather than modify # the variables directly in sgmllib, they're defined here using the same # names, and the compiled code objects of several sgmllib.SGMLParser # methods are copied into _BaseHTMLProcessor so that they execute in # feedparser's scope instead of sgmllib's scope. charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);') tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') attrfind = re.compile( r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*' r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?' ) # Unfortunately, these must be copied over to prevent NameError exceptions entityref = sgmllib.entityref incomplete = sgmllib.incomplete interesting = sgmllib.interesting shorttag = sgmllib.shorttag shorttagopen = sgmllib.shorttagopen starttagopen = sgmllib.starttagopen class _EndBracketRegEx: def __init__(self): # Overriding the built-in sgmllib.endbracket regex allows the # parser to find angle brackets embedded in element attributes. self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') def search(self, target, index=0): match = self.endbracket.match(target, index) if match is not None: # Returning a new object in the calling thread's context # resolves a thread-safety. return EndBracketMatch(match) return None class EndBracketMatch: def __init__(self, match): self.match = match def start(self, n): return self.match.end(n) endbracket = _EndBracketRegEx() # iconv_codec provides support for more character encodings. 
# It's available from http://cjkpython.i18n.org/ try: import iconv_codec except ImportError: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet except ImportError: chardet = None # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', } class FeedParserDict(dict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['summary', 'subtitle'], 'description_detail': ['summary_detail', 'subtitle_detail'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): ''' :return: A :class:`FeedParserDict`. ''' if key == 'category': try: return dict.__getitem__(self, 'tags')[0]['term'] except IndexError: raise KeyError("object doesn't have key 'category'") elif key == 'enclosures': norel = lambda link: FeedParserDict([(name,value) for (name,value) in list(link.items()) if name!='rel']) return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']=='enclosure'] elif key == 'license': for link in dict.__getitem__(self, 'links'): if link['rel']=='license' and 'href' in link: return link['href'] elif key == 'updated': # Temporarily help developers out by keeping the old # broken behavior that was reported in issue 310. # This fix was proposed in issue 328. if not dict.__contains__(self, 'updated') and \ dict.__contains__(self, 'published'): warnings.warn("To avoid breaking existing software while " "fixing issue 310, a temporary mapping has been created " "from `updated` to `published` if `updated` doesn't " "exist. This fallback will be removed in a future version " "of feedparser.", DeprecationWarning) return dict.__getitem__(self, 'published') return dict.__getitem__(self, 'updated') elif key == 'updated_parsed': if not dict.__contains__(self, 'updated_parsed') and \ dict.__contains__(self, 'published_parsed'): warnings.warn("To avoid breaking existing software while " "fixing issue 310, a temporary mapping has been created " "from `updated_parsed` to `published_parsed` if " "`updated_parsed` doesn't exist. 
This fallback will be " "removed in a future version of feedparser.", DeprecationWarning) return dict.__getitem__(self, 'published_parsed') return dict.__getitem__(self, 'updated_parsed') else: realkey = self.keymap.get(key, key) if isinstance(realkey, list): for k in realkey: if dict.__contains__(self, k): return dict.__getitem__(self, k) elif dict.__contains__(self, realkey): return dict.__getitem__(self, realkey) return dict.__getitem__(self, key) def __contains__(self, key): if key in ('updated', 'updated_parsed'): # Temporarily help developers out by keeping the old # broken behavior that was reported in issue 310. # This fix was proposed in issue 328. return dict.__contains__(self, key) try: self.__getitem__(key) except KeyError: return False else: return True has_key = __contains__ def get(self, key, default=None): ''' :return: A :class:`FeedParserDict`. ''' try: return self.__getitem__(key) except KeyError: return default def __setitem__(self, key, value): key = self.keymap.get(key, key) if isinstance(key, list): key = key[0] return dict.__setitem__(self, key, value) def setdefault(self, key, value): if key not in self: self[key] = value return value return self[key] def __getattr__(self, key): # __getattribute__() is called first; this will be called # only if an attribute was not already found try: return self.__getitem__(key) except KeyError: raise AttributeError("object has no attribute '%s'" % key) def __hash__(self): return id(self) _cp1252 = { 128: chr(8364), # euro sign 130: chr(8218), # single low-9 quotation mark 131: chr( 402), # latin small letter f with hook 132: chr(8222), # double low-9 quotation mark 133: chr(8230), # horizontal ellipsis 134: chr(8224), # dagger 135: chr(8225), # double dagger 136: chr( 710), # modifier letter circumflex accent 137: chr(8240), # per mille sign 138: chr( 352), # latin capital letter s with caron 139: chr(8249), # single left-pointing angle quotation mark 140: chr( 338), # latin capital ligature oe 142: chr( 381), # latin capital letter z with caron 145: chr(8216), # left single quotation mark 146: chr(8217), # right single quotation mark 147: chr(8220), # left double quotation mark 148: chr(8221), # right double quotation mark 149: chr(8226), # bullet 150: chr(8211), # en dash 151: chr(8212), # em dash 152: chr( 732), # small tilde 153: chr(8482), # trade mark sign 154: chr( 353), # latin small letter s with caron 155: chr(8250), # single right-pointing angle quotation mark 156: chr( 339), # latin small ligature oe 158: chr( 382), # latin small letter z with caron 159: chr( 376), # latin capital letter y with diaeresis } _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) if not isinstance(uri, str): uri = uri.decode('utf-8', 'ignore') try: uri = urllib.parse.urljoin(base, uri) except ValueError: uri = '' if not isinstance(uri, str): return uri.decode('utf-8', 'ignore') return uri class _FeedParserMixin: namespaces = { '': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 
'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 'http://my.theinfo.org/changed/1.0/rss/': 'cp', 'http://purl.org/dc/elements/1.1/': 'dc', 'http://purl.org/dc/terms/': 'dcterms', 'http://purl.org/rss/1.0/modules/email/': 'email', 'http://purl.org/rss/1.0/modules/event/': 'ev', 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', 'http://freshmeat.net/rss/fm/': 'fm', 'http://xmlns.com/foaf/0.1/': 'foaf', 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', 'http://www.georss.org/georss': 'georss', 'http://www.opengis.net/gml': 'gml', 'http://postneo.com/icbm/': 'icbm', 'http://purl.org/rss/1.0/modules/image/': 'image', 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://purl.org/rss/1.0/modules/link/': 'l', 'http://search.yahoo.com/mrss': 'media', # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace 'http://search.yahoo.com/mrss/': 'media', 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', 'http://purl.org/rss/1.0/modules/reference/': 'ref', 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', 'http://purl.org/rss/1.0/modules/search/': 'search', 'http://purl.org/rss/1.0/modules/slash/': 'slash', 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', 'http://hacks.benhammersley.com/rss/streaming/': 'str', 'http://purl.org/rss/1.0/modules/subscription/': 'sub', 'http://purl.org/rss/1.0/modules/syndication/': 'sy', 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', 'http://purl.org/rss/1.0/modules/threading/': 'thr', 'http://purl.org/rss/1.0/modules/textinput/': 'ti', 'http://madskills.com/public/xml/rss/module/trackback/': 'trackback', 'http://wellformedweb.org/commentAPI/': 'wfw', 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', 'http://www.w3.org/1999/xhtml': 'xhtml', 'http://www.w3.org/1999/xlink': 'xlink', 'http://www.w3.org/XML/1998/namespace': 'xml', 'http://podlove.org/simple-chapters': 'psc', } _matchnamespaces = {} can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']) can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']) can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']) html_types = ['text/html', 'application/xhtml+xml'] def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): if not self._matchnamespaces: for k, v in list(self.namespaces.items()): self._matchnamespaces[k.lower()] = v self.feeddata = FeedParserDict() # feed-level data self.encoding = encoding # character encoding self.entries = [] # list of entry-level data self.version = '' # feed type/version, see SUPPORTED_VERSIONS self.namespacesInUse = {} # dictionary of namespaces defined by the feed # the following are used internally to track state; # this is really out 
of control and should be refactored self.infeed = 0 self.inentry = 0 self.incontent = 0 self.intextinput = 0 self.inimage = 0 self.inauthor = 0 self.incontributor = 0 self.inpublisher = 0 self.insource = 0 # georss self.ingeometry = 0 self.sourcedata = FeedParserDict() self.contentparams = FeedParserDict() self._summaryKey = None self.namespacemap = {} self.elementstack = [] self.basestack = [] self.langstack = [] self.baseuri = baseuri or '' self.lang = baselang or None self.svgOK = 0 self.title_depth = -1 self.depth = 0 # psc_chapters_flag prevents multiple psc_chapters from being # captured in a single entry or item. The transition states are # None -> True -> False. psc_chapter elements will only be # captured while it is True. self.psc_chapters_flag = None if baselang: self.feeddata['language'] = baselang.replace('_','-') # A map of the following form: # { # object_that_value_is_set_on: { # property_name: depth_of_node_property_was_extracted_from, # other_property: depth_of_node_property_was_extracted_from, # }, # } self.property_depth_map = {} def _normalize_attributes(self, kv): k = kv[0].lower() v = k in ('rel', 'type') and kv[1].lower() or kv[1] # the sgml parser doesn't handle entities in attributes, nor # does it pass the attribute values through as unicode, while # strict xml parsers do -- account for this difference if isinstance(self, _LooseFeedParser): v = v.replace('&amp;', '&') if not isinstance(v, str): v = v.decode('utf-8') return (k, v) def unknown_starttag(self, tag, attrs): # increment depth counter self.depth += 1 # normalize attrs attrs = list(map(self._normalize_attributes, attrs)) # track xml:base and xml:lang attrsD = dict(attrs) baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri if not isinstance(baseuri, str): baseuri = baseuri.decode(self.encoding, 'ignore') # ensure that self.baseuri is always an absolute URI that # uses a whitelisted URI scheme (e.g. 
not `javscript:`) if self.baseuri: self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri else: self.baseuri = _urljoin(self.baseuri, baseuri) lang = attrsD.get('xml:lang', attrsD.get('lang')) if lang == '': # xml:lang could be explicitly set to '', we need to capture that lang = None elif lang is None: # if no xml:lang is specified, use parent lang lang = self.lang if lang: if tag in ('feed', 'rss', 'rdf:RDF'): self.feeddata['language'] = lang.replace('_','-') self.lang = lang self.basestack.append(self.baseuri) self.langstack.append(lang) # track namespaces for prefix, uri in attrs: if prefix.startswith('xmlns:'): self.trackNamespace(prefix[6:], uri) elif prefix == 'xmlns': self.trackNamespace(None, uri) # track inline content if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'): if tag in ('xhtml:div', 'div'): return # typepad does this 10/2007 # element declared itself as escaped markup, but it isn't really self.contentparams['type'] = 'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': if tag.find(':') != -1: prefix, tag = tag.split(':', 1) namespace = self.namespacesInUse.get(prefix, '') if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML': attrs.append(('xmlns',namespace)) if tag=='svg' and namespace=='http://www.w3.org/2000/svg': attrs.append(('xmlns',namespace)) if tag == 'svg': self.svgOK += 1 return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0) # match namespaces if tag.find(':') != -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # special hack for better tracking of empty textinput/image elements in illformed feeds if (not prefix) and tag not in ('title', 'link', 'description', 'name'): self.intextinput = 0 if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): self.inimage = 0 # call special handler (if defined) or default handler methodname = '_start_' + prefix + suffix try: method = getattr(self, methodname) return method(attrsD) except AttributeError: # Since there's no handler or something has gone wrong we explicitly add the element and its attributes unknown_tag = prefix + suffix if len(attrsD) == 0: # No attributes so merge it into the encosing dictionary return self.push(unknown_tag, 1) else: # Has attributes so create it in its own dictionary context = self._getContext() context[unknown_tag] = attrsD def unknown_endtag(self, tag): # match namespaces if tag.find(':') != -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' if suffix == 'svg' and self.svgOK: self.svgOK -= 1 # call special handler (if defined) or default handler methodname = '_end_' + prefix + suffix try: if self.svgOK: raise AttributeError() method = getattr(self, methodname) method() except AttributeError: self.pop(prefix + suffix) # track inline content if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'): # element declared itself as escaped markup, but it isn't really if tag in ('xhtml:div', 'div'): return # typepad does this 10/2007 self.contentparams['type'] = 'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': tag = tag.split(':')[-1] self.handle_data('</%s>' % tag, escape=0) # track xml:base and xml:lang going out of scope if self.basestack: 
self.basestack.pop() if self.basestack and self.basestack[-1]: self.baseuri = self.basestack[-1] if self.langstack: self.langstack.pop() if self.langstack: # and (self.langstack[-1] is not None): self.lang = self.langstack[-1] self.depth -= 1 def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' if not self.elementstack: return ref = ref.lower() if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): text = '&#%s;' % ref else: if ref[0] == 'x': c = int(ref[1:], 16) else: c = int(ref) text = chr(c).encode('utf-8') self.elementstack[-1][2].append(text) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' if not self.elementstack: return if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): text = '&%s;' % ref elif ref in self.entities: text = self.entities[ref] if text.startswith('&#') and text.endswith(';'): return self.handle_entityref(text) else: try: name2codepoint[ref] except KeyError: text = '&%s;' % ref else: text = chr(name2codepoint[ref]).encode('utf-8') self.elementstack[-1][2].append(text) def handle_data(self, text, escape=1): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references if not self.elementstack: return if escape and self.contentparams.get('type') == 'application/xhtml+xml': text = _xmlescape(text) self.elementstack[-1][2].append(text) def handle_comment(self, text): # called for each comment, e.g. <!-- insert message here --> pass def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> pass def handle_decl(self, text): pass def parse_declaration(self, i): # override internal declaration handler to handle CDATA blocks if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: # CDATA block began but didn't finish k = len(self.rawdata) return k self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) return k+3 else: k = self.rawdata.find('>', i) if k >= 0: return k+1 else: # We have an incomplete CDATA block. 
return k def mapContentType(self, contentType): contentType = contentType.lower() if contentType == 'text' or contentType == 'plain': contentType = 'text/plain' elif contentType == 'html': contentType = 'text/html' elif contentType == 'xhtml': contentType = 'application/xhtml+xml' return contentType def trackNamespace(self, prefix, uri): loweruri = uri.lower() if not self.version: if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'): self.version = 'rss090' elif loweruri == 'http://purl.org/rss/1.0/': self.version = 'rss10' elif loweruri == 'http://www.w3.org/2005/atom': self.version = 'atom10' if loweruri.find('backend.userland.com/rss') != -1: # match any backend.userland.com namespace uri = 'http://backend.userland.com/rss' loweruri = uri if loweruri in self._matchnamespaces: self.namespacemap[prefix] = self._matchnamespaces[loweruri] self.namespacesInUse[self._matchnamespaces[loweruri]] = uri else: self.namespacesInUse[prefix or ''] = uri def resolveURI(self, uri): return _urljoin(self.baseuri or '', uri) def decodeEntities(self, element, data): return data def strattrs(self, attrs): return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs]) def push(self, element, expectingText): self.elementstack.append([element, expectingText, []]) def pop(self, element, stripWhitespace=1): if not self.elementstack: return if self.elementstack[-1][0] != element: return element, expectingText, pieces = self.elementstack.pop() if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml': # remove enclosing child element, but only if it is a <div> and # only if all the remaining content is nested underneath it. # This means that the divs would be retained in the following: # <div>foo</div><div>bar</div> while pieces and len(pieces)>1 and not pieces[-1].strip(): del pieces[-1] while pieces and len(pieces)>1 and not pieces[0].strip(): del pieces[0] if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>': depth = 0 for piece in pieces[:-1]: if piece.startswith('</'): depth -= 1 if depth == 0: break elif piece.startswith('<') and not piece.endswith('/>'): depth += 1 else: pieces = pieces[1:-1] # Ensure each piece is a str for Python 3 for (i, v) in enumerate(pieces): if not isinstance(v, str): pieces[i] = v.decode('utf-8') output = ''.join(pieces) if stripWhitespace: output = output.strip() if not expectingText: return output # decode base64 content if base64 and self.contentparams.get('base64', 0): try: output = _base64decode(output) except binascii.Error: pass except binascii.Incomplete: pass except TypeError: # In Python 3, base64 takes and outputs bytes, not str # This may not be the most correct way to accomplish this output = _base64decode(output.encode('utf-8')).decode('utf-8') # resolve relative URIs if (element in self.can_be_relative_uri) and output: # do not resolve guid elements with isPermalink="false" if not element == 'id' or self.guidislink: output = self.resolveURI(output) # decode entities within embedded markup if not self.contentparams.get('base64', 0): output = self.decodeEntities(element, output) # some feed formats require consumers to guess # whether the content is html or plain text if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain': if self.lookslikehtml(output): self.contentparams['type'] = 'text/html' # remove temporary cruft from contentparams try: del self.contentparams['mode'] except KeyError: pass try: del 
self.contentparams['base64'] except KeyError: pass is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types # resolve relative URIs within embedded markup if is_htmlish and RESOLVE_RELATIVE_URIS: if element in self.can_contain_relative_uris: output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html')) # sanitize embedded markup if is_htmlish and SANITIZE_HTML: if element in self.can_contain_dangerous_markup: output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html')) if self.encoding and not isinstance(output, str): output = output.decode(self.encoding, 'ignore') # address common error where people take data that is already # utf-8, presume that it is iso-8859-1, and re-encode it. if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and isinstance(output, str): try: output = output.encode('iso-8859-1').decode('utf-8') except (UnicodeEncodeError, UnicodeDecodeError): pass # map win-1252 extensions to the proper code points if isinstance(output, str): output = output.translate(_cp1252) # categories/tags/keywords/whatever are handled in _end_category or _end_tags or _end_itunes_keywords if element in ('category', 'tags', 'itunes_keywords'): return output if element == 'title' and -1 < self.title_depth <= self.depth: return output # store output in appropriate place(s) if self.inentry and not self.insource: if element == 'content': self.entries[-1].setdefault(element, []) contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element].append(contentparams) elif element == 'link': if not self.inimage: # query variables in urls in link elements are improperly # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're # unhandled character references. fix this special case. 
output = output.replace('&amp;', '&') output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) self.entries[-1][element] = output if output: self.entries[-1]['links'][-1]['href'] = output else: if element == 'description': element = 'summary' old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element) if old_value_depth is None or self.depth <= old_value_depth: self.property_depth_map[self.entries[-1]][element] = self.depth self.entries[-1][element] = output if self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element + '_detail'] = contentparams elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage): context = self._getContext() if element == 'description': element = 'subtitle' context[element] = output if element == 'link': # fix query variables; see above for the explanation output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) context[element] = output context['links'][-1]['href'] = output elif self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output context[element + '_detail'] = contentparams return output def pushContent(self, tag, attrsD, defaultContentType, expectingText): self.incontent += 1 if self.lang: self.lang=self.lang.replace('_','-') self.contentparams = FeedParserDict({ 'type': self.mapContentType(attrsD.get('type', defaultContentType)), 'language': self.lang, 'base': self.baseuri}) self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) self.push(tag, expectingText) def popContent(self, tag): value = self.pop(tag) self.incontent -= 1 self.contentparams.clear() return value # a number of elements in a number of RSS variants are nominally plain # text, but this is routinely ignored. This is an attempt to detect # the most common cases. As false positives often result in silent # data loss, this function errs on the conservative side. 
@staticmethod def lookslikehtml(s): # must have a close tag or an entity reference to qualify if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return # all tags must be in a restricted subset of valid HTML tags if [t for t in re.findall(r'</?(\w+)',s) if t.lower() not in _HTMLSanitizer.acceptable_elements]: return # all entities must have been defined as valid HTML entities if [e for e in re.findall(r'&(\w+);', s) if e not in list(entitydefs.keys())]: return return 1 def _mapToStandardPrefix(self, name): colonpos = name.find(':') if colonpos != -1: prefix = name[:colonpos] suffix = name[colonpos+1:] prefix = self.namespacemap.get(prefix, prefix) name = prefix + ':' + suffix return name def _getAttribute(self, attrsD, name): return attrsD.get(self._mapToStandardPrefix(name)) def _isBase64(self, attrsD, contentparams): if attrsD.get('mode', '') == 'base64': return 1 if self.contentparams['type'].startswith('text/'): return 0 if self.contentparams['type'].endswith('+xml'): return 0 if self.contentparams['type'].endswith('/xml'): return 0 return 1 def _itsAnHrefDamnIt(self, attrsD): href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) if href: try: del attrsD['url'] except KeyError: pass try: del attrsD['uri'] except KeyError: pass attrsD['href'] = href return attrsD def _save(self, key, value, overwrite=False): context = self._getContext() if overwrite: context[key] = value else: context.setdefault(key, value) def _start_rss(self, attrsD): versionmap = {'0.91': 'rss091u', '0.92': 'rss092', '0.93': 'rss093', '0.94': 'rss094'} #If we're here then this is an RSS feed. #If we don't have a version or have a version that starts with something #other than RSS then there's been a mistake. Correct it. if not self.version or not self.version.startswith('rss'): attr_version = attrsD.get('version', '') version = versionmap.get(attr_version) if version: self.version = version elif attr_version.startswith('2.'): self.version = 'rss20' else: self.version = 'rss' def _start_channel(self, attrsD): self.infeed = 1 self._cdf_common(attrsD) def _cdf_common(self, attrsD): if 'lastmod' in attrsD: self._start_modified({}) self.elementstack[-1][-1] = attrsD['lastmod'] self._end_modified() if 'href' in attrsD: self._start_link({}) self.elementstack[-1][-1] = attrsD['href'] self._end_link() def _start_feed(self, attrsD): self.infeed = 1 versionmap = {'0.1': 'atom01', '0.2': 'atom02', '0.3': 'atom03'} if not self.version: attr_version = attrsD.get('version') version = versionmap.get(attr_version) if version: self.version = version else: self.version = 'atom' def _end_channel(self): self.infeed = 0 _end_feed = _end_channel def _start_image(self, attrsD): context = self._getContext() if not self.inentry: context.setdefault('image', FeedParserDict()) self.inimage = 1 self.title_depth = -1 self.push('image', 0) def _end_image(self): self.pop('image') self.inimage = 0 def _start_textinput(self, attrsD): context = self._getContext() context.setdefault('textinput', FeedParserDict()) self.intextinput = 1 self.title_depth = -1 self.push('textinput', 0) _start_textInput = _start_textinput def _end_textinput(self): self.pop('textinput') self.intextinput = 0 _end_textInput = _end_textinput def _start_author(self, attrsD): self.inauthor = 1 self.push('author', 1) # Append a new FeedParserDict when expecting an author context = self._getContext() context.setdefault('authors', []) context['authors'].append(FeedParserDict()) _start_managingeditor = _start_author _start_dc_author = _start_author 
_start_dc_creator = _start_author _start_itunes_author = _start_author def _end_author(self): self.pop('author') self.inauthor = 0 self._sync_author_detail() _end_managingeditor = _end_author _end_dc_author = _end_author _end_dc_creator = _end_author _end_itunes_author = _end_author def _start_itunes_owner(self, attrsD): self.inpublisher = 1 self.push('publisher', 0) def _end_itunes_owner(self): self.pop('publisher') self.inpublisher = 0 self._sync_author_detail('publisher') def _start_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('contributor', 0) def _end_contributor(self): self.pop('contributor') self.incontributor = 0 def _start_dc_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('name', 0) def _end_dc_contributor(self): self._end_name() self.incontributor = 0 def _start_name(self, attrsD): self.push('name', 0) _start_itunes_name = _start_name def _end_name(self): value = self.pop('name') if self.inpublisher: self._save_author('name', value, 'publisher') elif self.inauthor: self._save_author('name', value) elif self.incontributor: self._save_contributor('name', value) elif self.intextinput: context = self._getContext() context['name'] = value _end_itunes_name = _end_name def _start_width(self, attrsD): self.push('width', 0) def _end_width(self): value = self.pop('width') try: value = int(value) except ValueError: value = 0 if self.inimage: context = self._getContext() context['width'] = value def _start_height(self, attrsD): self.push('height', 0) def _end_height(self): value = self.pop('height') try: value = int(value) except ValueError: value = 0 if self.inimage: context = self._getContext() context['height'] = value def _start_url(self, attrsD): self.push('href', 1) _start_homepage = _start_url _start_uri = _start_url def _end_url(self): value = self.pop('href') if self.inauthor: self._save_author('href', value) elif self.incontributor: self._save_contributor('href', value) _end_homepage = _end_url _end_uri = _end_url def _start_email(self, attrsD): self.push('email', 0) _start_itunes_email = _start_email def _end_email(self): value = self.pop('email') if self.inpublisher: self._save_author('email', value, 'publisher') elif self.inauthor: self._save_author('email', value) elif self.incontributor: self._save_contributor('email', value) _end_itunes_email = _end_email def _getContext(self): if self.insource: context = self.sourcedata elif self.inimage and 'image' in self.feeddata: context = self.feeddata['image'] elif self.intextinput: context = self.feeddata['textinput'] elif self.inentry: context = self.entries[-1] else: context = self.feeddata return context def _save_author(self, key, value, prefix='author'): context = self._getContext() context.setdefault(prefix + '_detail', FeedParserDict()) context[prefix + '_detail'][key] = value self._sync_author_detail() context.setdefault('authors', [FeedParserDict()]) context['authors'][-1][key] = value def _save_contributor(self, key, value): context = self._getContext() context.setdefault('contributors', [FeedParserDict()]) context['contributors'][-1][key] = value def _sync_author_detail(self, key='author'): context = self._getContext() detail = context.get('%ss' % key, [FeedParserDict()])[-1] if detail: name = detail.get('name') email = detail.get('email') if name and email: 
context[key] = '%s (%s)' % (name, email) elif name: context[key] = name elif email: context[key] = email else: author, email = context.get(key), None if not author: return emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author) if emailmatch: email = emailmatch.group(0) # probably a better way to do the following, but it passes all the tests author = author.replace(email, '') author = author.replace('()', '') author = author.replace('<>', '') author = author.replace('&lt;&gt;', '') author = author.strip() if author and (author[0] == '('): author = author[1:] if author and (author[-1] == ')'): author = author[:-1] author = author.strip() if author or email: context.setdefault('%s_detail' % key, detail) if author: detail['name'] = author if email: detail['email'] = email def _start_subtitle(self, attrsD): self.pushContent('subtitle', attrsD, 'text/plain', 1) _start_tagline = _start_subtitle _start_itunes_subtitle = _start_subtitle def _end_subtitle(self): self.popContent('subtitle') _end_tagline = _end_subtitle _end_itunes_subtitle = _end_subtitle def _start_rights(self, attrsD): self.pushContent('rights', attrsD, 'text/plain', 1) _start_dc_rights = _start_rights _start_copyright = _start_rights def _end_rights(self): self.popContent('rights') _end_dc_rights = _end_rights _end_copyright = _end_rights def _start_item(self, attrsD): self.entries.append(FeedParserDict()) self.push('item', 0) self.inentry = 1 self.guidislink = 0 self.title_depth = -1 self.psc_chapters_flag = None id = self._getAttribute(attrsD, 'rdf:about') if id: context = self._getContext() context['id'] = id self._cdf_common(attrsD) _start_entry = _start_item def _end_item(self): self.pop('item') self.inentry = 0 _end_entry = _end_item def _start_dc_language(self, attrsD): self.push('language', 1) _start_language = _start_dc_language def _end_dc_language(self): self.lang = self.pop('language') _end_language = _end_dc_language def _start_dc_publisher(self, attrsD): self.push('publisher', 1) _start_webmaster = _start_dc_publisher def _end_dc_publisher(self): self.pop('publisher') self._sync_author_detail('publisher') _end_webmaster = _end_dc_publisher def _start_dcterms_valid(self, attrsD): self.push('validity', 1) def _end_dcterms_valid(self): for validity_detail in self.pop('validity').split(';'): if '=' in validity_detail: key, value = validity_detail.split('=', 1) if key == 'start': self._save('validity_start', value, overwrite=True) self._save('validity_start_parsed', _parse_date(value), overwrite=True) elif key == 'end': self._save('validity_end', value, overwrite=True) self._save('validity_end_parsed', _parse_date(value), overwrite=True) def _start_published(self, attrsD): self.push('published', 1) _start_dcterms_issued = _start_published _start_issued = _start_published _start_pubdate = _start_published def _end_published(self): value = self.pop('published') self._save('published_parsed', _parse_date(value), overwrite=True) _end_dcterms_issued = _end_published _end_issued = _end_published _end_pubdate = _end_published def _start_updated(self, attrsD): self.push('updated', 1) _start_modified = _start_updated _start_dcterms_modified = _start_updated _start_dc_date = _start_updated _start_lastbuilddate = _start_updated def _end_updated(self): value = self.pop('updated') parsed_value = _parse_date(value) self._save('updated_parsed', parsed_value, overwrite=True) _end_modified = _end_updated 
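# Illustrative aside (a sketch, not part of the original source): the email
# extraction branch of _sync_author_detail above splits a combined author
# string. Assuming an entry whose author element contains
# 'John Doe (jd@example.com)':
#
#   >>> entry.author_detail.name
#   'John Doe'
#   >>> entry.author_detail.email
#   'jd@example.com'
#
# The regex captures the address, the leftover '()' / '<>' wrappers are
# stripped, and whatever text remains becomes the name.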
_end_dcterms_modified = _end_updated _end_dc_date = _end_updated _end_lastbuilddate = _end_updated def _start_created(self, attrsD): self.push('created', 1) _start_dcterms_created = _start_created def _end_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value), overwrite=True) _end_dcterms_created = _end_created def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True) # geospatial location, or "where", from georss.org def _start_georssgeom(self, attrsD): self.push('geometry', 0) context = self._getContext() context['where'] = FeedParserDict() _start_georss_point = _start_georssgeom _start_georss_line = _start_georssgeom _start_georss_polygon = _start_georssgeom _start_georss_box = _start_georssgeom def _save_where(self, geometry): context = self._getContext() context['where'].update(geometry) def _end_georss_point(self): geometry = _parse_georss_point(self.pop('geometry')) if geometry: self._save_where(geometry) def _end_georss_line(self): geometry = _parse_georss_line(self.pop('geometry')) if geometry: self._save_where(geometry) def _end_georss_polygon(self): this = self.pop('geometry') geometry = _parse_georss_polygon(this) if geometry: self._save_where(geometry) def _end_georss_box(self): geometry = _parse_georss_box(self.pop('geometry')) if geometry: self._save_where(geometry) def _start_where(self, attrsD): self.push('where', 0) context = self._getContext() context['where'] = FeedParserDict() _start_georss_where = _start_where def _parse_srs_attrs(self, attrsD): srsName = attrsD.get('srsname') try: srsDimension = int(attrsD.get('srsdimension', '2')) except ValueError: srsDimension = 2 context = self._getContext() context['where']['srsName'] = srsName context['where']['srsDimension'] = srsDimension def _start_gml_point(self, attrsD): self._parse_srs_attrs(attrsD) self.ingeometry = 1 self.push('geometry', 0) def _start_gml_linestring(self, attrsD): self._parse_srs_attrs(attrsD) self.ingeometry = 'linestring' self.push('geometry', 0) def _start_gml_polygon(self, attrsD): self._parse_srs_attrs(attrsD) self.push('geometry', 0) def _start_gml_exterior(self, attrsD): self.push('geometry', 0) def _start_gml_linearring(self, attrsD): self.ingeometry = 'polygon' self.push('geometry', 0) def _start_gml_pos(self, attrsD): self.push('pos', 0) def _end_gml_pos(self): this = self.pop('pos') context = self._getContext() srsName = context['where'].get('srsName') srsDimension = context['where'].get('srsDimension', 2) swap = True if srsName and "EPSG" in srsName: epsg = int(srsName.split(":")[-1]) swap = bool(epsg in _geogCS) geometry = _parse_georss_point(this, swap=swap, dims=srsDimension) if geometry: self._save_where(geometry) def _start_gml_poslist(self, attrsD): self.push('pos', 0) def _end_gml_poslist(self): this = self.pop('pos') context = self._getContext() srsName = context['where'].get('srsName') srsDimension = context['where'].get('srsDimension', 2) swap = True if srsName and "EPSG" in srsName: epsg = int(srsName.split(":")[-1]) swap = bool(epsg in _geogCS) geometry = _parse_poslist( this, self.ingeometry, swap=swap, dims=srsDimension) if geometry: self._save_where(geometry) def _end_geom(self): self.ingeometry = 0 self.pop('geometry') _end_gml_point = _end_geom _end_gml_linestring = _end_geom _end_gml_linearring = _end_geom _end_gml_exterior = _end_geom _end_gml_polygon = _end_geom def _end_where(self): self.pop('where') _end_georss_where 
= _end_where # end geospatial def _start_cc_license(self, attrsD): context = self._getContext() value = self._getAttribute(attrsD, 'rdf:resource') attrsD = FeedParserDict() attrsD['rel'] = 'license' if value: attrsD['href']=value context.setdefault('links', []).append(attrsD) def _start_creativecommons_license(self, attrsD): self.push('license', 1) _start_creativeCommons_license = _start_creativecommons_license def _end_creativecommons_license(self): value = self.pop('license') context = self._getContext() attrsD = FeedParserDict() attrsD['rel'] = 'license' if value: attrsD['href'] = value context.setdefault('links', []).append(attrsD) del context['license'] _end_creativeCommons_license = _end_creativecommons_license def _addTag(self, term, scheme, label): context = self._getContext() tags = context.setdefault('tags', []) if (not term) and (not scheme) and (not label): return value = FeedParserDict(term=term, scheme=scheme, label=label) if value not in tags: tags.append(value) def _start_tags(self, attrsD): # This is a completely-made up element. Its semantics are determined # only by a single feed that precipitated bug report 392 on Google Code. # In short, this is junk code. self.push('tags', 1) def _end_tags(self): for term in self.pop('tags').split(','): self._addTag(term.strip(), None, None) def _start_category(self, attrsD): term = attrsD.get('term') scheme = attrsD.get('scheme', attrsD.get('domain')) label = attrsD.get('label') self._addTag(term, scheme, label) self.push('category', 1) _start_dc_subject = _start_category _start_keywords = _start_category def _start_media_category(self, attrsD): attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema') self._start_category(attrsD) def _end_itunes_keywords(self): for term in self.pop('itunes_keywords').split(','): if term.strip(): self._addTag(term.strip(), 'http://www.itunes.com/', None) def _end_media_keywords(self): for term in self.pop('media_keywords').split(','): if term.strip(): self._addTag(term.strip(), None, None) def _start_itunes_category(self, attrsD): self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) self.push('category', 1) def _end_category(self): value = self.pop('category') if not value: return context = self._getContext() tags = context['tags'] if value and len(tags) and not tags[-1]['term']: tags[-1]['term'] = value else: self._addTag(value, None, None) _end_dc_subject = _end_category _end_keywords = _end_category _end_itunes_category = _end_category _end_media_category = _end_category def _start_cloud(self, attrsD): self._getContext()['cloud'] = FeedParserDict(attrsD) def _start_link(self, attrsD): attrsD.setdefault('rel', 'alternate') if attrsD['rel'] == 'self': attrsD.setdefault('type', 'application/atom+xml') else: attrsD.setdefault('type', 'text/html') context = self._getContext() attrsD = self._itsAnHrefDamnIt(attrsD) if 'href' in attrsD: attrsD['href'] = self.resolveURI(attrsD['href']) expectingText = self.infeed or self.inentry or self.insource context.setdefault('links', []) if not (self.inentry and self.inimage): context['links'].append(FeedParserDict(attrsD)) if 'href' in attrsD: expectingText = 0 if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): context['link'] = attrsD['href'] else: self.push('link', expectingText) def _end_link(self): value = self.pop('link') def _start_guid(self, attrsD): self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') self.push('id', 1) _start_id = _start_guid def _end_guid(self): 
value = self.pop('id') self._save('guidislink', self.guidislink and 'link' not in self._getContext()) if self.guidislink: # guid acts as link, but only if 'ispermalink' is not present or is 'true', # and only if the item doesn't already have a link element self._save('link', value) _end_id = _end_guid def _start_title(self, attrsD): if self.svgOK: return self.unknown_starttag('title', list(attrsD.items())) self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) _start_dc_title = _start_title _start_media_title = _start_title def _end_title(self): if self.svgOK: return value = self.popContent('title') if not value: return self.title_depth = self.depth _end_dc_title = _end_title def _end_media_title(self): title_depth = self.title_depth self._end_title() self.title_depth = title_depth def _start_description(self, attrsD): context = self._getContext() if 'summary' in context: self._summaryKey = 'content' self._start_content(attrsD) else: self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource) _start_dc_description = _start_description _start_media_description = _start_description def _start_abstract(self, attrsD): self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) def _end_description(self): if self._summaryKey == 'content': self._end_content() else: value = self.popContent('description') self._summaryKey = None _end_abstract = _end_description _end_dc_description = _end_description _end_media_description = _end_description def _start_info(self, attrsD): self.pushContent('info', attrsD, 'text/plain', 1) _start_feedburner_browserfriendly = _start_info def _end_info(self): self.popContent('info') _end_feedburner_browserfriendly = _end_info def _start_generator(self, attrsD): if attrsD: attrsD = self._itsAnHrefDamnIt(attrsD) if 'href' in attrsD: attrsD['href'] = self.resolveURI(attrsD['href']) self._getContext()['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') context = self._getContext() if 'generator_detail' in context: context['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self._getContext()['generator_detail'] = FeedParserDict({'href': value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): context = self._getContext() if 'summary' in context: self._summaryKey = 'content' self._start_content(attrsD) else: self._summaryKey = 'summary' self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) _start_itunes_summary = _start_summary def _end_summary(self): if self._summaryKey == 'content': self._end_content() else: self.popContent(self._summaryKey or 'summary') self._summaryKey = None _end_itunes_summary = _end_summary def _start_enclosure(self, attrsD): attrsD = self._itsAnHrefDamnIt(attrsD) context = self._getContext() attrsD['rel'] = 'enclosure' context.setdefault('links', []).append(FeedParserDict(attrsD)) def _start_source(self, attrsD): if 'url' in attrsD: # This means that we're processing a source element from an RSS 2.0 feed self.sourcedata['href'] = attrsD['url'] self.push('source', 1) self.insource = 1 
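# Illustrative aside (a sketch, not part of the original source): the effect
# of _end_guid above on two hypothetical items:
#
#   <guid>http://example.com/post/1</guid>
#       -> entry.id is set, and entry.link is filled from the guid
#          (isPermaLink defaults to "true" and no <link> was seen)
#   <guid isPermaLink="false">tag:example.com,2009:1</guid>
#       -> entry.id only; the guid is treated as an opaque identifier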
self.title_depth = -1 def _end_source(self): self.insource = 0 value = self.pop('source') if value: self.sourcedata['title'] = value self._getContext()['source'] = copy.deepcopy(self.sourcedata) self.sourcedata.clear() def _start_content(self, attrsD): self.pushContent('content', attrsD, 'text/plain', 1) src = attrsD.get('src') if src: self.contentparams['src'] = src self.push('content', 1) def _start_body(self, attrsD): self.pushContent('content', attrsD, 'application/xhtml+xml', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) _start_fullitem = _start_content_encoded def _end_content(self): copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) value = self.popContent('content') if copyToSummary: self._save('summary', value) _end_body = _end_content _end_xhtml_body = _end_content _end_content_encoded = _end_content _end_fullitem = _end_content def _start_itunes_image(self, attrsD): self.push('itunes_image', 0) if attrsD.get('href'): self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) elif attrsD.get('url'): self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')}) _start_itunes_link = _start_itunes_image def _end_itunes_block(self): value = self.pop('itunes_block', 0) self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 def _end_itunes_explicit(self): value = self.pop('itunes_explicit', 0) # Convert 'yes' -> True, 'clean' to False, and any other value to None # False and None both evaluate as False, so the difference can be ignored # by applications that only need to know if the content is explicit. self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0] def _start_media_group(self, attrsD): # don't do anything, but don't break the enclosed tags either pass def _start_media_rating(self, attrsD): context = self._getContext() context.setdefault('media_rating', attrsD) self.push('rating', 1) def _end_media_rating(self): rating = self.pop('rating') if rating is not None and rating.strip(): context = self._getContext() context['media_rating']['content'] = rating def _start_media_credit(self, attrsD): context = self._getContext() context.setdefault('media_credit', []) context['media_credit'].append(attrsD) self.push('credit', 1) def _end_media_credit(self): credit = self.pop('credit') if credit != None and len(credit.strip()) != 0: context = self._getContext() context['media_credit'][-1]['content'] = credit def _start_media_restriction(self, attrsD): context = self._getContext() context.setdefault('media_restriction', attrsD) self.push('restriction', 1) def _end_media_restriction(self): restriction = self.pop('restriction') if restriction != None and len(restriction.strip()) != 0: context = self._getContext() context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')] def _start_media_license(self, attrsD): context = self._getContext() context.setdefault('media_license', attrsD) self.push('license', 1) def _end_media_license(self): license = self.pop('license') if license != None and len(license.strip()) != 0: context = self._getContext() context['media_license']['content'] = license def _start_media_content(self, attrsD): context = self._getContext() context.setdefault('media_content', []) context['media_content'].append(attrsD) def _start_media_thumbnail(self, attrsD): context = self._getContext() 
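# Illustrative aside (a sketch, not part of the original source): the tuple
# indexing in _end_itunes_explicit above is a compact three-way map,
# equivalent to:
#
#   if value == 'yes':     explicit = True
#   elif value == 'clean': explicit = False
#   else:                  explicit = None   # unknown / unstated
#
# (value == 'yes' and 2) selects index 2 -> True; a 'clean' value makes the
# second operand True (index 1) -> False; anything else yields 0 -> None.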
context.setdefault('media_thumbnail', []) self.push('url', 1) # new context['media_thumbnail'].append(attrsD) def _end_media_thumbnail(self): url = self.pop('url') context = self._getContext() if url != None and len(url.strip()) != 0: if 'url' not in context['media_thumbnail'][-1]: context['media_thumbnail'][-1]['url'] = url def _start_media_player(self, attrsD): self.push('media_player', 0) self._getContext()['media_player'] = FeedParserDict(attrsD) def _end_media_player(self): value = self.pop('media_player') context = self._getContext() context['media_player']['content'] = value def _start_newlocation(self, attrsD): self.push('newlocation', 1) def _end_newlocation(self): url = self.pop('newlocation') context = self._getContext() # don't set newlocation if the context isn't right if context is not self.feeddata: return context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip()) def _start_psc_chapters(self, attrsD): if self.psc_chapters_flag is None: # Transition from None -> True self.psc_chapters_flag = True attrsD['chapters'] = [] self._getContext()['psc_chapters'] = FeedParserDict(attrsD) def _end_psc_chapters(self): # Transition from True -> False self.psc_chapters_flag = False def _start_psc_chapter(self, attrsD): if self.psc_chapters_flag: start = self._getAttribute(attrsD, 'start') attrsD['start_parsed'] = _parse_psc_chapter_start(start) context = self._getContext()['psc_chapters'] context['chapters'].append(FeedParserDict(attrsD)) if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__(self, baseuri, baselang, encoding): xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None self.decls = {} def startPrefixMapping(self, prefix, uri): if not uri: return # Jython uses '' instead of None; standardize on None prefix = prefix or None self.trackNamespace(prefix, uri) if prefix and uri == 'http://www.w3.org/1999/xlink': self.decls['xmlns:' + prefix] = uri def startElementNS(self, name, qname, attrs): namespace, localname = name lowernamespace = str(namespace or '').lower() if lowernamespace.find('backend.userland.com/rss') != -1: # match any backend.userland.com namespace namespace = 'http://backend.userland.com/rss' lowernamespace = namespace if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = None prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse: raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix) localname = str(localname).lower() # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. 
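# Illustrative aside (a sketch, not part of the original source): once the
# startElementNS machinery here has normalized a namespace to its canonical
# prefix, the mixin dispatches by name, conceptually:
#
#   <dc:creator> in http://purl.org/dc/elements/1.1/
#       -> element name 'dc:creator' -> handler '_start_dc_creator'
#
# which is why one-line aliases such as `_start_dc_creator = _start_author`
# earlier in this file are enough to cover a whole auxiliary vocabulary.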
attrsD, self.decls = self.decls, {} if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML': attrsD['xmlns']=namespace if localname=='svg' and namespace=='http://www.w3.org/2000/svg': attrsD['xmlns']=namespace if prefix: localname = prefix.lower() + ':' + localname elif namespace and not qname: #Expat for name,value in list(self.namespacesInUse.items()): if name and value == namespace: localname = name + ':' + localname break for (namespace, attrlocalname), attrvalue in list(attrs.items()): lowernamespace = (namespace or '').lower() prefix = self._matchnamespaces.get(lowernamespace, '') if prefix: attrlocalname = prefix + ':' + attrlocalname attrsD[str(attrlocalname).lower()] = attrvalue for qname in attrs.getQNames(): attrsD[str(qname).lower()] = attrs.getValueByQName(qname) localname = str(localname).lower() self.unknown_starttag(localname, list(attrsD.items())) def characters(self, text): self.handle_data(text) def endElementNS(self, name, qname): namespace, localname = name lowernamespace = str(namespace or '').lower() if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = '' prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if prefix: localname = prefix + ':' + localname elif namespace and not qname: #Expat for name,value in list(self.namespacesInUse.items()): if name and value == namespace: localname = name + ':' + localname break localname = str(localname).lower() self.unknown_endtag(localname) def error(self, exc): self.bozo = 1 self.exc = exc # drv_libxml2 calls warning() in some cases warning = error def fatalError(self, exc): self.error(exc) raise exc class _BaseHTMLProcessor(sgmllib.SGMLParser): special = re.compile('''[<>'"]''') bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)") elements_no_end_tag = set([ 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param', 'source', 'track', 'wbr' ]) def __init__(self, encoding, _type): self.encoding = encoding self._type = _type sgmllib.SGMLParser.__init__(self) def reset(self): self.pieces = [] sgmllib.SGMLParser.reset(self) def _shorttag_replace(self, match): tag = match.group(1) if tag in self.elements_no_end_tag: return '<' + tag + ' />' else: return '<' + tag + '></' + tag + '>' # By declaring these methods and overriding their compiled code # with the code from sgmllib, the original code will execute in # feedparser's scope instead of sgmllib's. This means that the # `tagfind` and `charref` regular expressions will be found as # they're declared above, not as they're declared in sgmllib. 
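# Illustrative aside on the __code__ transplant used below (a sketch, not
# part of the original source): a function executes its bytecode against the
# globals of the module that defined the *function object*, so copying only
# __code__ re-binds every module-level name the bytecode looks up:
#
#   >>> def goahead_stub(self, i): pass
#   >>> goahead_stub.__code__ = sgmllib.SGMLParser.goahead.__code__
#   >>> goahead_stub.__globals__ is vars(sgmllib)
#   False
#
# Inside the borrowed bytecode, names like `tagfind` and `charref` now
# resolve in this module's namespace rather than sgmllib's.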
def goahead(self, i): pass goahead.__code__ = sgmllib.SGMLParser.goahead.__code__ def __parse_starttag(self, i): pass __parse_starttag.__code__ = sgmllib.SGMLParser.parse_starttag.__code__ def parse_starttag(self,i): j = self.__parse_starttag(i) if self._type == 'application/xhtml+xml': if j>2 and self.rawdata[j-2:j]=='/>': self.unknown_endtag(self.lasttag) return j def feed(self, data): data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data) data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data) data = data.replace('&#39;', "'") data = data.replace('&#34;', '"') try: bytes if bytes is str: raise NameError self.encoding = self.encoding + '_INVALID_PYTHON_3' except NameError: if self.encoding and isinstance(data, str): data = data.encode(self.encoding) sgmllib.SGMLParser.feed(self, data) sgmllib.SGMLParser.close(self) def normalize_attrs(self, attrs): if not attrs: return attrs # utility method to be called by descendants attrs = list(dict([(k.lower(), v) for k, v in attrs]).items()) attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] attrs.sort() return attrs def unknown_starttag(self, tag, attrs): # called for each start tag # attrs is a list of (attr, value) tuples # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')] uattrs = [] strattrs='' if attrs: for key, value in attrs: value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;') value = self.bare_ampersand.sub("&amp;", value) # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds if not isinstance(value, str): value = value.decode(self.encoding, 'ignore') try: # Currently, in Python 3 the key is already a str, and cannot be decoded again uattrs.append((str(key, self.encoding), value)) except TypeError: uattrs.append((key, value)) strattrs = ''.join([' %s="%s"' % (key, value) for key, value in uattrs]) if self.encoding: try: strattrs = strattrs.encode(self.encoding) except (UnicodeEncodeError, LookupError): pass if tag in self.elements_no_end_tag: self.pieces.append('<%s%s />' % (tag, strattrs)) else: self.pieces.append('<%s%s>' % (tag, strattrs)) def unknown_endtag(self, tag): # called for each end tag, e.g. for </pre>, tag will be 'pre' # Reconstruct the original end tag. if tag not in self.elements_no_end_tag: self.pieces.append("</%s>" % tag) def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' # Reconstruct the original character reference. ref = ref.lower() if ref.startswith('x'): value = int(ref[1:], 16) else: value = int(ref) if value in _cp1252: self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:]) else: self.pieces.append('&#%s;' % ref) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' # Reconstruct the original entity reference. if ref in name2codepoint or ref == 'apos': self.pieces.append('&%s;' % ref) else: self.pieces.append('&amp;%s' % ref) def handle_data(self, text): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references # Store the original text verbatim. self.pieces.append(text) def handle_comment(self, text): # called for each HTML comment, e.g. <!-- insert Javascript code here --> # Reconstruct the original comment. self.pieces.append('<!--%s-->' % text) def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> # Reconstruct original processing instruction. 
self.pieces.append('<?%s>' % text) def handle_decl(self, text): # called for the DOCTYPE, if present, e.g. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # "http://www.w3.org/TR/html4/loose.dtd"> # Reconstruct original DOCTYPE self.pieces.append('<!%s>' % text) _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match def _scan_name(self, i, declstartpos): rawdata = self.rawdata n = len(rawdata) if i == n: return None, -1 m = self._new_declname_match(rawdata, i) if m: s = m.group() name = s.strip() if (i + len(s)) == n: return None, -1 # end of buffer return name.lower(), m.end() else: self.handle_data(rawdata) # self.updatepos(declstartpos, i) return None, -1 def convert_charref(self, name): return '&#%s;' % name def convert_entityref(self, name): return '&%s;' % name def output(self): '''Return processed HTML as a single string''' return ''.join([str(p) for p in self.pieces]) def parse_declaration(self, i): try: return sgmllib.SGMLParser.parse_declaration(self, i) except sgmllib.SGMLParseError: # escape the doctype declaration and continue parsing self.handle_data('&lt;') return i+1 class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): def __init__(self, baseuri, baselang, encoding, entities): sgmllib.SGMLParser.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml') self.entities=entities def decodeEntities(self, element, data): data = data.replace('&#60;', '&lt;') data = data.replace('&#x3c;', '&lt;') data = data.replace('&#x3C;', '&lt;') data = data.replace('&#62;', '&gt;') data = data.replace('&#x3e;', '&gt;') data = data.replace('&#x3E;', '&gt;') data = data.replace('&#38;', '&amp;') data = data.replace('&#x26;', '&amp;') data = data.replace('&#34;', '&quot;') data = data.replace('&#x22;', '&quot;') data = data.replace('&#39;', '&apos;') data = data.replace('&#x27;', '&apos;') if not self.contentparams.get('type', 'xml').endswith('xml'): data = data.replace('&lt;', '<') data = data.replace('&gt;', '>') data = data.replace('&amp;', '&') data = data.replace('&quot;', '"') data = data.replace('&apos;', "'") data = data.replace('&#x2f;', '/') data = data.replace('&#x2F;', '/') return data def strattrs(self, attrs): return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs]) class _RelativeURIResolver(_BaseHTMLProcessor): relative_uris = set([('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('audio', 'src'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src'), ('source', 'src'), ('video', 'poster'), ('video', 'src')]) def __init__(self, baseuri, encoding, _type): _BaseHTMLProcessor.__init__(self, encoding, _type) self.baseuri = baseuri def resolveURI(self, uri): return _makeSafeAbsoluteURI(self.baseuri, uri.strip()) def unknown_starttag(self, tag, attrs): attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type): if not _SGML_AVAILABLE: return 
htmlSource p = _RelativeURIResolver(baseURI, encoding, _type) p.feed(htmlSource) return p.output() def _makeSafeAbsoluteURI(base, rel=None): # bail if ACCEPTABLE_URI_SCHEMES is empty if not ACCEPTABLE_URI_SCHEMES: return _urljoin(base, rel or '') if not base: return rel or '' if not rel: try: scheme = urllib.parse.urlparse(base)[0] except ValueError: return '' if not scheme or scheme in ACCEPTABLE_URI_SCHEMES: return base return '' uri = _urljoin(base, rel) if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES: return '' return uri class _HTMLSanitizer(_BaseHTMLProcessor): acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area', 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']) acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', 'background', 'balance', 'bgcolor', 'bgproperties', 'border', 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', 'xml:lang']) unacceptable_elements_with_end_tag = set(['script', 'applet', 'style']) acceptable_css_properties = set(['azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 
'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width']) # survey of common keywords found in feeds acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow']) valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') mathml_elements = set([ 'annotation', 'annotation-xml', 'maction', 'maligngroup', 'malignmark', 'math', 'menclose', 'merror', 'mfenced', 'mfrac', 'mglyph', 'mi', 'mlabeledtr', 'mlongdiv', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', 'mprescripts', 'mroot', 'mrow', 'ms', 'mscarries', 'mscarry', 'msgroup', 'msline', 'mspace', 'msqrt', 'msrow', 'mstack', 'mstyle', 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', 'munderover', 'none', 'semantics', ]) mathml_attributes = set([ 'accent', 'accentunder', 'actiontype', 'align', 'alignmentscope', 'altimg', 'altimg-height', 'altimg-valign', 'altimg-width', 'alttext', 'bevelled', 'charalign', 'close', 'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'columnwidth', 'crossout', 'decimalpoint', 'denomalign', 'depth', 'dir', 'display', 'displaystyle', 'edge', 'encoding', 'equalcolumns', 'equalrows', 'fence', 'fontstyle', 'fontweight', 'form', 'frame', 'framespacing', 'groupalign', 'height', 'href', 'id', 'indentalign', 'indentalignfirst', 'indentalignlast', 'indentshift', 'indentshiftfirst', 'indentshiftlast', 'indenttarget', 'infixlinebreakstyle', 'largeop', 'length', 'linebreak', 'linebreakmultchar', 'linebreakstyle', 'lineleading', 'linethickness', 'location', 'longdivstyle', 'lquote', 'lspace', 'mathbackground', 'mathcolor', 'mathsize', 'mathvariant', 'maxsize', 'minlabelspacing', 'minsize', 'movablelimits', 'notation', 'numalign', 'open', 'other', 'overflow', 'position', 'rowalign', 'rowlines', 'rowspacing', 'rowspan', 'rquote', 'rspace', 'scriptlevel', 'scriptminsize', 'scriptsizemultiplier', 'selection', 'separator', 'separators', 'shift', 'side', 'src', 'stackalign', 'stretchy', 'subscriptshift', 'superscriptshift', 'symmetric', 'voffset', 'width', 'xlink:href', 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink', ]) # svgtiny - foreignObject + linearGradient + radialGradient + stop svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject', 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']) # svgtiny + class + opacity + offset + xmlns + xmlns:xlink svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic', 'arabic-form', 'ascent', 'attributeName', 'attributeType', 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', 'fill-rule', 'font-family', 'font-size', 'font-stretch', 
'font-style', 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', 'min', 'name', 'offset', 'opacity', 'orient', 'origin', 'overline-position', 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness', 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan']) svg_attr_map = None svg_elem_map = None acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity']) def reset(self): _BaseHTMLProcessor.reset(self) self.unacceptablestack = 0 self.mathmlOK = 0 self.svgOK = 0 def unknown_starttag(self, tag, attrs): acceptable_attributes = self.acceptable_attributes keymap = {} if not tag in self.acceptable_elements or self.svgOK: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack += 1 # add implicit namespaces to html5 inline svg/mathml if self._type.endswith('html'): if not dict(attrs).get('xmlns'): if tag=='svg': attrs.append( ('xmlns','http://www.w3.org/2000/svg') ) if tag=='math': attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') ) # not otherwise acceptable, perhaps it is MathML or SVG? if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: self.mathmlOK += 1 if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: self.svgOK += 1 # chose acceptable attributes based on tag class, else bail if self.mathmlOK and tag in self.mathml_elements: acceptable_attributes = self.mathml_attributes elif self.svgOK and tag in self.svg_elements: # for most vocabularies, lowercasing is a good idea. 
Many # svg elements, however, are camel case if not self.svg_attr_map: lower=[attr.lower() for attr in self.svg_attributes] mix=[a for a in self.svg_attributes if a not in lower] self.svg_attributes = lower self.svg_attr_map = dict([(a.lower(),a) for a in mix]) lower=[attr.lower() for attr in self.svg_elements] mix=[a for a in self.svg_elements if a not in lower] self.svg_elements = lower self.svg_elem_map = dict([(a.lower(),a) for a in mix]) acceptable_attributes = self.svg_attributes tag = self.svg_elem_map.get(tag,tag) keymap = self.svg_attr_map elif not tag in self.acceptable_elements: return # declare xlink namespace, if needed if self.mathmlOK or self.svgOK: if [n_v for n_v in attrs if n_v[0].startswith('xlink:')]: if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs: attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink')) clean_attrs = [] for key, value in self.normalize_attrs(attrs): if key in acceptable_attributes: key=keymap.get(key,key) # make sure the uri uses an acceptable uri scheme if key == 'href': value = _makeSafeAbsoluteURI(value) clean_attrs.append((key,value)) elif key=='style': clean_value = self.sanitize_style(value) if clean_value: clean_attrs.append((key,clean_value)) _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs) def unknown_endtag(self, tag): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack -= 1 if self.mathmlOK and tag in self.mathml_elements: if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1 elif self.svgOK and tag in self.svg_elements: tag = self.svg_elem_map.get(tag,tag) if tag == 'svg' and self.svgOK: self.svgOK -= 1 else: return _BaseHTMLProcessor.unknown_endtag(self, tag) def handle_pi(self, text): pass def handle_decl(self, text): pass def handle_data(self, text): if not self.unacceptablestack: _BaseHTMLProcessor.handle_data(self, text) def sanitize_style(self, style): # disallow urls style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' # This replaced a regexp that used re.match and was prone to pathological back-tracking. 
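# Illustrative aside (a sketch, not part of the original source): the
# combined effect of the url() scrub above and the property whitelist below:
#
#   >>> s = _HTMLSanitizer('utf-8', 'text/html')
#   >>> s.sanitize_style('color: red; background: url(http://evil/x.png); float: left')
#   'color: red; float: left;'
#
# The url(...) call is blanked first, the emptied background declaration is
# then dropped, and only whitelisted properties survive.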
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return '' clean = [] for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): if not value: continue if prop.lower() in self.acceptable_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background','border','margin','padding']: for keyword in value.split(): if not keyword in self.acceptable_css_keywords and \ not self.valid_css_values.match(keyword): break else: clean.append(prop + ': ' + value + ';') elif self.svgOK and prop.lower() in self.acceptable_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) def parse_comment(self, i, report=1): ret = _BaseHTMLProcessor.parse_comment(self, i, report) if ret >= 0: return ret # if ret == -1, this may be a malicious attempt to circumvent # sanitization, or a page-destroying unclosed comment match = re.compile(r'--[^>]*>').search(self.rawdata, i+4) if match: return match.end() # unclosed comment; deliberately fail to handle_data() return len(self.rawdata) def _sanitizeHTML(htmlSource, encoding, _type): if not _SGML_AVAILABLE: return htmlSource p = _HTMLSanitizer(encoding, _type) htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[') p.feed(htmlSource) data = p.output() data = data.strip().replace('\r\n', '\n') return data class _FeedURLHandler(urllib.request.HTTPDigestAuthHandler, urllib.request.HTTPRedirectHandler, urllib.request.HTTPDefaultErrorHandler): def http_error_default(self, req, fp, code, msg, headers): # The default implementation just raises HTTPError. # Forget that. fp.status = code return fp def http_error_301(self, req, fp, code, msg, hdrs): result = urllib.request.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, hdrs) result.status = code result.newurl = result.geturl() return result # The default implementations in urllib2.HTTPRedirectHandler # are identical, so hardcoding a http_error_301 call above # won't affect anything http_error_300 = http_error_301 http_error_302 = http_error_301 http_error_303 = http_error_301 http_error_307 = http_error_301 def http_error_401(self, req, fp, code, msg, headers): # Check if # - server requires digest auth, AND # - we tried (unsuccessfully) with basic auth, AND # If all conditions hold, parse authentication information # out of the Authorization header we sent the first time # (for the username and password) and the WWW-Authenticate # header the server sent back (for the realm) and retry # the request with the appropriate digest auth headers instead. # This evil genius hack has been brought to you by Aaron Swartz. host = urllib.parse.urlparse(req.get_full_url())[1] if base64 is None or 'Authorization' not in req.headers \ or 'WWW-Authenticate' not in headers: return self.http_error_default(req, fp, code, msg, headers) auth = _base64decode(req.headers['Authorization'].split(' ')[1]) user, passw = auth.split(':') realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] self.add_password(realm, host, user, passw) retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) self.reset_retry_count() return retry def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers): """URL, filename, or string --> stream This function lets you define parsers that take any input source (URL, pathname to local or network file, or actual data as a string) and deal with it in a uniform manner. 
Returned object is guaranteed to have all the basic stdio read methods (read, readline, readlines). Just .close() the object when you're done with it. If the etag argument is supplied, it will be used as the value of an If-None-Match request header. If the modified argument is supplied, it can be a tuple of 9 integers (as returned by gmtime() in the standard Python time module) or a date string in any format supported by feedparser. Regardless, it MUST be in GMT (Greenwich Mean Time). It will be reformatted into an RFC 1123-compliant date and used as the value of an If-Modified-Since request header. If the agent argument is supplied, it will be used as the value of a User-Agent request header. If the referrer argument is supplied, it will be used as the value of a Referer[sic] request header. If handlers is supplied, it is a list of handlers used to build a urllib2 opener. if request_headers is supplied it is a dictionary of HTTP request headers that will override the values generated by FeedParser. :return: A :class:`StringIO.StringIO` or :class:`io.BytesIO`. """ if hasattr(url_file_stream_or_string, 'read'): return url_file_stream_or_string if isinstance(url_file_stream_or_string, str) \ and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'): # Deal with the feed URI scheme if url_file_stream_or_string.startswith('feed:http'): url_file_stream_or_string = url_file_stream_or_string[5:] elif url_file_stream_or_string.startswith('feed:'): url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:] if not agent: agent = USER_AGENT # Test for inline user:password credentials for HTTP basic auth auth = None if base64 and not url_file_stream_or_string.startswith('ftp:'): urltype, rest = urllib.parse.splittype(url_file_stream_or_string) realhost, rest = urllib.parse.splithost(rest) if realhost: user_passwd, realhost = urllib.parse.splituser(realhost) if user_passwd: url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) auth = base64.standard_b64encode(user_passwd).strip() # iri support if isinstance(url_file_stream_or_string, str): url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string) # try to open with urllib2 (to use optional headers) request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers) opener = urllib.request.build_opener(*tuple(handlers + [_FeedURLHandler()])) opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent try: return opener.open(request) finally: opener.close() # JohnD # try to open with native open function (if url_file_stream_or_string is a filename) try: return open(url_file_stream_or_string, 'rb') except (IOError, UnicodeEncodeError, TypeError): # if url_file_stream_or_string is a unicode object that # cannot be converted to the encoding returned by # sys.getfilesystemencoding(), a UnicodeEncodeError # will be thrown # If url_file_stream_or_string is a string that contains NULL # (such as an XML document encoded in UTF-32), TypeError will # be thrown. 
pass # treat url_file_stream_or_string as string if isinstance(url_file_stream_or_string, str): return _StringIO(url_file_stream_or_string.encode('utf-8')) return _StringIO(url_file_stream_or_string) def _convert_to_idn(url): """Convert a URL to IDN notation""" # this function should only be called with a unicode string # strategy: if the host cannot be encoded in ascii, then # it'll be necessary to encode it in idn form parts = list(urllib.parse.urlsplit(url)) try: parts[1].encode('ascii') except UnicodeEncodeError: # the url needs to be converted to idn notation host = parts[1].rsplit(':', 1) newhost = [] port = '' if len(host) == 2: port = host.pop() for h in host[0].split('.'): newhost.append(h.encode('idna').decode('utf-8')) parts[1] = '.'.join(newhost) if port: parts[1] += ':' + port return urllib.parse.urlunsplit(parts) else: return url def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers): request = urllib.request.Request(url) request.add_header('User-Agent', agent) if etag: request.add_header('If-None-Match', etag) if isinstance(modified, str): modified = _parse_date(modified) elif isinstance(modified, datetime.datetime): modified = modified.utctimetuple() if modified: # format into an RFC 1123-compliant timestamp. We can't use # time.strftime() since the %a and %b directives can be affected # by the current locale, but RFC 2616 states that dates must be # in English. short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) if referrer: request.add_header('Referer', referrer) if gzip and zlib: request.add_header('Accept-encoding', 'gzip, deflate') elif gzip: request.add_header('Accept-encoding', 'gzip') elif zlib: request.add_header('Accept-encoding', 'deflate') else: request.add_header('Accept-encoding', '') if auth: request.add_header('Authorization', 'Basic %s' % auth) if ACCEPT_HEADER: request.add_header('Accept', ACCEPT_HEADER) # use this for whatever -- cookies, special headers, etc # [('Cookie','Something'),('x-special-header','Another Value')] for header_name, header_value in list(request_headers.items()): request.add_header(header_name, header_value) request.add_header('A-IM', 'feed') # RFC 3229 support return request def _parse_psc_chapter_start(start): FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$' m = re.compile(FORMAT).match(start) if m is None: return None _, h, m, s, _, ms = m.groups() h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0)) return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000) _date_handlers = [] def registerDateHandler(func): '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' _date_handlers.insert(0, func) # ISO-8601 date parsing routines written by Fazal Majid. # The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 # parser is beyond the scope of feedparser and would be a worthwhile addition # to the Python library. # A single regular expression cannot parse ISO 8601 date formats into groups # as the standard is highly irregular (for instance is 030104 2003-01-04 or # 0301-04-01), so we use templates instead. # Please note the order in templates is significant because we need a # greedy match. 
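# Illustrative aside (a sketch, not part of the original source): each
# template below expands into a named-group regex, e.g.
#
#   'YYYY-?MM-?DD'
#       -> r'(?P<year>\d{4})-?(?P<month>[01]\d)-?(?P<day>[0123]\d)'
#
# plus the optional time/timezone tail, which is why both '2004-01-05' and
# the compact '20040105' match the same template.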
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO', 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', '-YY-?MM', '-OOO', '-YY', '--MM-?DD', '--MM', '---DD', 'CC', ''] _iso8601_re = [ tmpl.replace( 'YYYY', r'(?P<year>\d{4})').replace( 'YY', r'(?P<year>\d\d)').replace( 'MM', r'(?P<month>[01]\d)').replace( 'DD', r'(?P<day>[0123]\d)').replace( 'OOO', r'(?P<ordinal>[0123]\d\d)').replace( 'CC', r'(?P<century>\d\d$)') + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' + r'(:(?P<second>\d{2}))?' + r'(\.(?P<fracsecond>\d+))?' + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' for tmpl in _iso8601_tmpl] try: del tmpl except NameError: pass _iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] try: del regex except NameError: pass def _parse_date_iso8601(dateString): '''Parse a variety of ISO-8601-compatible formats like 20040105''' m = None for _iso8601_match in _iso8601_matches: m = _iso8601_match(dateString) if m: break if not m: return if m.span() == (0, 0): return params = m.groupdict() ordinal = params.get('ordinal', 0) if ordinal: ordinal = int(ordinal) else: ordinal = 0 year = params.get('year', '--') if not year or year == '--': year = time.gmtime()[0] elif len(year) == 2: # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993 year = 100 * int(time.gmtime()[0] / 100) + int(year) else: year = int(year) month = params.get('month', '-') if not month or month == '-': # ordinals are NOT normalized by mktime, we simulate them # by setting month=1, day=ordinal if ordinal: month = 1 else: month = time.gmtime()[1] month = int(month) day = params.get('day', 0) if not day: # see above if ordinal: day = ordinal elif params.get('century', 0) or \ params.get('year', 0) or params.get('month', 0): day = 1 else: day = time.gmtime()[2] else: day = int(day) # special case of the century - is the first year of the 21st century # 2000 or 2001 ? The debate goes on... if 'century' in params: year = (int(params['century']) - 1) * 100 + 1 # in ISO 8601 most fields are optional for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: if not params.get(field, None): params[field] = 0 hour = int(params.get('hour', 0)) minute = int(params.get('minute', 0)) second = int(float(params.get('second', 0))) # weekday is normalized by mktime(), we can ignore it weekday = 0 daylight_savings_flag = -1 tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag] # ISO 8601 time zone adjustments tz = params.get('tz') if tz and tz != 'Z': if tz[0] == '-': tm[3] += int(params.get('tzhour', 0)) tm[4] += int(params.get('tzmin', 0)) elif tz[0] == '+': tm[3] -= int(params.get('tzhour', 0)) tm[4] -= int(params.get('tzmin', 0)) else: return None # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) # which is guaranteed to normalize d/m/y/h/m/s. # Many implementations have bugs, but we'll pretend they don't. return time.localtime(time.mktime(tuple(tm))) registerDateHandler(_parse_date_iso8601) # 8-bit date handling routines written by ytrewq1. 
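# Illustrative aside (a sketch, not part of the original source): the
# locale-specific handlers that follow are wired in through
# registerDateHandler above, and user code can extend the list the same
# way. A hypothetical handler, assuming this module imports as `feedparser`:
#
#   >>> import feedparser, time
#   >>> def parse_epoch(s):
#   ...     '''Accept bare unix timestamps; return a 9-tuple in GMT or None.'''
#   ...     if s.isdigit():
#   ...         return time.gmtime(int(s))
#   >>> feedparser.registerDateHandler(parse_epoch)
#
# Handlers are inserted at the front of _date_handlers, so user-registered
# parsers take priority over the built-ins.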
_korean_year  = '\ub144' # b3e2 in euc-kr
_korean_month = '\uc6d4' # bff9 in euc-kr
_korean_day   = '\uc77c' # c0cf in euc-kr
_korean_am    = '\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = '\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))

_korean_nate_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))

def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    hour = int(m.group(5))
    ampm = m.group(4)
    if (ampm == _korean_pm):
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

# Unicode strings for Greek date strings
_greek_months = \
  { \
   '\u0399\u03b1\u03bd': 'Jan',       # c9e1ed in iso-8859-7
   '\u03a6\u03b5\u03b2': 'Feb',       # d6e5e2 in iso-8859-7
   '\u039c\u03ac\u03ce': 'Mar',       # ccdcfe in iso-8859-7
   '\u039c\u03b1\u03ce': 'Mar',       # cce1fe in iso-8859-7
   '\u0391\u03c0\u03c1': 'Apr',       # c1f0f1 in iso-8859-7
   '\u039c\u03ac\u03b9': 'May',       # ccdce9 in iso-8859-7
   '\u039c\u03b1\u03ca': 'May',       # cce1fa in iso-8859-7
   '\u039c\u03b1\u03b9': 'May',       # cce1e9 in iso-8859-7
   '\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7
   '\u0399\u03bf\u03bd': 'Jun',       # c9efed in iso-8859-7
   '\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7
   '\u0399\u03bf\u03bb': 'Jul',       # c9f9eb in iso-8859-7
   '\u0391\u03cd\u03b3': 'Aug',       # c1fde3 in iso-8859-7
   '\u0391\u03c5\u03b3': 'Aug',       # c1f5e3 in iso-8859-7
   '\u03a3\u03b5\u03c0': 'Sep',       # d3e5f0 in iso-8859-7
   '\u039f\u03ba\u03c4': 'Oct',       # cfeaf4 in iso-8859-7
   '\u039d\u03bf\u03ad': 'Nov',       # cdefdd in iso-8859-7
   '\u039d\u03bf\u03b5': 'Nov',       # cdefe5 in iso-8859-7
   '\u0394\u03b5\u03ba': 'Dec',       # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   '\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
   '\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
   '\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
   '\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
   '\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
   '\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
   '\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
  }

_greek_date_format_re = \
    re.compile('([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    wday = _greek_wdays[m.group(1)]
    month = _greek_months[m.group(3)]
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = \
  { \
    'janu\u00e1r':   '01',  # e1 in iso-8859-2
    'febru\u00e1ri': '02',  # e1 in iso-8859-2
    'm\u00e1rcius':  '03',  # e1 in iso-8859-2
    '\u00e1prilis':  '04',  # e1 in iso-8859-2
    'm\u00e1ujus':   '05',  # e1 in iso-8859-2
    'j\u00fanius':   '06',  # fa in iso-8859-2
    'j\u00falius':   '07',  # fa in iso-8859-2
    'augusztus':     '08',
    'szeptember':    '09',
    'okt\u00f3ber':  '10',  # f3 in iso-8859-2
    'november':      '11',
    'december':      '12',
  }

_hungarian_date_format_re = \
    re.compile('(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m or m.group(2) not in _hungarian_months:
        return None
    month = _hungarian_months[m.group(2)]
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

timezonenames = {
    'ut': 0, 'gmt': 0, 'z': 0,
    'adt': -3, 'ast': -4, 'at': -4,
    'edt': -4, 'est': -5, 'et': -5,
    'cdt': -5, 'cst': -6, 'ct': -6,
    'mdt': -6, 'mst': -7, 'mt': -7,
    'pdt': -7, 'pst': -8, 'pt': -8,
    'a': -1, 'n': 1, 'm': -12, 'y': 12,
}

# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
    if not datestr.strip():
        return None
    parts = datestr.lower().split('t')
    if len(parts) == 1:
        # This may be a date only, or may be an MSSQL-style date
        parts = parts[0].split()
        if len(parts) == 1:
            # Treat this as a date only
            parts.append('00:00:00z')
    elif len(parts) > 2:
        return None
    date = parts[0].split('-', 2)
    if not date or len(date[0]) != 4:
        return None
    # Ensure that `date` has 3 elements. Using '1' sets the default
    # month to January and the default day to the 1st of the month.
    date.extend(['1'] * (3 - len(date)))
    try:
        year, month, day = [int(i) for i in date]
    except ValueError:
        # `date` may have more than 3 elements or may contain
        # non-integer strings.
        return None
    if parts[1].endswith('z'):
        parts[1] = parts[1][:-1]
        parts.append('z')
    # Append the numeric timezone offset, if any, to parts.
    # If this is an MSSQL-style date then parts[2] already contains
    # the timezone information, so `append()` will not affect it.
    # Add 1 to each value so that if `find()` returns -1 it will be
    # treated as False.
    loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
    loc = loc - 1
    parts.append(parts[1][loc:])
    parts[1] = parts[1][:loc]
    time = parts[1].split(':', 2)
    # Ensure that time has 3 elements. Using '0' means that the
    # minutes and seconds, if missing, will default to 0.
    time.extend(['0'] * (3 - len(time)))
    tzhour = 0
    tzmin = 0
    if parts[2][:1] in ('-', '+'):
        try:
            tzhour = int(parts[2][1:3])
            tzmin = int(parts[2][4:])
        except ValueError:
            return None
        if parts[2].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        tzhour = timezonenames.get(parts[2], 0)
    try:
        hour, minute, second = [int(float(i)) for i in time]
    except ValueError:
        return None
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_w3dtf)

def _parse_date_rfc822(date):
    """Parse RFC 822 dates and times
    http://tools.ietf.org/html/rfc822#section-5

    There are some formatting differences that are accounted for:
    1. Years may be two or four digits.
    2. The month and day can be swapped.
    3. Additional timezone names are supported.
    4. A default time and timezone are assumed if only a date is present.
    """
    daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
    months = {
        'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
        'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
    }

    parts = date.lower().split()
    if len(parts) < 5:
        # Assume that the time and timezone are missing
        parts.extend(('00:00:00', '0000'))
    # Remove the day name
    if parts[0][:3] in daynames:
        parts = parts[1:]
    if len(parts) < 5:
        # If there are still fewer than five parts, there's not enough
        # information to interpret this
        return None
    try:
        day = int(parts[0])
    except ValueError:
        # Check if the day and month are swapped
        if months.get(parts[0][:3]):
            try:
                day = int(parts[1])
            except ValueError:
                return None
            else:
                parts[1] = parts[0]
        else:
            return None
    month = months.get(parts[1][:3])
    if not month:
        return None
    try:
        year = int(parts[2])
    except ValueError:
        return None
    # Normalize two-digit years:
    # Anything in the 90's is interpreted as 1990 and on
    # Anything 89 or less is interpreted as 2089 or before
    if len(parts[2]) <= 2:
        year += (1900, 2000)[year < 90]
    timeparts = parts[3].split(':')
    timeparts = timeparts + ([0] * (3 - len(timeparts)))
    try:
        (hour, minute, second) = list(map(int, timeparts))
    except ValueError:
        return None
    tzhour = 0
    tzmin = 0
    # Strip 'Etc/' from the timezone
    if parts[4].startswith('etc/'):
        parts[4] = parts[4][4:]
    # Normalize timezones that start with 'gmt':
    # GMT-05:00 => -0500
    # GMT => GMT
    if parts[4].startswith('gmt'):
        parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
    # Handle timezones like '-0500', '+0500', and 'EST'
    if parts[4] and parts[4][0] in ('-', '+'):
        try:
            tzhour = int(parts[4][1:3])
            tzmin = int(parts[4][3:])
        except ValueError:
            return None
        if parts[4].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        tzhour = timezonenames.get(parts[4], 0)
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_rfc822)

_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep',
           'oct', 'nov', 'dec']

def _parse_date_asctime(dt):
    """Parse asctime-style dates.

    Converts asctime to RFC822-compatible dates and uses the RFC822 parser
    to do the actual parsing.

    Supported formats (format is standardized to the first one listed):

    * {weekday name} {month name} dd hh:mm:ss {+-tz} yyyy
    * {weekday name} {month name} dd hh:mm:ss yyyy
    """
    parts = dt.split()

    # Insert a GMT timezone, if needed.
    if len(parts) == 5:
        parts.insert(4, '+0000')

    # Exit if there are not six parts.
    if len(parts) != 6:
        return None

    # Reassemble the parts in an RFC822-compatible order and parse them.
    return _parse_date_rfc822(' '.join([
        parts[0], parts[2], parts[1], parts[5], parts[3], parts[4],
    ]))
registerDateHandler(_parse_date_asctime)

def _parse_date_perforce(aDateString):
    """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""

    # Fri, 2006/09/15 08:19:53 EDT
    _my_date_pattern = re.compile( \
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')

    m = _my_date_pattern.search(aDateString)
    if m is None:
        return None
    dow, year, month, day, hour, minute, second, tz = m.groups()
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)

def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    if not dateString:
        return None
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            continue
        if not date9tuple:
            continue
        if len(date9tuple) != 9:
            continue
        return date9tuple
    return None

# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])

ZERO_BYTES = _l2bytes([0x00, 0x00])

# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')

# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))

def convert_to_utf8(http_headers, data):
    '''Detect and convert the character encoding to UTF-8.

    http_headers is a dictionary
    data is a raw string (not Unicode)'''

    # This is so much trickier than it sounds, it's not even funny.
    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    # is application/xml, application/*+xml,
    # application/xml-external-parsed-entity, or application/xml-dtd,
    # the encoding given in the charset parameter of the HTTP Content-Type
    # takes precedence over the encoding given in the XML prefix within the
    # document, and defaults to 'utf-8' if neither are specified. But, if
    # the HTTP Content-Type is text/xml, text/*+xml, or
    # text/xml-external-parsed-entity, the encoding given in the XML prefix
    # within the document is ALWAYS IGNORED and only the encoding given in
    # the charset parameter of the HTTP Content-Type header should be
    # respected, and it defaults to 'us-ascii' if not specified.

    # Furthermore, discussion on the atom-syntax mailing list with the
    # author of RFC 3023 leads me to the conclusion that any document
    # served with a Content-Type of text/* and no charset parameter
    # must be treated as us-ascii. (We now do this.) And also that it
    # must always be flagged as non-well-formed. (We now do this too.)

    # If Content-Type is unspecified (input was local file or non-HTTP source)
    # or unrecognized (server just got it totally wrong), then go by the
    # encoding given in the XML prefix of the document and default to
    # 'iso-8859-1' as per the HTTP specification (RFC 2616).

    # Then, assuming we didn't find a character encoding in the HTTP headers
    # (and the HTTP Content-type allowed us to look in the body), we need
    # to sniff the first few bytes of the XML data and try to determine
    # whether the encoding is ASCII-compatible. Section F of the XML
    # specification shows the way here:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    # If the sniffed encoding is not ASCII-compatible, we need to make it
    # ASCII compatible so that we can sniff further into the XML declaration
    # to find the encoding attribute, which will tell us the true encoding.

    # Of course, none of this guarantees that we will be able to parse the
    # feed in the declared character encoding (assuming it was declared
    # correctly, which many are not). iconv_codec can help a lot;
    # you should definitely install it if you can.
    # http://cjkpython.i18n.org/

    bom_encoding = ''
    xml_encoding = ''
    rfc3023_encoding = ''

    # Look at the first few bytes of the document to guess what
    # its encoding may be. We only need to decode enough of the
    # document that we can use an ASCII-compatible regular
    # expression to search for an XML encoding declaration.
    # The heuristic follows the XML specification, section F:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Check for BOMs first.
    if data[:4] == codecs.BOM_UTF32_BE:
        bom_encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == codecs.BOM_UTF32_LE:
        bom_encoding = 'utf-32le'
        data = data[4:]
    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
        bom_encoding = 'utf-16be'
        data = data[2:]
    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
        bom_encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == codecs.BOM_UTF8:
        bom_encoding = 'utf-8'
        data = data[3:]
    # Check for the characters '<?xm' in several encodings.
    elif data[:4] == EBCDIC_MARKER:
        bom_encoding = 'cp037'
    elif data[:4] == UTF16BE_MARKER:
        bom_encoding = 'utf-16be'
    elif data[:4] == UTF16LE_MARKER:
        bom_encoding = 'utf-16le'
    elif data[:4] == UTF32BE_MARKER:
        bom_encoding = 'utf-32be'
    elif data[:4] == UTF32LE_MARKER:
        bom_encoding = 'utf-32le'

    tempdata = data
    try:
        if bom_encoding:
            tempdata = data.decode(bom_encoding).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # feedparser recognizes UTF-32 encodings that aren't
        # available in Python 2.4 and 2.5, so it's possible to
        # encounter a LookupError during decoding.
        xml_encoding_match = None
    else:
        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)

    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # Normalize the xml_encoding if necessary.
        if bom_encoding and (xml_encoding in (
            'u16', 'utf-16', 'utf16', 'utf_16',
            'u32', 'utf-32', 'utf32', 'utf_32',
            'iso-10646-ucs-2', 'iso-10646-ucs-4',
            'csucs4', 'csunicode', 'ucs-2', 'ucs-4'
        )):
            xml_encoding = bom_encoding

    # Find the HTTP Content-Type and, hopefully, a character
    # encoding provided by the server. The Content-Type is used
    # to choose the "correct" encoding among the BOM encoding,
    # XML declaration encoding, and HTTP encoding, following the
    # heuristic defined in RFC 3023.
    http_content_type = http_headers.get('content-type') or ''
    http_content_type, params = cgi.parse_header(http_content_type)
    http_encoding = params.get('charset', '').replace("'", "")
    if not isinstance(http_encoding, str):
        http_encoding = http_encoding.decode('utf-8', 'ignore')

    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd',
                                 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and
        http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/') and
          http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        rfc3023_encoding = http_encoding or 'us-ascii'
    elif http_headers and 'content-type' not in http_headers:
        rfc3023_encoding = xml_encoding or 'iso-8859-1'
    else:
        rfc3023_encoding = xml_encoding or 'utf-8'
    # gb18030 is a superset of gb2312, so always replace gb2312
    # with gb18030 for greater compatibility.
    if rfc3023_encoding.lower() == 'gb2312':
        rfc3023_encoding = 'gb18030'
    if xml_encoding.lower() == 'gb2312':
        xml_encoding = 'gb18030'

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    error = None

    if http_headers and (not acceptable_content_type):
        if 'content-type' in http_headers:
            msg = '%s is not an XML media type' % http_headers['content-type']
        else:
            msg = 'no Content-type specified'
        error = NonXMLContentType(msg)

    # determine character encoding
    known_encoding = 0
    lazy_chardet_encoding = None
    tried_encodings = []
    if chardet:
        def lazy_chardet_encoding():
            chardet_encoding = chardet.detect(data)['encoding']
            if not chardet_encoding:
                chardet_encoding = ''
            if not isinstance(chardet_encoding, str):
                chardet_encoding = str(chardet_encoding, 'ascii', 'ignore')
            return chardet_encoding
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
                              lazy_chardet_encoding, 'utf-8', 'windows-1252', 'iso-8859-2'):
        if callable(proposed_encoding):
            proposed_encoding = proposed_encoding()
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = data.decode(proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = 1
            # Update the encoding in the opening XML processing instruction.
            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
            if RE_XML_DECLARATION.search(data):
                data = RE_XML_DECLARATION.sub(new_declaration, data)
            else:
                data = new_declaration + '\n' + data
            data = data.encode('utf-8')
            break
    # if still no luck, give up
    if not known_encoding:
        error = CharacterEncodingUnknown(
            'document encoding unknown, I tried ' +
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
            (rfc3023_encoding, xml_encoding))
        rfc3023_encoding = ''
    elif proposed_encoding != rfc3023_encoding:
        error = CharacterEncodingOverride(
            'document declared as %s, but parsed as %s' %
            (rfc3023_encoding, proposed_encoding))
        rfc3023_encoding = proposed_encoding

    return data, rfc3023_encoding, error

# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)

# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)

# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))

def replace_doctype(data):
    '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document with a replaced DOCTYPE
    '''

    # Divide the document into two groups by finding the location
    # of the first element that doesn't begin with '<?' or '<!'.
    start = re.search(_s2bytes('<\w'), data)
    start = start and start.start() or -1
    head, data = data[:start+1], data[start+1:]

    # Save and then remove all of the ENTITY declarations.
    entity_results = RE_ENTITY_PATTERN.findall(head)
    head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)

    # Find the DOCTYPE declaration and check the feed type.
    doctype_results = RE_DOCTYPE_PATTERN.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if _s2bytes('netscape') in doctype.lower():
        version = 'rss091n'
    else:
        version = None

    # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
    replacement = _s2bytes('')
    if len(doctype_results) == 1 and entity_results:
        match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
        safe_entities = list(filter(match_safe_entities, entity_results))
        if safe_entities:
            replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
                        + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
                        + _s2bytes('>\n]>')
    data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data

    # Precompute the safe entities for the loose parser.
    safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
                         for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
    return version, data, safe_entities

# GeoRSS geometry parsers. Each returns a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.

def _parse_poslist(value, geom_type, swap=True, dims=2):
    if geom_type == 'linestring':
        return _parse_georss_line(value, swap, dims)
    elif geom_type == 'polygon':
        ring = _parse_georss_line(value, swap, dims)
        return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
    else:
        return None

def _gen_georss_coords(value, swap=True, dims=2):
    # A generator of (lon, lat) pairs from a string of encoded GeoRSS
    # coordinates. Converts to floats and swaps order.
    latlons = map(float, value.strip().replace(',', ' ').split())
    nxt = latlons.__next__
    while True:
        t = [nxt(), nxt()][::swap and -1 or 1]
        if dims == 3:
            t.append(nxt())
        yield tuple(t)

def _parse_georss_point(value, swap=True, dims=2):
    # A point contains a single latitude-longitude pair, separated by
    # whitespace. We'll also handle comma separators.
    try:
        coords = list(_gen_georss_coords(value, swap, dims))
        return {'type': 'Point', 'coordinates': coords[0]}
    except (IndexError, ValueError):
        return None

def _parse_georss_line(value, swap=True, dims=2):
    # A line contains a space separated list of latitude-longitude pairs in
    # WGS84 coordinate reference system, with each pair separated by
    # whitespace. There must be at least two pairs.
    try:
        coords = list(_gen_georss_coords(value, swap, dims))
        return {'type': 'LineString', 'coordinates': coords}
    except (IndexError, ValueError):
        return None

def _parse_georss_polygon(value, swap=True, dims=2):
    # A polygon contains a space separated list of latitude-longitude pairs,
    # with each pair separated by whitespace. There must be at least four
    # pairs, with the last being identical to the first (so a polygon has a
    # minimum of three actual points).
    try:
        ring = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    if len(ring) < 4:
        return None
    return {'type': 'Polygon', 'coordinates': (ring,)}

def _parse_georss_box(value, swap=True, dims=2):
    # A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space-separated
    # latitude-longitude pairs, with each pair separated by whitespace. The
    # first pair is the lower corner, the second is the upper corner.
    try:
        coords = list(_gen_georss_coords(value, swap, dims))
        return {'type': 'Box', 'coordinates': tuple(coords)}
    except (IndexError, ValueError):
        return None

# end geospatial parsers

def parse(url_file_stream_or_string, etag=None, modified=None, agent=None,
          referrer=None, handlers=None, request_headers=None, response_headers=None):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    :return: A :class:`FeedParserDict`.
    '''

    if handlers is None:
        handlers = []
    if request_headers is None:
        request_headers = {}
    if response_headers is None:
        response_headers = {}

    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent,
                           referrer, handlers, request_headers)
        data = f.read()
    except Exception as e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None

    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)

    # lowercase all of the HTTP headers for comparisons per RFC 2616
    if 'headers' in result:
        http_headers = dict((k.lower(), v) for k, v in list(result['headers'].items()))
    else:
        http_headers = {}

    # if feed is gzip-compressed, decompress it
    if f and data and http_headers:
        if gzip and 'gzip' in http_headers.get('content-encoding', ''):
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except (IOError, struct.error) as e:
                # IOError can occur if the gzip header is bad.
                # struct.error can occur if the data is damaged.
                result['bozo'] = 1
                result['bozo_exception'] = e
                if isinstance(e, struct.error):
                    # A gzip header was found but the data is corrupt.
                    # Ideally, we should re-request the feed without the
                    # 'Accept-encoding: gzip' header, but we don't.
                    data = None
        elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
            try:
                data = zlib.decompress(data)
            except zlib.error as e:
                try:
                    # The data may have no headers and no checksum.
                    data = zlib.decompress(data, -15)
                except zlib.error as e:
                    result['bozo'] = 1
                    result['bozo_exception'] = e

    # save HTTP headers
    if http_headers:
        if 'etag' in http_headers:
            etag = http_headers.get('etag', '')
            if not isinstance(etag, str):
                etag = etag.decode('utf-8', 'ignore')
            if etag:
                result['etag'] = etag
        if 'last-modified' in http_headers:
            modified = http_headers.get('last-modified', '')
            if modified:
                result['modified'] = modified
                result['modified_parsed'] = _parse_date(modified)
    if hasattr(f, 'url'):
        if not isinstance(f.url, str):
            result['href'] = f.url.decode('utf-8', 'ignore')
        else:
            result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()

    if data is None:
        return result

    # Stop processing if the server sent HTTP 304 Not Modified.
    if getattr(f, 'code', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result

    data, result['encoding'], error = convert_to_utf8(http_headers, data)
    use_strict_parser = result['encoding'] and True or False
    if error is not None:
        result['bozo'] = 1
        result['bozo_exception'] = error

    result['version'], data, entities = replace_doctype(data)

    # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
    contentloc = http_headers.get('content-location', '')
    href = result.get('href', '')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href

    baselang = http_headers.get('content-language', None)
    if not isinstance(baselang, str) and baselang is not None:
        baselang = baselang.decode('utf-8', 'ignore')

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        try:
            # disable downloading external doctype references, if possible
            saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
        except xml.sax.SAXNotSupportedException:
            pass
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        try:
            saxparser.parse(source)
        except xml.sax.SAXException as e:
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser and _SGML_AVAILABLE:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result

# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [ 3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322, 4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
mit
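The file above wires every parser into a shared registry through registerDateHandler, which is also feedparser's public extension point. A minimal sketch of adding a custom handler from user code, assuming the library is installed and importable as feedparser; the handler name and the "YYYY.MM.DD" format are made-up examples:

import time
import feedparser

def _parse_date_dotted(date_string):
    # Handlers return a 9-tuple (ideally in UTC) or None so the next
    # registered handler gets a chance; time.struct_time is such a tuple.
    try:
        return time.strptime(date_string, '%Y.%m.%d')
    except ValueError:
        return None

feedparser.registerDateHandler(_parse_date_dotted)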
takis/django
tests/managers_regress/models.py
245
3566
""" Various edge-cases for model managers. """ from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models from django.utils.encoding import force_text, python_2_unicode_compatible class OnlyFred(models.Manager): def get_queryset(self): return super(OnlyFred, self).get_queryset().filter(name='fred') class OnlyBarney(models.Manager): def get_queryset(self): return super(OnlyBarney, self).get_queryset().filter(name='barney') class Value42(models.Manager): def get_queryset(self): return super(Value42, self).get_queryset().filter(value=42) class AbstractBase1(models.Model): name = models.CharField(max_length=50) class Meta: abstract = True # Custom managers manager1 = OnlyFred() manager2 = OnlyBarney() objects = models.Manager() class AbstractBase2(models.Model): value = models.IntegerField() class Meta: abstract = True # Custom manager restricted = Value42() # No custom manager on this class to make sure the default case doesn't break. class AbstractBase3(models.Model): comment = models.CharField(max_length=50) class Meta: abstract = True @python_2_unicode_compatible class Parent(models.Model): name = models.CharField(max_length=50) manager = OnlyFred() def __str__(self): return self.name # Managers from base classes are inherited and, if no manager is specified # *and* the parent has a manager specified, the first one (in the MRO) will # become the default. @python_2_unicode_compatible class Child1(AbstractBase1): data = models.CharField(max_length=25) def __str__(self): return self.data @python_2_unicode_compatible class Child2(AbstractBase1, AbstractBase2): data = models.CharField(max_length=25) def __str__(self): return self.data @python_2_unicode_compatible class Child3(AbstractBase1, AbstractBase3): data = models.CharField(max_length=25) def __str__(self): return self.data @python_2_unicode_compatible class Child4(AbstractBase1): data = models.CharField(max_length=25) # Should be the default manager, although the parent managers are # inherited. default = models.Manager() def __str__(self): return self.data @python_2_unicode_compatible class Child5(AbstractBase3): name = models.CharField(max_length=25) default = OnlyFred() objects = models.Manager() def __str__(self): return self.name # Will inherit managers from AbstractBase1, but not Child4. class Child6(Child4): value = models.IntegerField() # Will not inherit default manager from parent. class Child7(Parent): pass # RelatedManagers @python_2_unicode_compatible class RelatedModel(models.Model): test_gfk = GenericRelation('RelationModel', content_type_field='gfk_ctype', object_id_field='gfk_id') exact = models.NullBooleanField() def __str__(self): return force_text(self.pk) @python_2_unicode_compatible class RelationModel(models.Model): fk = models.ForeignKey(RelatedModel, models.CASCADE, related_name='test_fk') m2m = models.ManyToManyField(RelatedModel, related_name='test_m2m') gfk_ctype = models.ForeignKey(ContentType, models.SET_NULL, null=True) gfk_id = models.IntegerField(null=True) gfk = GenericForeignKey(ct_field='gfk_ctype', fk_field='gfk_id') def __str__(self): return force_text(self.pk)
bsd-3-clause
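A hypothetical shell session against the models above (it assumes a configured Django project with this app installed and migrated, and is not part of the original file); it illustrates the file's comment that the first inherited manager in the MRO becomes the default:

from managers_regress.models import Child1

Child1.objects.create(name='fred', data='f')
Child1.objects.create(name='barney', data='b')

print([c.data for c in Child1.objects.all()])     # ['f', 'b'] - plain manager
print([c.data for c in Child1.manager1.all()])    # ['f'] - OnlyFred filter applied
print(Child1._default_manager is Child1.manager1) # True: first manager in the MRO wins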
eepgwde/pyeg0
gmus/GMus0.py
1
1699
## @file GMus0.py
# @brief Application support class for the Unofficial Google Music API.
# @author weaves
#
# @details
# This class uses @c gmusicapi.
#
# @note
# An application support class is one that uses a set of driver classes
# to provide a set of higher-level application specific methods.
#
# @see
# https://github.com/simon-weber/Unofficial-Google-Music-API
# http://unofficial-google-music-api.readthedocs.org/en/latest/

from __future__ import print_function

from GMus00 import GMus00

import logging
import ConfigParser, os, logging

import pandas as pd
import json

from gmusicapi import Mobileclient

## Set of file paths for the configuration file.
paths = ['site.cfg', os.path.expanduser('~/share/site/.safe/gmusic.cfg')]

## Google Music API login, search and result cache.
#
# The class needs a configuration file with these contents. (The
# values of the keys must be a valid Google Play account.)
#
# <pre>
# [credentials]
# username=username\@gmail.com
# password=SomePassword9
# </pre>
class GMus0(GMus00):
    ## Ad-hoc method to find the indices of duplicated entries.
    def duplicated(self):
        # self._df = self._df.sort(['album', 'title', 'creationTimestamp'],
        #                          ascending=[1, 1, 0])
        df = self.df[list(['title', 'album', 'creationTimestamp'])]
        df['n0'] = df['title'] + '|' + df['album']
        df = df.sort(['n0', 'creationTimestamp'], ascending=[1, 0])
        # Only rely on counts of 2.
        s0 = pd.Series(df.n0)
        s1 = s0.value_counts()
        s2 = set( (s1[s1.values >= 2]).index )
        df1 = df[df.n0.isin(s2)]
        df1['d'] = df1.duplicated('n0')
        s3 = list(df1[df1.d].index)
        return s3
gpl-3.0
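A self-contained sketch of the duplicate-detection idea in duplicated() above, rewritten against the current pandas API (sort_values instead of the long-deprecated DataFrame.sort); the sample rows are made up:

import pandas as pd

df = pd.DataFrame({
    'title': ['Song A', 'Song A', 'Song B'],
    'album': ['X', 'X', 'Y'],
    'creationTimestamp': [3, 1, 2],
})
# Build the composite title|album key, then sort newest-first within each key.
df['n0'] = df['title'] + '|' + df['album']
df = df.sort_values(['n0', 'creationTimestamp'], ascending=[True, False])
# duplicated() marks every row after the first (newest) copy of each key.
dupes = df[df.duplicated('n0')]
print(list(dupes.index))  # -> [1], the older copy of Song A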
nkhuyu/bazel
third_party/py/mock/setup.py
91
2134
#! /usr/bin/env python

# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/

from mock import __version__

import os


NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'

URL = "http://www.voidspace.org.uk/python/mock/"

readme = os.path.join(os.path.dirname(__file__), 'README.txt')
LONG_DESCRIPTION = open(readme).read()

CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 2.5',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.1',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: Implementation :: CPython',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Programming Language :: Python :: Implementation :: Jython',
    'Operating System :: OS Independent',
    'Topic :: Software Development :: Libraries',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Testing',
]

AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = '[email protected]'
KEYWORDS = ("testing test mock mocking unittest patching "
            "stubs fakes doubles").split(' ')

params = dict(
    name=NAME,
    version=__version__,
    py_modules=MODULES,

    # metadata for upload to PyPI
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    keywords=KEYWORDS,
    url=URL,
    classifiers=CLASSIFIERS,
)

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
else:
    params['tests_require'] = ['unittest2']
    params['test_suite'] = 'unittest2.collector'

setup(**params)
apache-2.0
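The file above only packages the library; for context, a minimal usage example of the mock module it ships, using its public Mock and patch APIs:

import os
from mock import Mock, patch

# A Mock records calls and can be given a canned return value.
m = Mock(return_value=42)
assert m('ignored') == 42
m.assert_called_once_with('ignored')

# patch temporarily replaces a named object for the duration of the block.
with patch('os.getcwd', return_value='/tmp'):
    assert os.getcwd() == '/tmp'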
samfpetersen/gnuradio
gr-zeromq/examples/python/server.py
27
4700
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

###############################################################################
# Imports
###############################################################################
from gnuradio import zeromq
from gnuradio import gr
from gnuradio import blocks
from gnuradio import analog
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import numpy
import sys
from threading import Thread
import time

###############################################################################
# GNU Radio top_block
###############################################################################
class top_block(gr.top_block):
    def __init__(self, options):
        gr.top_block.__init__(self)

        self.options = options

        # socket addresses
        rpc_adr = "tcp://*:6666"
        probe_adr = "tcp://*:5556"
        sink_adr = "tcp://*:5555"

        # the strange sampling rate gives a nice movement in the plot :P
        self.samp_rate = samp_rate = 48200

        # blocks
        self.gr_sig_source = analog.sig_source_f(samp_rate, analog.GR_SIN_WAVE, 1000, 1, 0)
        self.throttle = blocks.throttle(gr.sizeof_float, samp_rate)
        self.mult = blocks.multiply_const_ff(1)
        #self.zmq_sink = zeromq.rep_sink(gr.sizeof_float, 1, sink_adr)
        self.zmq_sink = zeromq.pub_sink(gr.sizeof_float, 1, sink_adr)
        #self.zmq_sink = zeromq.push_sink(gr.sizeof_float, 1, sink_adr)
        #self.zmq_probe = zeromq.push_sink(gr.sizeof_float, 1, probe_adr)
        self.zmq_probe = zeromq.pub_sink(gr.sizeof_float, 1, probe_adr)
        #self.null_sink = blocks.null_sink(gr.sizeof_float)

        # connects
        self.connect(self.gr_sig_source, self.mult, self.throttle, self.zmq_sink)
        self.connect(self.throttle, self.zmq_probe)

        # ZeroMQ
        self.rpc_manager = zeromq.rpc_manager()
        self.rpc_manager.set_reply_socket(rpc_adr)
        self.rpc_manager.add_interface("start_fg", self.start_fg)
        self.rpc_manager.add_interface("stop_fg", self.stop_fg)
        self.rpc_manager.add_interface("set_waveform", self.set_waveform)
        self.rpc_manager.add_interface("set_k", self.mult.set_k)
        self.rpc_manager.add_interface("get_sample_rate", self.throttle.sample_rate)
        self.rpc_manager.start_watcher()

    def start_fg(self):
        print "Start Flowgraph"
        try:
            self.start()
        except RuntimeError:
            print "Can't start, flowgraph already running!"

    def stop_fg(self):
        print "Stop Flowgraph"
        self.stop()
        self.wait()

    def set_waveform(self, waveform_str):
        waveform = {'Constant' : analog.GR_CONST_WAVE,
                    'Sine' : analog.GR_SIN_WAVE,
                    'Cosine' : analog.GR_COS_WAVE,
                    'Square' : analog.GR_SQR_WAVE,
                    'Triangle' : analog.GR_TRI_WAVE,
                    'Saw Tooth' : analog.GR_SAW_WAVE}[waveform_str]
        self.gr_sig_source.set_waveform(waveform)

###############################################################################
# Options Parser
###############################################################################
def parse_options():
    """ Options parser. """
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    (options, args) = parser.parse_args()
    return options

###############################################################################
# Main
###############################################################################
if __name__ == "__main__":
    options = parse_options()
    tb = top_block(options)
    try:
        # keep the program running when flowgraph is stopped
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    print "Shutting down flowgraph."
    tb.rpc_manager.stop_watcher()
    tb.stop()
    tb.wait()
    tb = None
gpl-3.0
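One possible client for the probe socket the server above publishes, written with plain pyzmq rather than the GNU Radio blocks; the address matches probe_adr in the server, and the float32 payload layout (gr.sizeof_float is 4 bytes) is an assumption about the wire format:

import numpy as np
import zmq

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.connect("tcp://localhost:5556")
sock.setsockopt(zmq.SUBSCRIBE, b"")  # subscribe to all messages

msg = sock.recv()                    # one block of samples from the throttle
samples = np.frombuffer(msg, dtype=np.float32)
print(samples[:5])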
Asana/mypipe
avro/lang/c/jansson/doc/conf.py
58
7023
# -*- coding: utf-8 -*-
#
# Jansson documentation build configuration file, created by
# sphinx-quickstart on Sun Sep  5 21:47:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['refcounting']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Jansson'
copyright = u'2009-2011, Petri Lehtinen'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'c:func'
primary_domain = 'c'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Janssondoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Jansson.tex', u'Jansson Documentation',
   u'Petri Lehtinen', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'jansson', u'Jansson Documentation',
     [u'Petri Lehtinen'], 1)
]
apache-2.0
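These settings are consumed by sphinx-build; a hedged sketch of invoking the same build programmatically, assuming a modern Sphinx (the sphinx.cmd.build module exists in Sphinx 1.7 and later) and that the working directory is the one holding this conf.py:

from sphinx.cmd.build import build_main

# '-b html' selects the HTML builder; '.' is the source directory with conf.py.
build_main(['-b', 'html', '.', '_build/html'])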
marcosmodesto/django-testapp
django/django/contrib/gis/geos/prototypes/geom.py
102
4430
from ctypes import c_char_p, c_int, c_size_t, c_ubyte, POINTER
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import (
    check_geom, check_minus_one, check_sized_string, check_string, check_zero)
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc

# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)


# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS.  Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
    pass


### ctypes generation functions ###
def bin_constructor(func):
    "Generates a prototype for binary construction (HEX, WKB) GEOS routines."
    func.argtypes = [c_char_p, c_size_t]
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func


# HEX & WKB output
def bin_output(func):
    "Generates a prototype for the routines that return a sized string."
    func.argtypes = [GEOM_PTR, POINTER(c_size_t)]
    func.errcheck = check_sized_string
    func.restype = c_uchar_p
    return func


def geom_output(func, argtypes):
    "For GEOS routines that return a geometry."
    if argtypes:
        func.argtypes = argtypes
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func


def geom_index(func):
    "For GEOS routines that return geometries from an index."
    return geom_output(func, [GEOM_PTR, c_int])


def int_from_geom(func, zero=False):
    "Argument is a geometry, return type is an integer."
    func.argtypes = [GEOM_PTR]
    func.restype = c_int
    if zero:
        func.errcheck = check_zero
    else:
        func.errcheck = check_minus_one
    return func


def string_from_geom(func):
    "Argument is a Geometry, return type is a string."
    func.argtypes = [GEOM_PTR]
    func.restype = geos_char_p
    func.errcheck = check_string
    return func


### ctypes prototypes ###

# Deprecated creation routines from WKB, HEX, WKT
from_hex = bin_constructor(GEOSFunc('GEOSGeomFromHEX_buf'))
from_wkb = bin_constructor(GEOSFunc('GEOSGeomFromWKB_buf'))
from_wkt = geom_output(GEOSFunc('GEOSGeomFromWKT'), [c_char_p])

# Deprecated output routines
to_hex = bin_output(GEOSFunc('GEOSGeomToHEX_buf'))
to_wkb = bin_output(GEOSFunc('GEOSGeomToWKB_buf'))
to_wkt = string_from_geom(GEOSFunc('GEOSGeomToWKT'))

# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = int_from_geom(GEOSFunc('GEOSNormalize'))
geos_type = string_from_geom(GEOSFunc('GEOSGeomType'))
geos_typeid = int_from_geom(GEOSFunc('GEOSGeomTypeId'))
get_dims = int_from_geom(GEOSFunc('GEOSGeom_getDimensions'), zero=True)
get_num_coords = int_from_geom(GEOSFunc('GEOSGetNumCoordinates'))
get_num_geoms = int_from_geom(GEOSFunc('GEOSGetNumGeometries'))

# Geometry creation factories
create_point = geom_output(GEOSFunc('GEOSGeom_createPoint'), [CS_PTR])
create_linestring = geom_output(GEOSFunc('GEOSGeom_createLineString'), [CS_PTR])
create_linearring = geom_output(GEOSFunc('GEOSGeom_createLinearRing'), [CS_PTR])

# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = geom_output(GEOSFunc('GEOSGeom_createPolygon'), None)
create_collection = geom_output(GEOSFunc('GEOSGeom_createCollection'), None)

# Ring routines
get_extring = geom_output(GEOSFunc('GEOSGetExteriorRing'), [GEOM_PTR])
get_intring = geom_index(GEOSFunc('GEOSGetInteriorRingN'))
get_nrings = int_from_geom(GEOSFunc('GEOSGetNumInteriorRings'))

# Collection Routines
get_geomn = geom_index(GEOSFunc('GEOSGetGeometryN'))

# Cloning
geom_clone = GEOSFunc('GEOSGeom_clone')
geom_clone.argtypes = [GEOM_PTR]
geom_clone.restype = GEOM_PTR

# Destruction routine.
destroy_geom = GEOSFunc('GEOSGeom_destroy')
destroy_geom.argtypes = [GEOM_PTR]
destroy_geom.restype = None

# SRID routines
geos_get_srid = GEOSFunc('GEOSGetSRID')
geos_get_srid.argtypes = [GEOM_PTR]
geos_get_srid.restype = c_int

geos_set_srid = GEOSFunc('GEOSSetSRID')
geos_set_srid.argtypes = [GEOM_PTR, c_int]
geos_set_srid.restype = None
bsd-3-clause
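A self-contained illustration of the argtypes/restype/errcheck pattern the module above applies to GEOS routines, shown here against libc's strlen so it runs without GEOS installed (a sketch for a typical Unix libc; the check itself is deliberately trivial since strlen cannot fail):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library('c'))

def check_nonneg(result, func, args):
    # errcheck hook: inspect the raw return value before handing it back.
    if result < 0:
        raise RuntimeError('%s failed' % func.__name__)
    return result

strlen = libc.strlen
strlen.argtypes = [ctypes.c_char_p]   # declare the C signature
strlen.restype = ctypes.c_size_t
strlen.errcheck = check_nonneg        # post-call validation, as in the module above

assert strlen(b'geos') == 4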
bobobox/ansible
lib/ansible/modules/files/replace.py
11
6445
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2013, Evan Kaufman <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment:
    - files
    - validate
short_description: Replace all instances of a particular string in a file using a back-referenced regular expression.
description:
  - This module will replace all instances of a pattern within a file.
  - It is up to the user to maintain idempotence by ensuring that the same pattern would never match any replacements made.
version_added: "1.6"
options:
  path:
    required: true
    aliases: [ dest, destfile, name ]
    description:
      - The file to modify.
      - Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
  regexp:
    required: true
    description:
      - The regular expression to look for in the contents of the file. Uses Python regular expressions; see U(http://docs.python.org/2/library/re.html). Uses multiline mode, which means C(^) and C($) match the beginning and end respectively of I(each line) of the file.
  replace:
    required: false
    description:
      - The string to replace regexp matches. May contain backreferences that will get expanded with the regexp capture groups if the regexp matches. If not set, matches are removed entirely.
  backup:
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    description:
      - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
  others:
    description:
      - All arguments accepted by the M(file) module also work here.
    required: false
  follow:
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    version_added: "1.9"
    description:
      - 'This flag indicates that filesystem links, if they exist, should be followed.'
notes:
  - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
"""

EXAMPLES = r"""
# Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- replace:
    path: /etc/hosts
    regexp: '(\s+)old\.host\.name(\s+.*)?$'
    replace: '\1new.host.name\2'
    backup: yes

- replace:
    path: /home/jdoe/.ssh/known_hosts
    regexp: '^old\.host\.name[^\n]*\n'
    owner: jdoe
    group: jdoe
    mode: 0644

- replace:
    path: /etc/apache/ports
    regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
    replace: '\1 127.0.0.1:8080'
    validate: '/usr/sbin/apache2ctl -f %s -t'
"""

import os
import re
import tempfile

from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule


def write_changes(module, contents, path):

    tmpfd, tmpfile = tempfile.mkstemp()
    f = os.fdopen(tmpfd, 'wb')
    f.write(to_bytes(contents))
    f.close()

    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        if "%s" not in validate:
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        (rc, out, err) = module.run_command(validate % tmpfile)
        valid = rc == 0
        if rc != 0:
            module.fail_json(msg='failed to validate: '
                                 'rc:%s error:%s' % (rc, err))
    if valid:
        module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])


def check_file_attrs(module, changed, message):

    file_args = module.load_file_common_arguments(module.params)
    if module.set_file_attributes_if_different(file_args, False):

        if changed:
            message += " and "
        changed = True
        message += "ownership, perms or SE linux context changed"

    return message, changed


def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
            regexp=dict(required=True),
            replace=dict(default='', type='str'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        add_file_common_args=True,
        supports_check_mode=True
    )

    params = module.params
    path = os.path.expanduser(params['path'])
    res_args = dict()

    if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory !' % path)

    if not os.path.exists(path):
        module.fail_json(rc=257, msg='Path %s does not exist !' % path)
    else:
        f = open(path, 'rb')
        contents = to_text(f.read(), errors='surrogate_or_strict')
        f.close()

    mre = re.compile(params['regexp'], re.MULTILINE)
    result = re.subn(mre, params['replace'], contents, 0)

    if result[1] > 0 and contents != result[0]:
        msg = '%s replacements made' % result[1]
        changed = True
        if module._diff:
            res_args['diff'] = {
                'before_header': path,
                'before': contents,
                'after_header': path,
                'after': result[0],
            }
    else:
        msg = ''
        changed = False

    if changed and not module.check_mode:
        if params['backup'] and os.path.exists(path):
            res_args['backup_file'] = module.backup_local(path)
        if params['follow'] and os.path.islink(path):
            path = os.path.realpath(path)
        write_changes(module, result[0], path)

    res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
    module.exit_json(**res_args)

if __name__ == '__main__':
    main()
gpl-3.0
captainpete/rethinkdb
test/memcached_workloads/multi_serial_mix.py
29
3527
#!/usr/bin/env python # Copyright 2010-2012 RethinkDB, all rights reserved. from __future__ import print_function import sys, os sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import multiprocessing, time, pickle import memcached_workload_common, serial_mix from vcoptparse import * def child(opts, log_path, load, save): # This is run in a separate process import sys # TODO: this overwrites existing log files sys.stdout = sys.stderr = file(log_path, "w") if load is None: clone, deleted = {}, set() else: print("Loading from %r..." % load) with open(load) as f: clone, deleted = pickle.load(f) print("Starting test against server at %s..." % opts["address"]) with memcached_workload_common.make_memcache_connection(opts) as mc: serial_mix.test(opts, mc, clone, deleted) if save is not None: print("Saving to %r..." % save) with open(save, "w") as f: pickle.dump((clone, deleted), f) print("Done with test.") op = serial_mix.option_parser_for_serial_mix() op["num_testers"] = IntFlag("--num-testers", 16) op["load"] = StringFlag("--load", None) op["save"] = StringFlag("--save", None) opts = op.parse(sys.argv) shutdown_grace_period = 15 tester_log_dir = "multi_serial_mix_out" if not os.path.isdir(tester_log_dir): os.mkdir(tester_log_dir) processes = [] try: print("Starting %d child processes..." % opts["num_testers"]) print("Writing output from child processes to %r" % tester_log_dir) for id in xrange(opts["num_testers"]): log_path = os.path.join(tester_log_dir, "%d.txt" % id) load_path = opts["load"] + "_%d" % id if opts["load"] is not None else None save_path = opts["save"] + "_%d" % id if opts["save"] is not None else None opts2 = dict(opts) opts2["keysuffix"] = "_%d" % id # Prevent collisions between tests process = multiprocessing.Process(target=child, args=(opts2, log_path, load_path, save_path)) process.start() processes.append((process, id)) print("Waiting for child processes...") start_time = time.time() def time_remaining(): time_elapsed = time.time() - start_time # Give subprocesses lots of extra time return opts["duration"] * 2 - time_elapsed + 1 for process, id in processes: tr = time_remaining() if tr <= 0: tr = shutdown_grace_period process.join(tr) stuck = sorted(id for (process, id) in processes if process.is_alive()) failed = sorted(id for (process, id) in processes if not process.is_alive() and process.exitcode != 0) if stuck or failed: for id in stuck + failed: with file(os.path.join(tester_log_dir, str(id) + ".txt")) as f: for line in f: sys.stdout.write(line) if len(stuck) == opts["num_testers"]: raise ValueError("All %d processes did not finish in time." % opts["num_testers"]) elif len(failed) == opts["num_testers"]: raise ValueError("All %d processes failed." % opts["num_testers"]) else: raise ValueError( "Of processes [1 ... %d], the following did not finish in time: " "%s and the following failed: %s" % (opts["num_testers"], stuck, failed) ) finally: for (process, id) in processes: if process.is_alive(): process.terminate() print("Done.")
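The harness above is a fan-out/join-with-timeout pattern; a self-contained sketch of just that pattern, with a placeholder worker standing in for the real serial_mix test body:

import multiprocessing, time

def worker(seconds):
    time.sleep(seconds)  # stand-in for the real per-process test

if __name__ == '__main__':
    procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(4)]
    for p in procs:
        p.start()
    deadline = time.time() + 5  # overall budget, like duration * 2 above
    for p in procs:
        p.join(max(deadline - time.time(), 0.1))  # never wait forever
    stuck = [p for p in procs if p.is_alive()]
    for p in stuck:
        p.terminate()  # same cleanup the finally: block performs
    print("stuck:", [p.pid for p in stuck])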
agpl-3.0
mdmueller/ascii-profiling
parallel.py
1
4245
import timeit import time from astropy.io import ascii import pandas import numpy as np from astropy.table import Table, Column from tempfile import NamedTemporaryFile import random import string import matplotlib.pyplot as plt import webbrowser def make_table(table, size=10000, n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None): if str_val is None: str_val = "abcde12345" cols = [] for i in xrange(n_floats): dat = np.random.uniform(low=1, high=10, size=size) cols.append(Column(dat, name='f{}'.format(i))) for i in xrange(n_ints): dat = np.random.randint(low=-9999999, high=9999999, size=size) cols.append(Column(dat, name='i{}'.format(i))) for i in xrange(n_strs): if str_val == 'random': dat = np.array([''.join([random.choice(string.letters) for j in range(10)]) for k in range(size)]) else: dat = np.repeat(str_val, size) cols.append(Column(dat, name='s{}'.format(i))) t = Table(cols) if float_format is not None: for col in t.columns.values(): if col.name.startswith('f'): col.format = float_format t.write(table.name, format='ascii') output_text = [] def plot_case(n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None): global table1, output_text n_rows = (10000, 20000, 50000, 100000, 200000) # include 200000 for publish run numbers = (1, 1, 1, 1, 1) repeats = (3, 2, 1, 1, 1) times_fast = [] times_fast_parallel = [] times_pandas = [] for n_row, number, repeat in zip(n_rows, numbers, repeats): table1 = NamedTemporaryFile() make_table(table1, n_row, n_floats, n_ints, n_strs, float_format, str_val) t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, use_fast_converter=True)", setup='from __main__ import ascii, table1', number=number, repeat=repeat) times_fast.append(min(t) / number) t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, parallel=True, use_fast_converter=True)", setup='from __main__ import ascii, table1', number=number, repeat=repeat) times_fast_parallel.append(min(t) / number) t = timeit.repeat("pandas.read_csv(table1.name, sep=' ', header=0)", setup='from __main__ import table1, pandas', number=number, repeat=repeat) times_pandas.append(min(t) / number) plt.loglog(n_rows, times_fast, '-or', label='io.ascii Fast-c') plt.loglog(n_rows, times_fast_parallel, '-og', label='io.ascii Parallel Fast-c') plt.loglog(n_rows, times_pandas, '-oc', label='Pandas') plt.grid() plt.legend(loc='best') plt.title('n_floats={} n_ints={} n_strs={} float_format={} str_val={}'.format( n_floats, n_ints, n_strs, float_format, str_val)) plt.xlabel('Number of rows') plt.ylabel('Time (sec)') img_file = 'graph{}.png'.format(len(output_text) + 1) plt.savefig(img_file) plt.clf() text = 'Pandas to io.ascii Fast-C speed ratio: {:.2f} : 1<br/>'.format(times_fast[-1] / times_pandas[-1]) text += 'io.ascii parallel to Pandas speed ratio: {:.2f} : 1'.format(times_pandas[-1] / times_fast_parallel[-1]) output_text.append((img_file, text)) plot_case(n_floats=10, n_ints=0, n_strs=0) plot_case(n_floats=10, n_ints=10, n_strs=10) plot_case(n_floats=10, n_ints=10, n_strs=10, float_format='%.4f') plot_case(n_floats=10, n_ints=0, n_strs=0, float_format='%.4f') plot_case(n_floats=0, n_ints=0, n_strs=10) plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="'asdf asdfa'") plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="random") plot_case(n_floats=0, n_ints=10, n_strs=0) html_file = open('out.html', 'w') html_file.write('<html><head><meta charset="utf-8"/><meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>') html_file.write('</head><body><h1 style="text-align:center;">Profile of io.ascii</h1>') for img, descr in output_text: html_file.write('<img src="{}"><p style="font-weight:bold;">{}</p><hr>'.format(img, descr)) html_file.write('</body></html>') html_file.close() webbrowser.open('out.html')
mit
patriciohc/carga-de-xls-a-MySQL
Choose_file.py
1
3639
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'Choose_file.ui' # # Created: Sat Oct 17 15:55:19 2015 # by: PyQt4 UI code generator 4.10.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(524, 146) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8("centralwidget")) self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget) self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 501, 81)) self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget")) self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget) self.verticalLayout.setMargin(0) self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.label = QtGui.QLabel(self.verticalLayoutWidget) self.label.setObjectName(_fromUtf8("label")) self.horizontalLayout_2.addWidget(self.label) self.txtFile = QtGui.QLineEdit(self.verticalLayoutWidget) self.txtFile.setObjectName(_fromUtf8("txtFile")) self.horizontalLayout_2.addWidget(self.txtFile) self.btChooseFile = QtGui.QPushButton(self.verticalLayoutWidget) self.btChooseFile.setObjectName(_fromUtf8("btChooseFile")) self.horizontalLayout_2.addWidget(self.btChooseFile) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.btClose = QtGui.QPushButton(self.verticalLayoutWidget) self.btClose.setObjectName(_fromUtf8("btClose")) self.horizontalLayout.addWidget(self.btClose) self.btLoadFile = QtGui.QPushButton(self.verticalLayoutWidget) self.btLoadFile.setObjectName(_fromUtf8("btLoadFile")) self.horizontalLayout.addWidget(self.btLoadFile) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtGui.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 524, 25)) self.menubar.setObjectName(_fromUtf8("menubar")) MainWindow.setMenuBar(self.menubar) self.statusbar = QtGui.QStatusBar(MainWindow) self.statusbar.setObjectName(_fromUtf8("statusbar")) MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None)) self.label.setText(_translate("MainWindow", "File", None)) self.btChooseFile.setText(_translate("MainWindow", "Choose", None)) self.btClose.setText(_translate("MainWindow", "Cerrar", None)) self.btLoadFile.setText(_translate("MainWindow", "Cargar Archivo", None))
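Generated Ui_* classes like this one are normally wired onto a real window by the caller; a sketch of that standard pattern, assuming PyQt4 is installed and this file is importable as Choose_file:

import sys
from PyQt4 import QtGui
from Choose_file import Ui_MainWindow

app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)  # builds the widgets onto the window
window.show()
sys.exit(app.exec_())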
apache-2.0
Endika/partner-contact
partner_firstname/tests/test_name.py
25
3241
# -*- coding: utf-8 -*- # Authors: Nemry Jonathan # Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu) # All Rights Reserved # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsibility of assessing all potential # consequences resulting from its eventual inadequacies and bugs. # End users who are looking for a ready-to-use solution with commercial # guarantees and support are strongly advised to contact a Free Software # Service Company. # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. """Test naming logic. To have more accurate results, remove the ``mail`` module before testing. """ from .base import BaseCase class PartnerContactCase(BaseCase): def test_update_lastname(self): """Change lastname.""" self.expect(u"newlästname", self.firstname) self.original.name = self.name def test_update_firstname(self): """Change firstname.""" self.expect(self.lastname, u"newfïrstname") self.original.name = self.name def test_whitespace_cleanup(self): """Check that whitespace in name gets cleared.""" self.expect(u"newlästname", u"newfïrstname") self.original.name = " newlästname newfïrstname " # Need this to refresh the ``name`` field self.original.invalidate_cache() class PartnerCompanyCase(BaseCase): def create_original(self): super(PartnerCompanyCase, self).create_original() self.original.is_company = True def test_copy(self): """Copy the partner and compare the result.""" super(PartnerCompanyCase, self).test_copy() self.expect(self.name, False, self.name) def test_company_inverse(self): """Test the inverse method in a company record.""" name = u"Thïs is a Companŷ" self.expect(name, False, name) self.original.name = name class UserCase(PartnerContactCase): def create_original(self): name = u"%s %s" % (self.lastname, self.firstname) # Cannot create users if ``mail`` is installed if self.mail_installed(): self.original = self.env.ref("base.user_demo") self.original.name = name else: self.original = self.env["res.users"].create({ "name": name, "login": "[email protected]"}) def test_copy(self): """Copy the partner and compare the result.""" # Skip if ``mail`` is installed if not self.mail_installed(): super(UserCase, self).test_copy()
agpl-3.0
evancich/apm_motor
mk/PX4/Tools/genmsg/test/test_genmsg_srvs.py
216
3233
# Software License Agreement (BSD License) # # Copyright (c) 2011, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import os import sys def get_test_dir(): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'files')) def test_SrvSpec(): from genmsg import MsgSpec, SrvSpec types = ['int32'] names = ['a'] constants = [] text = 'int32 a' msg_a = MsgSpec(types, names, constants, text, 'a/Int') assert msg_a.full_name == 'a/Int' assert msg_a.package == 'a' assert msg_a.short_name == 'Int' types = ['int64'] names = ['b'] constants = [] text = 'int64 b' msg_b = MsgSpec(types, names, constants, text, 'b/Int') assert msg_b.full_name == 'b/Int' assert msg_b.package == 'b' assert msg_b.short_name == 'Int' text = msg_a.text + '\n---\n' + msg_b.text spec = SrvSpec(msg_a, msg_b, text) assert msg_a == spec.request assert msg_b == spec.response assert text == spec.text assert '' == spec.full_name assert '' == spec.short_name assert '' == spec.package # tripwire assert repr(spec) assert str(spec) # exercise eq assert spec != 'spec' assert not spec == 'spec' spec2 = SrvSpec(msg_a, msg_b, text) assert spec == spec2 assert not spec != spec2 # - full_name spec2.full_name = 'something' assert spec != spec2 spec2.full_name = '' assert spec == spec2 # - short_name spec2.short_name = 'something' assert spec != spec2 spec2.short_name = '' assert spec == spec2 # - package spec2.package = 'something' assert spec != spec2 spec2.package = '' assert spec == spec2
gpl-3.0
benssson/flatbuffers
python/flatbuffers/compat.py
19
1465
# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A tiny version of `six` to help with backwards compability. """ import sys PY2 = sys.version_info[0] == 2 PY26 = sys.version_info[0:2] == (2, 6) PY27 = sys.version_info[0:2] == (2, 7) PY275 = sys.version_info[0:3] >= (2, 7, 5) PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = (str,) binary_types = (bytes,bytearray) range_func = range memoryview_type = memoryview struct_bool_decl = "?" else: string_types = (unicode,) if PY26 or PY27: binary_types = (str,bytearray) else: binary_types = (str,) range_func = xrange if PY26 or (PY27 and not PY275): memoryview_type = buffer struct_bool_decl = "<b" else: memoryview_type = memoryview struct_bool_decl = "?" # NOTE: Future Jython support may require code here (look at `six`).
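A short sketch of how these shims are typically consumed so one code path runs on both Python 2 and 3 (assumes the flatbuffers package is importable):

from flatbuffers.compat import string_types, binary_types, range_func

def kind(value):
    # The tuples are version-appropriate, so isinstance() works
    # unchanged on Python 2 and 3.
    if isinstance(value, string_types):
        return "text"
    if isinstance(value, binary_types):
        return "binary"
    return "other"

print(kind(u"abc"), kind(b"abc"), sum(range_func(5)))  # text binary 10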
apache-2.0
ppanczyk/ansible
lib/ansible/modules/network/f5/bigip_user.py
10
18341
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigip_user short_description: Manage user accounts and user attributes on a BIG-IP. description: - Manage user accounts and user attributes on a BIG-IP. version_added: "2.4" options: full_name: description: - Full name of the user. username_credential: description: - Name of the user to create, remove or modify. required: True aliases: - name password_credential: description: - Set the users password to this unencrypted value. C(password_credential) is required when creating a new account. shell: description: - Optionally set the users shell. choices: - bash - none - tmsh partition_access: description: - Specifies the administrative partition to which the user has access. C(partition_access) is required when creating a new account. Should be in the form "partition:role". Valid roles include C(acceleration-policy-editor), C(admin), C(application-editor), C(auditor) C(certificate-manager), C(guest), C(irule-manager), C(manager), C(no-access) C(operator), C(resource-admin), C(user-manager), C(web-application-security-administrator), and C(web-application-security-editor). Partition portion of tuple should be an existing partition or the value 'all'. state: description: - Whether the account should exist or not, taking action if the state is different from what is stated. default: present choices: - present - absent update_password: description: - C(always) will allow to update passwords if the user chooses to do so. C(on_create) will only set the password for newly created users. default: on_create choices: - always - on_create notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. - Requires BIG-IP versions >= 12.0.0 extends_documentation_fragment: f5 requirements: - f5-sdk author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = ''' - name: Add the user 'johnd' as an admin bigip_user: server: "lb.mydomain.com" user: "admin" password: "secret" username_credential: "johnd" password_credential: "password" full_name: "John Doe" partition_access: "all:admin" update_password: "on_create" state: "present" delegate_to: localhost - name: Change the user "johnd's" role and shell bigip_user: server: "lb.mydomain.com" user: "admin" password: "secret" username_credential: "johnd" partition_access: "NewPartition:manager" shell: "tmsh" state: "present" delegate_to: localhost - name: Make the user 'johnd' an admin and set to advanced shell bigip_user: server: "lb.mydomain.com" user: "admin" password: "secret" name: "johnd" partition_access: "all:admin" shell: "bash" state: "present" delegate_to: localhost - name: Remove the user 'johnd' bigip_user: server: "lb.mydomain.com" user: "admin" password: "secret" name: "johnd" state: "absent" delegate_to: localhost - name: Update password bigip_user: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" username_credential: "johnd" password_credential: "newsupersecretpassword" delegate_to: localhost # Note that the second time this task runs, it would fail because # The password has been changed. 
Therefore, it is recommended that # you either, # # * Put this in its own playbook that you run when you need to # * Put this task in a `block` # * Include `ignore_errors` on this task - name: Change the Admin password bigip_user: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" username_credential: "admin" password_credential: "NewSecretPassword" delegate_to: localhost ''' RETURN = ''' full_name: description: Full name of the user returned: changed and success type: string sample: "John Doe" partition_access: description: - List of strings containing the user's roles and which partitions they are applied to. They are specified in the form "partition:role". returned: changed and success type: list sample: "['all:admin']" shell: description: The shell assigned to the user account returned: changed and success type: string sample: "tmsh" ''' from distutils.version import LooseVersion from ansible.module_utils.f5_utils import ( AnsibleF5Client, AnsibleF5Parameters, HAS_F5SDK, F5ModuleError, iControlUnexpectedHTTPError ) class Parameters(AnsibleF5Parameters): api_map = { 'partitionAccess': 'partition_access', 'description': 'full_name', } updatables = [ 'partition_access', 'full_name', 'shell', 'password_credential' ] returnables = [ 'shell', 'partition_access', 'full_name', 'username_credential' ] api_attributes = [ 'shell', 'partitionAccess', 'description', 'name', 'password' ] @property def partition_access(self): """Partition access values will require some transformation. This operates on both user and device returned values. Check if the element is a string from user input in the format of name:role, if it is split it and create dictionary out of it. If the access value is a dictionary (returned from device, or already processed) and contains nameReference key, delete it and append the remaining dictionary element into a list. If the nameReference key is removed just append the dictionary into the list. :returns list of dictionaries """ if self._values['partition_access'] is None: return result = [] part_access = self._values['partition_access'] for access in part_access: if isinstance(access, dict): if 'nameReference' in access: del access['nameReference'] result.append(access) else: result.append(access) if isinstance(access, str): acl = access.split(':') if acl[0].lower() == 'all': acl[0] = 'all-partitions' value = dict( name=acl[0], role=acl[1] ) result.append(value) return result def to_return(self): result = {} for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) return result def api_params(self): result = {} for api_attribute in self.api_attributes: if api_attribute in self.api_map: result[api_attribute] = getattr( self, self.api_map[api_attribute]) elif api_attribute == 'password': result[api_attribute] = self._values['password_credential'] else: result[api_attribute] = getattr(self, api_attribute) result = self._filter_params(result) return result class ModuleManager(object): def __init__(self, client): self.client = client def exec_module(self): if self.is_version_less_than_13(): manager = UnparitionedManager(self.client) else: manager = PartitionedManager(self.client) return manager.exec_module() def is_version_less_than_13(self): """Checks to see if the TMOS version is less than 13 Anything less than BIG-IP 13.x does not support users on different partitions. 
:return: Bool """ version = self.client.api.tmos_version if LooseVersion(version) < LooseVersion('13.0.0'): return True else: return False class BaseManager(object): def __init__(self, client): self.client = client self.have = None self.want = Parameters(self.client.module.params) self.changes = Parameters() def exec_module(self): changed = False result = dict() state = self.want.state try: if state == "present": changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = Parameters(changed) def _update_changed_options(self): changed = {} for key in Parameters.updatables: if getattr(self.want, key) is not None: if key == 'password_credential': new_pass = getattr(self.want, key) if self.want.update_password == 'always': changed[key] = new_pass else: # We set the shell parameter to 'none' when bigip does # not return it. if self.want.shell == 'bash': self.validate_shell_parameter() if self.want.shell == 'none' and \ self.have.shell is None: self.have.shell = 'none' attr1 = getattr(self.want, key) attr2 = getattr(self.have, key) if attr1 != attr2: changed[key] = attr1 if changed: self.changes = Parameters(changed) return True return False def validate_shell_parameter(self): """Method to validate shell parameters. Raise when shell attribute is set to 'bash' with roles set to either 'admin' or 'resource-admin'. NOTE: Admin and Resource-Admin roles automatically enable access to all partitions, removing any other roles that the user might have had. There are few other roles which do that but those roles, do not allow bash. """ err = "Shell access is only available to " \ "'admin' or 'resource-admin' roles" permit = ['admin', 'resource-admin'] if self.have is not None: have = self.have.partition_access if not any(r['role'] for r in have if r['role'] in permit): raise F5ModuleError(err) # This check is needed if we want to modify shell AND # partition_access attribute. # This check will also trigger on create. if self.want.partition_access is not None: want = self.want.partition_access if not any(r['role'] for r in want if r['role'] in permit): raise F5ModuleError(err) def present(self): if self.exists(): return self.update() else: return self.create() def absent(self): if self.exists(): return self.remove() return False def should_update(self): result = self._update_changed_options() if result: return True return False def validate_create_parameters(self): """Password credentials and partition access are mandatory, when creating a user resource. """ if self.want.password_credential and \ self.want.update_password != 'on_create': err = "The 'update_password' option " \ "needs to be set to 'on_create' when creating " \ "a resource with a password." raise F5ModuleError(err) if self.want.partition_access is None: err = "The 'partition_access' option " \ "is required when creating a resource." 
raise F5ModuleError(err) def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.client.check_mode: return True self.update_on_device() return True def remove(self): if self.client.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the user") return True def create(self): self.validate_create_parameters() if self.want.shell == 'bash': self.validate_shell_parameter() self._set_changed_options() if self.client.check_mode: return True self.create_on_device() return True class UnparitionedManager(BaseManager): def create_on_device(self): params = self.want.api_params() self.client.api.tm.auth.users.user.create(**params) def update_on_device(self): params = self.want.api_params() result = self.client.api.tm.auth.users.user.load(name=self.want.name) result.modify(**params) def read_current_from_device(self): tmp_res = self.client.api.tm.auth.users.user.load(name=self.want.name) result = tmp_res.attrs return Parameters(result) def exists(self): return self.client.api.tm.auth.users.user.exists(name=self.want.name) def remove_from_device(self): result = self.client.api.tm.auth.users.user.load(name=self.want.name) if result: result.delete() class PartitionedManager(BaseManager): def create_on_device(self): params = self.want.api_params() self.client.api.tm.auth.users.user.create( partition=self.want.partition, **params ) def _read_one_resource_from_collection(self): collection = self.client.api.tm.auth.users.get_collection( requests_params=dict( params="$filter=partition+eq+'{0}'".format(self.want.partition) ) ) collection = [x for x in collection if x.name == self.want.name] if len(collection) == 1: resource = collection.pop() return resource elif len(collection) == 0: raise F5ModuleError( "No accounts with the provided name were found" ) else: raise F5ModuleError( "Multiple users with the provided name were found!" ) def update_on_device(self): params = self.want.api_params() try: resource = self._read_one_resource_from_collection() resource.modify(**params) except iControlUnexpectedHTTPError as ex: # TODO: Patch this in the F5 SDK so that I dont need this check if 'updated successfully' not in str(ex): raise F5ModuleError( "Failed to update the specified user" ) def read_current_from_device(self): resource = self._read_one_resource_from_collection() result = resource.attrs return Parameters(result) def exists(self): collection = self.client.api.tm.auth.users.get_collection( requests_params=dict( params="$filter=partition+eq+'{0}'".format(self.want.partition) ) ) collection = [x for x in collection if x.name == self.want.name] if len(collection) == 1: result = True elif len(collection) == 0: result = False else: raise F5ModuleError( "Multiple users with the provided name were found!" 
) return result def remove_from_device(self): resource = self._read_one_resource_from_collection() if resource: resource.delete() class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True self.argument_spec = dict( name=dict( required=True, aliases=['username_credential'] ), password_credential=dict( no_log=True, ), partition_access=dict( type='list' ), full_name=dict(), shell=dict( choices=['none', 'bash', 'tmsh'] ), update_password=dict( default='always', choices=['always', 'on_create'] ) ) self.f5_product_name = 'bigip' def main(): if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") spec = ArgumentSpec() client = AnsibleF5Client( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, f5_product_name=spec.f5_product_name ) try: mm = ModuleManager(client) results = mm.exec_module() client.module.exit_json(**results) except F5ModuleError as e: client.module.fail_json(msg=str(e)) if __name__ == '__main__': main()
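The manager dispatch above hinges on a LooseVersion comparison; a minimal sketch of that version gate, using made-up version strings:

from distutils.version import LooseVersion

def pick_manager(tmos_version):
    # Mirrors is_version_less_than_13(): pre-13.0.0 TMOS does not
    # support users on different partitions, so it gets the
    # unpartitioned code path.
    if LooseVersion(tmos_version) < LooseVersion('13.0.0'):
        return 'unpartitioned'
    return 'partitioned'

print(pick_manager('12.1.2'), pick_manager('13.1.0.2'))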
gpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/tests/unit/emr/test_emr_responses.py
98
17266
# Copyright (c) 2010 Jeremy Thurgood <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # NOTE: These tests only cover the very simple cases I needed to test # for the InstanceGroup fix. import xml.sax from boto import handler from boto.emr import emrobject from boto.resultset import ResultSet from tests.compat import unittest JOB_FLOW_EXAMPLE = b""" <DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-01-15"> <DescribeJobFlowsResult> <JobFlows> <member> <ExecutionStatusDetail> <CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime> <StartDateTime>2009-01-28T21:49:16Z</StartDateTime> <State>STARTING</State> </ExecutionStatusDetail> <BootstrapActions> <member> <BootstrapActionConfig> <ScriptBootstrapAction> <Args/> <Path>s3://elasticmapreduce/libs/hue/install-hue</Path> </ScriptBootstrapAction> <Name>Install Hue</Name> </BootstrapActionConfig> </member> </BootstrapActions> <VisibleToAllUsers>true</VisibleToAllUsers> <SupportedProducts> <member>Hue</member> </SupportedProducts> <Name>MyJobFlowName</Name> <LogUri>mybucket/subdir/</LogUri> <Steps> <member> <ExecutionStatusDetail> <CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime> <State>PENDING</State> </ExecutionStatusDetail> <StepConfig> <HadoopJarStep> <Jar>MyJarFile</Jar> <MainClass>MyMailClass</MainClass> <Args> <member>arg1</member> <member>arg2</member> </Args> <Properties/> </HadoopJarStep> <Name>MyStepName</Name> <ActionOnFailure>CONTINUE</ActionOnFailure> </StepConfig> </member> </Steps> <JobFlowId>j-3UN6WX5RRO2AG</JobFlowId> <Instances> <Placement> <AvailabilityZone>us-east-1a</AvailabilityZone> </Placement> <SlaveInstanceType>m1.small</SlaveInstanceType> <MasterInstanceType>m1.small</MasterInstanceType> <Ec2KeyName>myec2keyname</Ec2KeyName> <InstanceCount>4</InstanceCount> <KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps> </Instances> </member> </JobFlows> </DescribeJobFlowsResult> <ResponseMetadata> <RequestId>9cea3229-ed85-11dd-9877-6fad448a8419</RequestId> </ResponseMetadata> </DescribeJobFlowsResponse> """ JOB_FLOW_COMPLETED = b""" <DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> <DescribeJobFlowsResult> <JobFlows> <member> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <LastStateChangeReason>Steps completed</LastStateChangeReason> <StartDateTime>2010-10-21T01:03:59Z</StartDateTime> <ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:44:18Z</EndDateTime> 
</ExecutionStatusDetail> <BootstrapActions/> <Name>RealJobFlowName</Name> <LogUri>s3n://example.emrtest.scripts/jobflow_logs/</LogUri> <Steps> <member> <StepConfig> <HadoopJarStep> <Jar>s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar> <Args> <member>s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch</member> </Args> <Properties/> </HadoopJarStep> <Name>Setup Hadoop Debugging</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:03:59Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:04:22Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/20/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/19/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/18/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/17/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/16/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/15/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/14/*</member> <member>-output</member> <member>s3://example.emrtest.crunched/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_Initial</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:04:22Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:36:18Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.crunched/*</member> <member>-output</member> <member>s3://example.emrtest.step1/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step1</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:36:18Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:37:51Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.crunched/*</member> <member>-output</member> <member>s3://example.emrtest.step2/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step2</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> 
<ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:37:51Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:39:32Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.step1/*</member> <member>-output</member> <member>s3://example.emrtest.step3/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step3</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:39:32Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:41:22Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.step1/*</member> <member>-output</member> <member>s3://example.emrtest.step4/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step4</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:41:22Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:43:03Z</EndDateTime> </ExecutionStatusDetail> </member> </Steps> <JobFlowId>j-3H3Q13JPFLU22</JobFlowId> <Instances> <SlaveInstanceType>m1.large</SlaveInstanceType> <MasterInstanceId>i-64c21609</MasterInstanceId> <Placement> <AvailabilityZone>us-east-1b</AvailabilityZone> </Placement> <InstanceGroups> <member> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <InstanceRunningCount>0</InstanceRunningCount> <StartDateTime>2010-10-21T01:02:09Z</StartDateTime> <ReadyDateTime>2010-10-21T01:03:03Z</ReadyDateTime> <State>ENDED</State> <EndDateTime>2010-10-21T01:44:18Z</EndDateTime> <InstanceRequestCount>1</InstanceRequestCount> <InstanceType>m1.large</InstanceType> <Market>ON_DEMAND</Market> <LastStateChangeReason>Job flow terminated</LastStateChangeReason> <InstanceRole>MASTER</InstanceRole> <InstanceGroupId>ig-EVMHOZJ2SCO8</InstanceGroupId> <Name>master</Name> </member> <member> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <InstanceRunningCount>0</InstanceRunningCount> <StartDateTime>2010-10-21T01:03:59Z</StartDateTime> <ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime> <State>ENDED</State> <EndDateTime>2010-10-21T01:44:18Z</EndDateTime> <InstanceRequestCount>9</InstanceRequestCount> <InstanceType>m1.large</InstanceType> <Market>ON_DEMAND</Market> <LastStateChangeReason>Job flow terminated</LastStateChangeReason> <InstanceRole>CORE</InstanceRole> <InstanceGroupId>ig-YZHDYVITVHKB</InstanceGroupId> <Name>slave</Name> </member> </InstanceGroups> <NormalizedInstanceHours>40</NormalizedInstanceHours> <HadoopVersion>0.20</HadoopVersion> <MasterInstanceType>m1.large</MasterInstanceType> 
<MasterPublicDnsName>ec2-184-72-153-139.compute-1.amazonaws.com</MasterPublicDnsName> <Ec2KeyName>myubersecurekey</Ec2KeyName> <InstanceCount>10</InstanceCount> <KeepJobFlowAliveWhenNoSteps>false</KeepJobFlowAliveWhenNoSteps> </Instances> </member> </JobFlows> </DescribeJobFlowsResult> <ResponseMetadata> <RequestId>c31e701d-dcb4-11df-b5d9-337fc7fe4773</RequestId> </ResponseMetadata> </DescribeJobFlowsResponse> """ class TestEMRResponses(unittest.TestCase): def _parse_xml(self, body, markers): rs = ResultSet(markers) h = handler.XmlHandler(rs, None) xml.sax.parseString(body, h) return rs def _assert_fields(self, response, **fields): for field, expected in fields.items(): actual = getattr(response, field) self.assertEquals(expected, actual, "Field %s: %r != %r" % (field, expected, actual)) def test_JobFlows_example(self): [jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE, [('member', emrobject.JobFlow)]) self._assert_fields(jobflow, creationdatetime='2009-01-28T21:49:16Z', startdatetime='2009-01-28T21:49:16Z', state='STARTING', instancecount='4', jobflowid='j-3UN6WX5RRO2AG', loguri='mybucket/subdir/', name='MyJobFlowName', availabilityzone='us-east-1a', slaveinstancetype='m1.small', masterinstancetype='m1.small', ec2keyname='myec2keyname', keepjobflowalivewhennosteps='true') def test_JobFlows_completed(self): [jobflow] = self._parse_xml(JOB_FLOW_COMPLETED, [('member', emrobject.JobFlow)]) self._assert_fields(jobflow, creationdatetime='2010-10-21T01:00:25Z', startdatetime='2010-10-21T01:03:59Z', enddatetime='2010-10-21T01:44:18Z', state='COMPLETED', instancecount='10', jobflowid='j-3H3Q13JPFLU22', loguri='s3n://example.emrtest.scripts/jobflow_logs/', name='RealJobFlowName', availabilityzone='us-east-1b', slaveinstancetype='m1.large', masterinstancetype='m1.large', ec2keyname='myubersecurekey', keepjobflowalivewhennosteps='false') self.assertEquals(6, len(jobflow.steps)) self.assertEquals(2, len(jobflow.instancegroups))
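Underneath boto's handler these tests are plain xml.sax streaming; a minimal ContentHandler sketch that pulls <State> values out of a fixture like the ones above:

import xml.sax

class StateCollector(xml.sax.ContentHandler):
    # Accumulates the text of every <State> element, the same style of
    # field extraction boto's XmlHandler performs.
    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        self.in_state = False
        self.states = []

    def startElement(self, name, attrs):
        if name == 'State':
            self.in_state = True
            self.states.append('')

    def characters(self, content):
        if self.in_state:
            self.states[-1] += content

    def endElement(self, name):
        if name == 'State':
            self.in_state = False

collector = StateCollector()
xml.sax.parseString(b'<JobFlow><State>COMPLETED</State></JobFlow>', collector)
print(collector.states)  # ['COMPLETED']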
mit
x111ong/odoo
addons/base_import/models.py
222
14243
import csv import itertools import logging import operator try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import psycopg2 from openerp.osv import orm, fields from openerp.tools.translate import _ FIELDS_RECURSION_LIMIT = 2 ERROR_PREVIEW_BYTES = 200 _logger = logging.getLogger(__name__) class ir_import(orm.TransientModel): _name = 'base_import.import' # allow imports to survive for 12h in case user is slow _transient_max_hours = 12.0 _columns = { 'res_model': fields.char('Model'), 'file': fields.binary( 'File', help="File to check and/or import, raw binary (not base64)"), 'file_name': fields.char('File Name'), 'file_type': fields.char('File Type'), } def get_fields(self, cr, uid, model, context=None, depth=FIELDS_RECURSION_LIMIT): """ Recursively get fields for the provided model (through fields_get) and filter them according to importability The output format is a list of ``Field``, with ``Field`` defined as: .. class:: Field .. attribute:: id (str) A non-unique identifier for the field, used to compute the span of the ``required`` attribute: if multiple ``required`` fields have the same id, only one of them is necessary. .. attribute:: name (str) The field's logical (Odoo) name within the scope of its parent. .. attribute:: string (str) The field's human-readable name (``@string``) .. attribute:: required (bool) Whether the field is marked as required in the model. Clients must provide non-empty import values for all required fields or the import will error out. .. attribute:: fields (list(Field)) The current field's subfields. The database and external identifiers for m2o and m2m fields; a filtered and transformed fields_get for o2m fields (to a variable depth defined by ``depth``). Fields with no sub-fields will have an empty list of sub-fields. :param str model: name of the model to get fields form :param int landing: depth of recursion into o2m fields """ model_obj = self.pool[model] fields = [{ 'id': 'id', 'name': 'id', 'string': _("External ID"), 'required': False, 'fields': [], }] fields_got = model_obj.fields_get(cr, uid, context=context) blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD] for name, field in fields_got.iteritems(): if name in blacklist: continue # an empty string means the field is deprecated, @deprecated must # be absent or False to mean not-deprecated if field.get('deprecated', False) is not False: continue if field.get('readonly'): states = field.get('states') if not states: continue # states = {state: [(attr, value), (attr2, value2)], state2:...} if not any(attr == 'readonly' and value is False for attr, value in itertools.chain.from_iterable( states.itervalues())): continue f = { 'id': name, 'name': name, 'string': field['string'], # Y U NO ALWAYS HAS REQUIRED 'required': bool(field.get('required')), 'fields': [], } if field['type'] in ('many2many', 'many2one'): f['fields'] = [ dict(f, name='id', string=_("External ID")), dict(f, name='.id', string=_("Database ID")), ] elif field['type'] == 'one2many' and depth: f['fields'] = self.get_fields( cr, uid, field['relation'], context=context, depth=depth-1) if self.pool['res.users'].has_group(cr, uid, 'base.group_no_one'): f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []}) fields.append(f) # TODO: cache on model? 
return fields def _read_csv(self, record, options): """ Returns a CSV-parsed iterator of all empty lines in the file :throws csv.Error: if an error is detected during CSV parsing :throws UnicodeDecodeError: if ``options.encoding`` is incorrect """ csv_iterator = csv.reader( StringIO(record.file), quotechar=str(options['quoting']), delimiter=str(options['separator'])) def nonempty(row): return any(x for x in row if x.strip()) csv_nonempty = itertools.ifilter(nonempty, csv_iterator) # TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet encoding = options.get('encoding', 'utf-8') return itertools.imap( lambda row: [item.decode(encoding) for item in row], csv_nonempty) def _match_header(self, header, fields, options): """ Attempts to match a given header to a field of the imported model. :param str header: header name from the CSV file :param fields: :param dict options: :returns: an empty list if the header couldn't be matched, or all the fields to traverse :rtype: list(Field) """ string_match = None for field in fields: # FIXME: should match all translations & original # TODO: use string distance (levenshtein? hamming?) if header.lower() == field['name'].lower(): return [field] if header.lower() == field['string'].lower(): # matching string are not reliable way because # strings have no unique constraint string_match = field if string_match: # this behavior is only applied if there is no matching field['name'] return [string_match] if '/' not in header: return [] # relational field path traversal = [] subfields = fields # Iteratively dive into fields tree for section in header.split('/'): # Strip section in case spaces are added around '/' for # readability of paths match = self._match_header(section.strip(), subfields, options) # Any match failure, exit if not match: return [] # prep subfields for next iteration within match[0] field = match[0] subfields = field['fields'] traversal.append(field) return traversal def _match_headers(self, rows, fields, options): """ Attempts to match the imported model's fields to the titles of the parsed CSV file, if the file is supposed to have headers. Will consume the first line of the ``rows`` iterator. Returns a pair of (None, None) if headers were not requested or the list of headers and a dict mapping cell indices to key paths in the ``fields`` tree :param Iterator rows: :param dict fields: :param dict options: :rtype: (None, None) | (list(str), dict(int: list(str))) """ if not options.get('headers'): return None, None headers = next(rows) return headers, dict( (index, [field['name'] for field in self._match_header(header, fields, options)] or None) for index, header in enumerate(headers) ) def parse_preview(self, cr, uid, id, options, count=10, context=None): """ Generates a preview of the uploaded files, and performs fields-matching between the import's file data and the model's columns. If the headers are not requested (not options.headers), ``matches`` and ``headers`` are both ``False``. :param id: identifier of the import :param int count: number of preview lines to generate :param options: format-specific options. 
CSV: {encoding, quoting, separator, headers} :type options: {str, str, str, bool} :returns: {fields, matches, headers, preview} | {error, preview} :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str} """ (record,) = self.browse(cr, uid, [id], context=context) fields = self.get_fields(cr, uid, record.res_model, context=context) try: rows = self._read_csv(record, options) headers, matches = self._match_headers(rows, fields, options) # Match should have consumed the first row (iif headers), get # the ``count`` next rows for preview preview = list(itertools.islice(rows, count)) assert preview, "CSV file seems to have no content" return { 'fields': fields, 'matches': matches or False, 'headers': headers or False, 'preview': preview, } except Exception, e: # Due to lazy generators, UnicodeDecodeError (for # instance) may only be raised when serializing the # preview to a list in the return. _logger.debug("Error during CSV parsing preview", exc_info=True) return { 'error': str(e), # iso-8859-1 ensures decoding will always succeed, # even if it yields non-printable characters. This is # in case of UnicodeDecodeError (or csv.Error # compounded with UnicodeDecodeError) 'preview': record.file[:ERROR_PREVIEW_BYTES] .decode( 'iso-8859-1'), } def _convert_import_data(self, record, fields, options, context=None): """ Extracts the input browse_record and fields list (with ``False``-y placeholders for fields to *not* import) into a format Model.import_data can use: a fields list without holes and the precisely matching data matrix :param browse_record record: :param list(str|bool): fields :returns: (data, fields) :rtype: (list(list(str)), list(str)) :raises ValueError: in case the import data could not be converted """ # Get indices for non-empty fields indices = [index for index, field in enumerate(fields) if field] if not indices: raise ValueError(_("You must configure at least one field to import")) # If only one index, itemgetter will return an atom rather # than a 1-tuple if len(indices) == 1: mapper = lambda row: [row[indices[0]]] else: mapper = operator.itemgetter(*indices) # Get only list of actually imported fields import_fields = filter(None, fields) rows_to_import = self._read_csv(record, options) if options.get('headers'): rows_to_import = itertools.islice( rows_to_import, 1, None) data = [ row for row in itertools.imap(mapper, rows_to_import) # don't try inserting completely empty rows (e.g. from # filtering out o2m fields) if any(row) ] return data, import_fields def do(self, cr, uid, id, fields, options, dryrun=False, context=None): """ Actual execution of the import :param fields: import mapping: maps each column to a field, ``False`` for the columns to ignore :type fields: list(str|bool) :param dict options: :param bool dryrun: performs all import operations (and validations) but rollbacks writes, allows getting as much errors as possible without the risk of clobbering the database. :returns: A list of errors. If the list is empty the import executed fully and correctly. 
If the list is non-empty it contains dicts with 3 keys ``type`` the type of error (``error|warning``); ``message`` the error message associated with the error (a string) and ``record`` the data which failed to import (or ``false`` if that data isn't available or provided) :rtype: list({type, message, record}) """ cr.execute('SAVEPOINT import') (record,) = self.browse(cr, uid, [id], context=context) try: data, import_fields = self._convert_import_data( record, fields, options, context=context) except ValueError, e: return [{ 'type': 'error', 'message': unicode(e), 'record': False, }] _logger.info('importing %d rows...', len(data)) import_result = self.pool[record.res_model].load( cr, uid, import_fields, data, context=context) _logger.info('done') # If transaction aborted, RELEASE SAVEPOINT is going to raise # an InternalError (ROLLBACK should work, maybe). Ignore that. # TODO: to handle multiple errors, create savepoint around # write and release it in case of write error (after # adding error to errors array) => can keep on trying to # import stuff, and rollback at the end if there is any # error in the results. try: if dryrun: cr.execute('ROLLBACK TO SAVEPOINT import') else: cr.execute('RELEASE SAVEPOINT import') except psycopg2.InternalError: pass return import_result['messages']
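_convert_import_data's column selection reduces to an operator.itemgetter over the mapped indices; a standalone sketch of that trick, including the one-index special case it guards against:

import operator

rows = [['a', '1', 'x'], ['b', '2', 'y']]
indices = [0, 2]  # columns the user mapped to real fields

if len(indices) == 1:
    # itemgetter with a single key returns a bare item, not a tuple
    mapper = lambda row: [row[indices[0]]]
else:
    mapper = operator.itemgetter(*indices)

data = [list(mapper(row)) for row in rows if any(mapper(row))]
print(data)  # [['a', 'x'], ['b', 'y']]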
agpl-3.0
sgraham/nope
tools/telemetry/telemetry/image_processing/image_util_numpy_impl.py
6
6585
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import division

from telemetry.core import util
from telemetry.image_processing import histogram
from telemetry.image_processing import rgba_color
from telemetry.util import external_modules

util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'png')
import png  # pylint: disable=F0401

cv2 = external_modules.ImportOptionalModule('cv2')
np = external_modules.ImportRequiredModule('numpy')


def Channels(image):
  return image.shape[2]


def Width(image):
  return image.shape[1]


def Height(image):
  return image.shape[0]


def Pixels(image):
  return bytearray(np.uint8(image[:, :, ::-1]).flat)  # Convert from bgr to rgb.


def GetPixelColor(image, x, y):
  bgr = image[y][x]
  return rgba_color.RgbaColor(bgr[2], bgr[1], bgr[0])


def WritePngFile(image, path):
  if cv2 is not None:
    cv2.imwrite(path, image)
  else:
    with open(path, "wb") as f:
      metadata = {}
      metadata['size'] = (Width(image), Height(image))
      metadata['alpha'] = False
      metadata['bitdepth'] = 8
      img = image[:, :, ::-1]
      pixels = img.reshape(-1).tolist()
      png.Writer(**metadata).write_array(f, pixels)


def FromRGBPixels(width, height, pixels, bpp):
  img = np.array(pixels, order='F', dtype=np.uint8)
  img.resize((height, width, bpp))
  if bpp == 4:
    img = img[:, :, :3]  # Drop alpha.
  return img[:, :, ::-1]  # Convert from rgb to bgr.


def FromPngFile(path):
  if cv2 is not None:
    img = cv2.imread(path, cv2.CV_LOAD_IMAGE_COLOR)
    if img is None:
      raise ValueError('Image at path {0} could not be read'.format(path))
    return img
  else:
    with open(path, "rb") as f:
      return FromPng(f.read())


def FromPng(png_data):
  if cv2 is not None:
    file_bytes = np.asarray(bytearray(png_data), dtype=np.uint8)
    return cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_COLOR)
  else:
    width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
    return FromRGBPixels(width, height, pixels, 4 if meta['alpha'] else 3)


def _SimpleDiff(image1, image2):
  if cv2 is not None:
    return cv2.absdiff(image1, image2)
  else:
    amax = np.maximum(image1, image2)
    amin = np.minimum(image1, image2)
    return amax - amin


def AreEqual(image1, image2, tolerance, likely_equal):
  if image1.shape != image2.shape:
    return False
  self_image = image1
  other_image = image2
  if tolerance:
    if likely_equal:
      return np.amax(_SimpleDiff(image1, image2)) <= tolerance
    else:
      for row in xrange(Height(image1)):
        if np.amax(_SimpleDiff(image1[row], image2[row])) > tolerance:
          return False
      return True
  else:
    if likely_equal:
      return (self_image == other_image).all()
    else:
      for row in xrange(Height(image1)):
        if not (self_image[row] == other_image[row]).all():
          return False
      return True


def Diff(image1, image2):
  self_image = image1
  other_image = image2
  if image1.shape[2] != image2.shape[2]:
    raise ValueError('Cannot diff images of differing bit depth')
  if image1.shape[:2] != image2.shape[:2]:
    width = max(Width(image1), Width(image2))
    height = max(Height(image1), Height(image2))
    self_image = np.zeros((width, height, image1.shape[2]), np.uint8)
    other_image = np.zeros((width, height, image1.shape[2]), np.uint8)
    self_image[0:Height(image1), 0:Width(image1)] = image1
    other_image[0:Height(image2), 0:Width(image2)] = image2
  return _SimpleDiff(self_image, other_image)


def GetBoundingBox(image, color, tolerance):
  if cv2 is not None:
    color = np.array([color.b, color.g, color.r])
    img = cv2.inRange(image, np.subtract(color[0:3], tolerance),
                      np.add(color[0:3], tolerance))
    count = cv2.countNonZero(img)
    if count == 0:
      return None, 0
    contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contour = np.concatenate(contours)
    return cv2.boundingRect(contour), count
  else:
    if tolerance:
      color = np.array([color.b, color.g, color.r])
      colorm = color - tolerance
      colorp = color + tolerance
      b = image[:, :, 0]
      g = image[:, :, 1]
      r = image[:, :, 2]
      w = np.where(((b >= colorm[0]) & (b <= colorp[0]) &
                    (g >= colorm[1]) & (g <= colorp[1]) &
                    (r >= colorm[2]) & (r <= colorp[2])))
    else:
      w = np.where((image[:, :, 0] == color.b) &
                   (image[:, :, 1] == color.g) &
                   (image[:, :, 2] == color.r))
    if len(w[0]) == 0:
      return None, 0
    return (w[1][0], w[0][0], w[1][-1] - w[1][0] + 1, w[0][-1] - w[0][0] + 1), \
        len(w[0])


def Crop(image, left, top, width, height):
  img_height, img_width = image.shape[:2]
  if (left < 0 or top < 0 or
      (left + width) > img_width or
      (top + height) > img_height):
    raise ValueError('Invalid dimensions')
  return image[top:top + height, left:left + width]


def GetColorHistogram(image, ignore_color, tolerance):
  if cv2 is not None:
    mask = None
    if ignore_color is not None:
      color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
      mask = ~cv2.inRange(image, np.subtract(color, tolerance),
                          np.add(color, tolerance))
    flatten = np.ndarray.flatten
    hist_b = flatten(cv2.calcHist([image], [0], mask, [256], [0, 256]))
    hist_g = flatten(cv2.calcHist([image], [1], mask, [256], [0, 256]))
    hist_r = flatten(cv2.calcHist([image], [2], mask, [256], [0, 256]))
  else:
    filtered = image.reshape(-1, 3)
    if ignore_color is not None:
      color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
      colorm = np.array(color) - tolerance
      colorp = np.array(color) + tolerance
      in_range = ((filtered[:, 0] < colorm[0]) | (filtered[:, 0] > colorp[0]) |
                  (filtered[:, 1] < colorm[1]) | (filtered[:, 1] > colorp[1]) |
                  (filtered[:, 2] < colorm[2]) | (filtered[:, 2] > colorp[2]))
      filtered = np.compress(in_range, filtered, axis=0)
    if len(filtered[:, 0]) == 0:
      return histogram.ColorHistogram(np.zeros((256)), np.zeros((256)),
                                      np.zeros((256)), ignore_color)
    hist_b = np.bincount(filtered[:, 0], minlength=256)
    hist_g = np.bincount(filtered[:, 1], minlength=256)
    hist_r = np.bincount(filtered[:, 2], minlength=256)
  return histogram.ColorHistogram(hist_r, hist_g, hist_b, ignore_color)
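A minimal usage sketch for the helpers above (illustrative only: it assumes the module's functions are in scope, numpy is installed, and 'demo.png' is a hypothetical writable path):

import numpy as np

# Hypothetical 4x4 solid-red test image in BGR channel order, as the helpers expect.
image = np.zeros((4, 4, 3), np.uint8)
image[:, :] = (0, 0, 255)

WritePngFile(image, 'demo.png')       # round-trip through PNG
restored = FromPngFile('demo.png')
assert AreEqual(image, restored, tolerance=0, likely_equal=True)
print(GetPixelColor(restored, 0, 0))  # RgbaColor of the top-left pixel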
bsd-3-clause
Lujeni/ansible
lib/ansible/modules/cloud/docker/docker_config.py
16
9206
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: docker_config

short_description: Manage docker configs.

version_added: "2.8"

description:
  - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
  - Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used in
    future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated unless
    the I(force) option is set.
  - Updates to configs are performed by removing the config and creating it again.

options:
  data:
    description:
      - The value of the config. Required when state is C(present).
    type: str
  data_is_b64:
    description:
      - If set to C(true), the data is assumed to be Base64 encoded and will be decoded before being used.
      - To use binary I(data), it is better to keep it Base64 encoded and let it be decoded by this option.
    type: bool
    default: no
  labels:
    description:
      - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
      - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
    type: dict
  force:
    description:
      - Use with state C(present) to always remove and recreate an existing config.
      - If C(true), an existing config will be replaced, even if it has not been changed.
    type: bool
    default: no
  name:
    description:
      - The name of the config.
    type: str
    required: yes
  state:
    description:
      - Set to C(present), if the config should exist, and C(absent), if it should not.
    type: str
    default: present
    choices:
      - absent
      - present

extends_documentation_fragment:
  - docker
  - docker.docker_py_2_documentation

requirements:
  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
  - "Docker API >= 1.30"

author:
  - Chris Houseknecht (@chouseknecht)
  - John Hu (@ushuz)
'''

EXAMPLES = '''

- name: Create config foo (from a file on the control machine)
  docker_config:
    name: foo
    # If the file is JSON or binary, Ansible might modify it (because
    # it is first decoded and later re-encoded). Base64-encoding the
    # file directly after reading it prevents this to happen.
    data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
    data_is_b64: true
    state: present

- name: Change the config data
  docker_config:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: baz
      one: '1'
    state: present

- name: Add a new label
  docker_config:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: baz
      one: '1'
      # Adding a new label will cause a remove/create of the config
      two: '2'
    state: present

- name: No change
  docker_config:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: baz
      one: '1'
      # Even though 'two' is missing, there is no change to the existing config
    state: present

- name: Update an existing label
  docker_config:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: monkey  # Changing a label will cause a remove/create of the config
      one: '1'
    state: present

- name: Force the (re-)creation of the config
  docker_config:
    name: foo
    data: Goodnight everyone!
    force: yes
    state: present

- name: Remove config foo
  docker_config:
    name: foo
    state: absent
'''

RETURN = '''
config_id:
  description:
    - The ID assigned by Docker to the config object.
  returned: success and I(state) is C(present)
  type: str
  sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''

import base64
import hashlib
import traceback

try:
    from docker.errors import DockerException, APIError
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass

from ansible.module_utils.docker.common import (
    AnsibleDockerClient,
    DockerBaseClass,
    compare_generic,
    RequestException,
)
from ansible.module_utils._text import to_native, to_bytes


class ConfigManager(DockerBaseClass):

    def __init__(self, client, results):
        super(ConfigManager, self).__init__()
        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        parameters = self.client.module.params
        self.name = parameters.get('name')
        self.state = parameters.get('state')
        self.data = parameters.get('data')
        if self.data is not None:
            if parameters.get('data_is_b64'):
                self.data = base64.b64decode(self.data)
            else:
                self.data = to_bytes(self.data)
        self.labels = parameters.get('labels')
        self.force = parameters.get('force')
        self.data_key = None

    def __call__(self):
        if self.state == 'present':
            self.data_key = hashlib.sha224(self.data).hexdigest()
            self.present()
        elif self.state == 'absent':
            self.absent()

    def get_config(self):
        ''' Find an existing config. '''
        try:
            configs = self.client.configs(filters={'name': self.name})
        except APIError as exc:
            self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))

        for config in configs:
            if config['Spec']['Name'] == self.name:
                return config
        return None

    def create_config(self):
        ''' Create a new config '''
        config_id = None
        # We can't see the data after creation, so adding a label we can use for idempotency check
        labels = {
            'ansible_key': self.data_key
        }
        if self.labels:
            labels.update(self.labels)

        try:
            if not self.check_mode:
                config_id = self.client.create_config(self.name, self.data, labels=labels)
        except APIError as exc:
            self.client.fail("Error creating config: %s" % to_native(exc))

        if isinstance(config_id, dict):
            config_id = config_id['ID']

        return config_id

    def present(self):
        ''' Handles state == 'present', creating or updating the config '''
        config = self.get_config()
        if config:
            self.results['config_id'] = config['ID']
            data_changed = False
            attrs = config.get('Spec', {})
            if attrs.get('Labels', {}).get('ansible_key'):
                if attrs['Labels']['ansible_key'] != self.data_key:
                    data_changed = True
            labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
            if data_changed or labels_changed or self.force:
                # if something changed or force, delete and re-create the config
                self.absent()
                config_id = self.create_config()
                self.results['changed'] = True
                self.results['config_id'] = config_id
        else:
            self.results['changed'] = True
            self.results['config_id'] = self.create_config()

    def absent(self):
        ''' Handles state == 'absent', removing the config '''
        config = self.get_config()
        if config:
            try:
                if not self.check_mode:
                    self.client.remove_config(config['ID'])
            except APIError as exc:
                self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
            self.results['changed'] = True


def main():
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        data=dict(type='str'),
        data_is_b64=dict(type='bool', default=False),
        labels=dict(type='dict'),
        force=dict(type='bool', default=False)
    )

    required_if = [
        ('state', 'present', ['data'])
    ]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        min_docker_version='2.6.0',
        min_docker_api_version='1.30',
    )

    try:
        results = dict(
            changed=False,
        )

        ConfigManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e),
                    exception=traceback.format_exc())


if __name__ == '__main__':
    main()
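The 'ansible_key' idempotency scheme used by the module can be reproduced in isolation; a short stdlib-only sketch (the payload is made up):

import hashlib

data = b'Goodnight everyone!'
# The module stores this digest as a config label because Docker does not
# allow reading config data back after creation.
ansible_key = hashlib.sha224(data).hexdigest()

# On a later run, an unchanged payload yields the same digest, so the module
# can skip the remove/re-create cycle; a changed payload forces it.
assert hashlib.sha224(b'Goodnight everyone!').hexdigest() == ansible_key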
gpl-3.0
RitaLee79/android_kernel_xiaomi_armani-kk
tools/perf/python/twatch.py
7370
1334
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
#   twatch - Experimental use of the perf python interface
#   Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
#   This application is free software; you can redistribute it and/or
#   modify it under the terms of the GNU General Public License
#   as published by the Free Software Foundation; version 2.
#
#   This application is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#   General Public License for more details.

import perf

def main():
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads)
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
gpl-2.0
FusionSP/external_chromium_org_third_party_skia
tools/add_codereview_message.py
83
1716
#!/usr/bin/python2

# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Add message to codereview issue.

This script takes a codereview issue number as its argument and a
(possibly multi-line) message on stdin.  It appends the message to the
given issue.

Usage:
  echo MESSAGE | %prog CODEREVIEW_ISSUE
or:
  %prog CODEREVIEW_ISSUE <<EOF
  MESSAGE
  EOF
or:
  %prog --help
"""

import optparse
import sys

import fix_pythonpath  # pylint: disable=W0611
from common.py.utils import find_depot_tools  # pylint: disable=W0611
import rietveld


RIETVELD_URL = 'https://codereview.chromium.org'


def add_codereview_message(issue, message):
  """Add a message to a given codereview.

  Args:
      issue: (int) codereview issue number.
      message: (string) message to add.
  """
  # Passing None for the email and password will result in a prompt or
  # reuse of existing cached credentials.
  my_rietveld = rietveld.Rietveld(RIETVELD_URL, email=None, password=None)

  my_rietveld.add_comment(issue, message)


def main(argv):
  """main function; see module-level docstring and GetOptionParser help.

  Args:
      argv: sys.argv[1:]-type argument list.
  """
  option_parser = optparse.OptionParser(usage=__doc__)
  _, arguments = option_parser.parse_args(argv)

  if len(arguments) > 1:
    option_parser.error('Extra arguments.')
  if len(arguments) != 1:
    option_parser.error('Missing issue number.')

  message = sys.stdin.read()
  add_codereview_message(int(arguments[0]), message)


if __name__ == '__main__':
  main(sys.argv[1:])
bsd-3-clause
ge0rgi/cinder
cinder/api/contrib/capabilities.py
2
2657
# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import oslo_messaging

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import capabilities as capabilities_view
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.volume import rpcapi


def authorize(context, action_name):
    extensions.extension_authorizer('volume', action_name)(context)


class CapabilitiesController(wsgi.Controller):
    """The Capabilities controller for the OpenStack API."""

    _view_builder_class = capabilities_view.ViewBuilder

    def __init__(self):
        # FIXME(jdg): Is it kosher that this just
        # skips the volume.api and goes straight to RPC
        # from here?
        self.volume_api = rpcapi.VolumeAPI()
        super(CapabilitiesController, self).__init__()

    def show(self, req, id):
        """Return capabilities list of given backend."""
        context = req.environ['cinder.context']
        authorize(context, 'capabilities')
        filters = {'host_or_cluster': id, 'binary': 'cinder-volume'}
        services = objects.ServiceList.get_all(context, filters)
        if not services:
            msg = (_("Can't find service: %s") % id)
            raise exception.NotFound(msg)
        topic = services[0].service_topic_queue
        try:
            capabilities = self.volume_api.get_capabilities(context, topic,
                                                            False)
        except oslo_messaging.MessagingTimeout:
            raise exception.RPCTimeout(service=topic)
        return self._view_builder.summary(req, capabilities, topic)


class Capabilities(extensions.ExtensionDescriptor):
    """Capabilities support."""

    name = "Capabilities"
    alias = "capabilities"
    updated = "2015-08-31T00:00:00+00:00"

    def get_resources(self):
        resources = []
        res = extensions.ResourceExtension(
            Capabilities.alias,
            CapabilitiesController())

        resources.append(res)
        return resources
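A hedged sketch of how this extension is typically exercised over HTTP (the endpoint shape follows the ResourceExtension registration above; the API host, project id, token, and backend name are placeholders):

import requests

# GET /v2/{project_id}/capabilities/{host} returns the capabilities reported
# by the named cinder-volume backend, rendered by ViewBuilder.summary().
resp = requests.get(
    'http://cinder-api:8776/v2/PROJECT_ID/capabilities/myhost@lvmdriver-1',
    headers={'X-Auth-Token': 'TOKEN'})
print(resp.json())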
apache-2.0
aherlihy/mongo-python-driver
pymongo/client_options.py
17
7755
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.  You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.

"""Tools to parse mongo client options."""

from bson.codec_options import _parse_codec_options
from pymongo.auth import _build_credentials_tuple
from pymongo.common import validate_boolean
from pymongo import common
from pymongo.errors import ConfigurationError
from pymongo.monitoring import _EventListeners
from pymongo.pool import PoolOptions
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import make_read_preference
from pymongo.ssl_support import get_ssl_context
from pymongo.write_concern import WriteConcern


def _parse_credentials(username, password, database, options):
    """Parse authentication credentials."""
    mechanism = options.get('authmechanism', 'DEFAULT')
    if username is None and mechanism != 'MONGODB-X509':
        return None
    source = options.get('authsource', database or 'admin')
    return _build_credentials_tuple(
        mechanism, source, username, password, options)


def _parse_read_preference(options):
    """Parse read preference options."""
    if 'read_preference' in options:
        return options['read_preference']
    mode = options.get('readpreference', 0)
    tags = options.get('readpreferencetags')
    max_staleness = options.get('maxstalenessseconds', -1)
    return make_read_preference(mode, tags, max_staleness)


def _parse_write_concern(options):
    """Parse write concern options."""
    concern = options.get('w')
    wtimeout = options.get('wtimeout')
    j = options.get('j', options.get('journal'))
    fsync = options.get('fsync')
    return WriteConcern(concern, wtimeout, j, fsync)


def _parse_read_concern(options):
    """Parse read concern options."""
    concern = options.get('readconcernlevel')
    return ReadConcern(concern)


def _parse_ssl_options(options):
    """Parse ssl options."""
    use_ssl = options.get('ssl')
    if use_ssl is not None:
        validate_boolean('ssl', use_ssl)

    certfile = options.get('ssl_certfile')
    keyfile = options.get('ssl_keyfile')
    passphrase = options.get('ssl_pem_passphrase')
    ca_certs = options.get('ssl_ca_certs')
    cert_reqs = options.get('ssl_cert_reqs')
    match_hostname = options.get('ssl_match_hostname', True)
    crlfile = options.get('ssl_crlfile')

    ssl_kwarg_keys = [k for k in options
                      if k.startswith('ssl_') and options[k]]
    if use_ssl is False and ssl_kwarg_keys:
        raise ConfigurationError("ssl has not been enabled but the "
                                 "following ssl parameters have been set: "
                                 "%s. Please set `ssl=True` or remove."
                                 % ', '.join(ssl_kwarg_keys))

    if ssl_kwarg_keys and use_ssl is None:
        # ssl options imply ssl = True
        use_ssl = True

    if use_ssl is True:
        ctx = get_ssl_context(
            certfile, keyfile, passphrase, ca_certs, cert_reqs, crlfile)
        return ctx, match_hostname
    return None, match_hostname


def _parse_pool_options(options):
    """Parse connection pool options."""
    max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE)
    min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE)
    max_idle_time_ms = options.get('maxidletimems', common.MAX_IDLE_TIME_MS)
    if max_pool_size is not None and min_pool_size > max_pool_size:
        raise ValueError("minPoolSize must be smaller or equal to maxPoolSize")
    connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT)
    socket_keepalive = options.get('socketkeepalive', False)
    socket_timeout = options.get('sockettimeoutms')
    wait_queue_timeout = options.get('waitqueuetimeoutms')
    wait_queue_multiple = options.get('waitqueuemultiple')
    event_listeners = options.get('event_listeners')
    appname = options.get('appname')
    ssl_context, ssl_match_hostname = _parse_ssl_options(options)
    return PoolOptions(max_pool_size,
                       min_pool_size,
                       max_idle_time_ms,
                       connect_timeout, socket_timeout,
                       wait_queue_timeout, wait_queue_multiple,
                       ssl_context, ssl_match_hostname, socket_keepalive,
                       _EventListeners(event_listeners),
                       appname)


class ClientOptions(object):
    """ClientOptions"""

    def __init__(self, username, password, database, options):
        self.__options = options
        self.__codec_options = _parse_codec_options(options)
        self.__credentials = _parse_credentials(
            username, password, database, options)
        self.__local_threshold_ms = options.get(
            'localthresholdms', common.LOCAL_THRESHOLD_MS)
        # self.__server_selection_timeout is in seconds. Must use full name for
        # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests.
        self.__server_selection_timeout = options.get(
            'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT)
        self.__pool_options = _parse_pool_options(options)
        self.__read_preference = _parse_read_preference(options)
        self.__replica_set_name = options.get('replicaset')
        self.__write_concern = _parse_write_concern(options)
        self.__read_concern = _parse_read_concern(options)
        self.__connect = options.get('connect')
        self.__heartbeat_frequency = options.get(
            'heartbeatfrequencyms', common.HEARTBEAT_FREQUENCY)

    @property
    def _options(self):
        """The original options used to create this ClientOptions."""
        return self.__options

    @property
    def connect(self):
        """Whether to begin discovering a MongoDB topology automatically."""
        return self.__connect

    @property
    def codec_options(self):
        """A :class:`~bson.codec_options.CodecOptions` instance."""
        return self.__codec_options

    @property
    def credentials(self):
        """A :class:`~pymongo.auth.MongoCredentials` instance or None."""
        return self.__credentials

    @property
    def local_threshold_ms(self):
        """The local threshold for this instance."""
        return self.__local_threshold_ms

    @property
    def server_selection_timeout(self):
        """The server selection timeout for this instance in seconds."""
        return self.__server_selection_timeout

    @property
    def heartbeat_frequency(self):
        """The monitoring frequency in seconds."""
        return self.__heartbeat_frequency

    @property
    def pool_options(self):
        """A :class:`~pymongo.pool.PoolOptions` instance."""
        return self.__pool_options

    @property
    def read_preference(self):
        """A read preference instance."""
        return self.__read_preference

    @property
    def replica_set_name(self):
        """Replica set name or None."""
        return self.__replica_set_name

    @property
    def write_concern(self):
        """A :class:`~pymongo.write_concern.WriteConcern` instance."""
        return self.__write_concern

    @property
    def read_concern(self):
        """A :class:`~pymongo.read_concern.ReadConcern` instance."""
        return self.__read_concern
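A rough illustration of how the parsers above compose (illustrative only: real callers pass options that pymongo's URI parser has already validated and lowercased; the values here are made up):

options = {
    'w': 1,
    'journal': True,
    'readpreference': 0,  # primary
    'maxpoolsize': 50,
}
opts = ClientOptions('user', 'secret', 'mydb', options)
print(opts.write_concern)               # built by _parse_write_concern
print(opts.pool_options.max_pool_size)  # 50, via _parse_pool_options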
apache-2.0
NeuralProsthesisLab/unlock
unlock/analysis/test/test_data_bank.py
1
3421
# Copyright (c) James Percent and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    1. Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
#    3. Neither the name of Unlock nor the names of its contributors may be used
#       to endorse or promote products derived from this software without
#       specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import socket

__author__ = 'jpercent'

from .. import switch

import threading
import time
import random
import unittest


class AttrTest(object):
    def __init__(self):
        super(AttrTest, self).__init__()
        self.a = 0
        self.b = 1
        self.c = 2

    def d(self):
        self.d_value = True

    def e(self, e, e1):
        self.e_value = e
        self.e1_value = e1


class MiscTests(unittest.TestCase):
    def testSwitch(self):
        correct = False
        incorrect = False
        val = 'v'
        for case in switch(val):
            if case('v'):
                correct = True
                break
            if case('d'):
                incorrect = True
                break
            if case():
                incorrect = True
                break
        self.assertTrue(correct and not incorrect)

        correct = False
        incorrect = False
        val = 'd'
        for case in switch(val):
            if case('v'):
                incorrect = True
                break
            if case('d'):
                correct = True
                break
            if case():
                incorrect = True
                break
        self.assertTrue(correct and not incorrect)

        correct = False
        incorrect = False
        val = ['efg', 'v']
        for case in switch(val):
            if case('v'):
                incorrect = True
                break
            if case('d'):
                incorrect = True
                break
            if case(['efg', 'v']):
                correct = True
                break
            if case():
                incorrect = True
                break
        self.assertTrue(correct and not incorrect)


def getSuite():
    return unittest.makeSuite(MiscTests, 'test')


if __name__ == "__main__":
    unittest.main()
bsd-3-clause
NL66278/OCB
addons/account/report/account_balance.py
183
6162
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time

from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header


class account_balance(report_sxw.rml_parse, common_report_header):
    _name = 'report.account.account.balance'

    def __init__(self, cr, uid, name, context=None):
        super(account_balance, self).__init__(cr, uid, name, context=context)
        self.sum_debit = 0.00
        self.sum_credit = 0.00
        self.date_lst = []
        self.date_lst_string = ''
        self.result_acc = []
        self.localcontext.update({
            'time': time,
            'lines': self.lines,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'get_fiscalyear': self._get_fiscalyear,
            'get_filter': self._get_filter,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_account': self._get_account,
            'get_journal': self._get_journal,
            'get_start_date': self._get_start_date,
            'get_end_date': self._get_end_date,
            'get_target_move': self._get_target_move,
        })
        self.context = context

    def set_context(self, objects, data, ids, report_type=None):
        new_ids = ids
        if (data['model'] == 'ir.ui.menu'):
            new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
            objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
        return super(account_balance, self).set_context(objects, data, new_ids, report_type=report_type)

    def _get_account(self, data):
        if data['model'] == 'account.account':
            return self.pool.get('account.account').browse(self.cr, self.uid, data['form']['id']).company_id.name
        return super(account_balance, self)._get_account(data)

    def lines(self, form, ids=None, done=None):
        def _process_child(accounts, disp_acc, parent):
            account_rec = [acct for acct in accounts if acct['id'] == parent][0]
            currency_obj = self.pool.get('res.currency')
            acc_id = self.pool.get('account.account').browse(self.cr, self.uid, account_rec['id'])
            currency = acc_id.currency_id and acc_id.currency_id or acc_id.company_id.currency_id
            res = {
                'id': account_rec['id'],
                'type': account_rec['type'],
                'code': account_rec['code'],
                'name': account_rec['name'],
                'level': account_rec['level'],
                'debit': account_rec['debit'],
                'credit': account_rec['credit'],
                'balance': account_rec['balance'],
                'parent_id': account_rec['parent_id'],
                'bal_type': '',
            }
            self.sum_debit += account_rec['debit']
            self.sum_credit += account_rec['credit']
            if disp_acc == 'movement':
                if not currency_obj.is_zero(self.cr, self.uid, currency, res['credit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['debit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
                    self.result_acc.append(res)
            elif disp_acc == 'not_zero':
                if not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
                    self.result_acc.append(res)
            else:
                self.result_acc.append(res)
            if account_rec['child_id']:
                for child in account_rec['child_id']:
                    _process_child(accounts, disp_acc, child)

        obj_account = self.pool.get('account.account')
        if not ids:
            ids = self.ids
        if not ids:
            return []
        if not done:
            done = {}

        ctx = self.context.copy()
        ctx['fiscalyear'] = form['fiscalyear_id']
        if form['filter'] == 'filter_period':
            ctx['period_from'] = form['period_from']
            ctx['period_to'] = form['period_to']
        elif form['filter'] == 'filter_date':
            ctx['date_from'] = form['date_from']
            ctx['date_to'] = form['date_to']
        ctx['state'] = form['target_move']
        parents = ids
        child_ids = obj_account._get_children_and_consol(self.cr, self.uid, ids, ctx)
        if child_ids:
            ids = child_ids
        accounts = obj_account.read(self.cr, self.uid, ids, ['type', 'code', 'name', 'debit', 'credit', 'balance', 'parent_id', 'level', 'child_id'], ctx)

        for parent in parents:
            if parent in done:
                continue
            done[parent] = 1
            _process_child(accounts, form['display_account'], parent)
        return self.result_acc


class report_trialbalance(osv.AbstractModel):
    _name = 'report.account.report_trialbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_trialbalance'
    _wrapped_report_class = account_balance

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
jskew/gnuradio
gr-zeromq/python/zeromq/__init__.py
47
1139
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

'''
Blocks for interfacing with ZeroMQ endpoints.
'''

import os

try:
    from zeromq_swig import *
except ImportError:
    dirname, filename = os.path.split(os.path.abspath(__file__))
    __path__.append(os.path.join(dirname, "..", "..", "swig"))
    from zeromq_swig import *

from probe_manager import probe_manager
from rpc_manager import rpc_manager
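A minimal flowgraph sketch using one of the blocks this package re-exports (a sketch only, assuming a GNU Radio 3.7 install where zeromq_swig provides pub_sink; the endpoint address is arbitrary):

from gnuradio import gr, blocks, zeromq

# Publish complex samples on a ZMQ PUB socket; a zeromq.sub_source in another
# flowgraph (or any ZeroMQ subscriber) can consume the stream.
tb = gr.top_block()
src = blocks.null_source(gr.sizeof_gr_complex)
sink = zeromq.pub_sink(gr.sizeof_gr_complex, 1, 'tcp://*:5556')
tb.connect(src, sink)
tb.start()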
gpl-3.0
c0cky/mediathread
mediathread/djangosherd/api.py
1
4549
# pylint: disable-msg=R0904
from tastypie import fields
from tastypie.resources import ModelResource

from mediathread.api import UserResource, TagResource
from mediathread.assetmgr.models import Asset
from mediathread.djangosherd.models import SherdNote, DiscussionIndex
from mediathread.projects.models import ProjectNote
from mediathread.taxonomy.api import TermResource
from mediathread.taxonomy.models import TermRelationship


class SherdNoteResource(ModelResource):
    author = fields.ForeignKey(UserResource, 'author',
                               full=True, null=True, blank=True)

    class Meta:
        queryset = SherdNote.objects.select_related('asset').order_by("id")
        excludes = ['tags', 'body', 'added', 'modified']
        list_allowed_methods = []
        detail_allowed_methods = []

    def dehydrate(self, bundle):
        try:
            bundle.data['is_global_annotation'] = \
                bundle.obj.is_global_annotation()
            bundle.data['asset_id'] = str(bundle.obj.asset.id)
            bundle.data['is_null'] = bundle.obj.is_null()
            bundle.data['annotation'] = bundle.obj.annotation()
            bundle.data['url'] = bundle.obj.get_absolute_url()

            modified = bundle.obj.modified.strftime("%m/%d/%y %I:%M %p") \
                if bundle.obj.modified else ''

            bundle.data['metadata'] = {
                'tags': TagResource().render_list(bundle.request,
                                                  bundle.obj.tags_split()),
                'body': bundle.obj.body.strip() if bundle.obj.body else '',
                'primary_type': bundle.obj.asset.primary.label,
                'modified': modified,
                'timecode': bundle.obj.range_as_timecode(),
                'title': bundle.obj.title
            }

            editable = (bundle.request.user.id ==
                        getattr(bundle.obj, 'author_id', -1))

            citable = bundle.request.GET.get('citable', '') == 'true'

            # assumed: there is only one ProjectNote per annotation
            reference = ProjectNote.objects.filter(
                annotation__id=bundle.obj.id).first()
            if reference:
                # notes in a submitted response are not editable
                editable = editable and not reference.project.is_submitted()

                if citable:
                    # this is a heavy operation. don't call it unless needed
                    citable = reference.project.can_cite(bundle.request.course,
                                                         bundle.request.user)

            bundle.data['editable'] = editable
            bundle.data['citable'] = citable

            termResource = TermResource()
            vocabulary = {}
            related = TermRelationship.objects.get_for_object(
                bundle.obj).prefetch_related('term__vocabulary')
            for rel in related:
                if rel.term.vocabulary.id not in vocabulary:
                    vocabulary[rel.term.vocabulary.id] = {
                        'id': rel.term.vocabulary.id,
                        'display_name': rel.term.vocabulary.display_name,
                        'terms': []
                    }
                vocabulary[rel.term.vocabulary.id]['terms'].append(
                    termResource.render_one(bundle.request, rel.term))
            bundle.data['vocabulary'] = vocabulary.values()
        except Asset.DoesNotExist:
            bundle.data['asset_id'] = ''
            bundle.data['metadata'] = {'title': 'Item Deleted'}
        return bundle

    def render_one(self, request, selection, asset_key):
        # assumes user is allowed to see this note
        bundle = self.build_bundle(obj=selection, request=request)
        dehydrated = self.full_dehydrate(bundle)
        bundle.data['asset_key'] = '%s_%s' % (asset_key,
                                              bundle.data['asset_id'])
        return self._meta.serializer.to_simple(dehydrated, None)


class DiscussionIndexResource(object):

    def render_list(self, request, indicies):
        collaborations = DiscussionIndex.with_permission(request, indicies)
        ctx = {
            'references': [{
                'id': obj.collaboration.object_pk,
                'title': obj.collaboration.title,
                'type': obj.get_type_label(),
                'url': obj.get_absolute_url(),
                'modified': obj.modified.strftime("%m/%d/%y %I:%M %p")}
                for obj in collaborations]}
        return ctx
gpl-2.0
joariasl/odoo
addons/account_budget/wizard/account_budget_crossovered_report.py
375
2089
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time

from openerp.osv import fields, osv


class account_budget_crossvered_report(osv.osv_memory):
    _name = "account.budget.crossvered.report"
    _description = "Account Budget crossvered report"
    _columns = {
        'date_from': fields.date('Start of period', required=True),
        'date_to': fields.date('End of period', required=True),
    }
    _defaults = {
        'date_from': lambda *a: time.strftime('%Y-01-01'),
        'date_to': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def check_report(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'crossovered.budget',
            'form': data
        }
        datas['form']['ids'] = datas['ids']
        datas['form']['report'] = 'analytic-full'
        return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_crossoveredbudget', data=datas, context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
jjmleiro/hue
desktop/core/ext-py/Django-1.6.10/tests/custom_managers_regress/models.py
64
1192
""" Regression tests for custom manager classes. """ from django.db import models from django.utils.encoding import python_2_unicode_compatible class RestrictedManager(models.Manager): """ A manager that filters out non-public instances. """ def get_queryset(self): return super(RestrictedManager, self).get_queryset().filter(is_public=True) @python_2_unicode_compatible class RelatedModel(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name @python_2_unicode_compatible class RestrictedModel(models.Model): name = models.CharField(max_length=50) is_public = models.BooleanField(default=False) related = models.ForeignKey(RelatedModel) objects = RestrictedManager() plain_manager = models.Manager() def __str__(self): return self.name @python_2_unicode_compatible class OneToOneRestrictedModel(models.Model): name = models.CharField(max_length=50) is_public = models.BooleanField(default=False) related = models.OneToOneField(RelatedModel) objects = RestrictedManager() plain_manager = models.Manager() def __str__(self): return self.name
apache-2.0
x111ong/django
tests/auth_tests/test_views.py
183
44561
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import datetime
import itertools
import os
import re
from importlib import import_module

from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
    AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import (
    TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY

from .models import UUIDUser
from .settings import AUTH_TEMPLATES


@override_settings(
    LANGUAGES=[
        ('en', 'English'),
    ],
    LANGUAGE_CODE='en',
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the following test cases.
    """
    @classmethod
    def setUpTestData(cls):
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='[email protected]', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u2 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
            first_name='Inactive', last_name='User', email='[email protected]', is_staff=False, is_active=False,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='[email protected]', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u4 = User.objects.create(
            password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='empty_password', first_name='Empty', last_name='Password', email='[email protected]',
            is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u5 = User.objects.create(
            password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unmanageable_password', first_name='Unmanageable', last_name='Password',
            email='[email protected]', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u6 = User.objects.create(
            password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unknown_password', first_name='Unknown', last_name='Password',
            email='[email protected]', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )

    def login(self, username='testclient', password='password'):
        response = self.client.post('/login/', {
            'username': username,
            'password': password,
        })
        self.assertIn(SESSION_KEY, self.client.session)
        return response

    def logout(self):
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(SESSION_KEY, self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.
        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields

        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))


@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):

    def test_named_urls(self):
        "Named URLs should be reversible"
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)


class PasswordResetTest(AuthViewsTestCase):

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("http://", mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
        # optional multipart text/html email has been added. Make sure original,
        # default functionality is 100% the same
        self.assertFalse(mail.outbox[0].message().is_multipart())

    def test_html_mail_template(self):
        """
        A multipart email with text/plain and text/html is sent
        if the html_email_template parameter is passed to the view
        """
        response = self.client.post('/password_reset/html_email_template/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertNotIn('<html>', message.get_payload(0).get_payload())
        self.assertIn('<html>', message.get_payload(1).get_payload())

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("[email protected]", mail.outbox[0].from_email)

    @ignore_warnings(category=RemovedInDjango110Warning)
    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post(
            '/admin_password_reset/',
            {'email': '[email protected]'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("http://adminsite.com", mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/password_reset/',
                {'email': '[email protected]'},
                HTTP_HOST='www.example:[email protected]'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/admin_password_reset/',
                {'email': '[email protected]'},
                HTTP_HOST='www.example:[email protected]'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        self.client.post('/password_reset/', {'email': '[email protected]'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]

        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]

        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='[email protected]')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})

        # Check the password has been changed
        u = User.objects.get(email='[email protected]')
        self.assertTrue(u.check_password("anewpassword"))

        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        response = self.client.post('/password_reset/custom_redirect/named/', {'email': '[email protected]'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_display_user_from_form(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)

        # #16919 -- The ``password_reset_confirm`` view should pass the user
        # object to the ``SetPasswordForm``, even on GET requests.
        # For this test, we render ``{{ form.user }}`` in the template
        # ``registration/password_reset_confirm.html`` so that we can test this.
        username = User.objects.get(email='[email protected]').username
        self.assertContains(response, "Hello, %s." % username)

        # However, the view should NOT pass any user object on a form if the
        # password reset link was invalid.
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "Hello, .")


@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    user_email = '[email protected]'

    @classmethod
    def setUpTestData(cls):
        cls.u1 = CustomUser.custom_objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31),
            email='[email protected]',
            is_active=True,
            is_admin=False,
            date_of_birth=datetime.date(1976, 11, 8)
        )

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': self.user_email})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
        # then submit a new password
        response = self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': 'anewpassword',
        })
        self.assertRedirects(response, '/reset/done/')


@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):

    def _test_confirm_start(self):
        # instead of fixture
        UUIDUser.objects.create_user(
            email=self.user_email,
            username='foo',
            password='foo',
        )
        return super(UUIDUserPasswordResetTest, self)._test_confirm_start()


class ChangePasswordTest(AuthViewsTestCase):

    def fail_login(self, password='password'):
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')


@modify_settings(MIDDLEWARE_CLASSES={
    'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
class SessionAuthenticationTests(AuthViewsTestCase):
    def test_user_password_change_updates_session(self):
        """
        #21649 - Ensure contrib.auth.views.password_change updates the user's
        session auth hash after a password change so the session isn't logged out.
        """
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # if the hash isn't updated, retrieving the redirection page will fail.
        self.assertRedirects(response, '/password_change/done/')


class LoginTest(AuthViewsTestCase):

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if apps.is_installed('django.contrib.sites'):
            Site = apps.get_model('sites.Site')
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertIsInstance(response.context['form'], AuthenticationForm)

    def test_security_check(self, password='password'):
        login_url = reverse('login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):

            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertNotIn(bad_url, response.url,
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertIn(good_url, response.url,
                          "%s should be allowed" % good_url)

    def test_login_form_contains_request(self):
        # 15198
        self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.

    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        # get_token() triggers CSRF token inclusion in the response
        get_token(req)
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value

        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}

        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value

        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)

    def test_session_key_flushed_on_login(self):
        """
        To avoid reusing another user's session, ensure a new, empty session is
        created if the existing session corresponds to a different authenticated
        user.
        """
        self.login()
        original_session_key = self.client.session.session_key

        self.login(username='staff')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_session_key_flushed_on_login_after_password_change(self):
        """
        As above, but same user logging in after a password change.
        """
        self.login()
        original_session_key = self.client.session.session_key

        # If no password change, session key should not be flushed.
        self.login()
        self.assertEqual(original_session_key, self.client.session.session_key)

        user = User.objects.get(username='testclient')
        user.set_password('foobar')
        user.save()

        self.login(password='foobar')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_login_session_without_hash_session_key(self):
        """
        Session without django.contrib.auth.HASH_SESSION_KEY should login
        without an exception.
        """
        user = User.objects.get(username='testclient')
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[SESSION_KEY] = user.id
        session.save()
        original_session_key = session.session_key
        self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key

        self.login()
        self.assertNotEqual(original_session_key, self.client.session.session_key)


class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""
    def assertLoginURLEquals(self, url, parse_qs=False):
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'https:///login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_lazy_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')


class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""
    def assertLoginRedirectURLEqual(self, url):
        response = self.login()
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url)

    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')

    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')

    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        self.assertLoginRedirectURLEqual('/password_reset/')

    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')


class RedirectToLoginTests(AuthViewsTestCase):
    """Tests for the redirect_to_login view"""
    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy(self):
        login_redirect_response = redirect_to_login(next='/else/where/')
        expected = '/login/?next=/else/where/'
        self.assertEqual(expected, login_redirect_response.url)

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy_and_unicode(self):
        login_redirect_response = redirect_to_login(next='/else/where/झ/')
        expected = '/login/?next=/else/where/%E0%A4%9D/'
        self.assertEqual(expected, login_redirect_response.url)


class LogoutTest(AuthViewsTestCase):

    def confirm_logged_out(self):
        self.assertNotIn(SESSION_KEY, self.client.session)

    def test_logout_default(self):
        "Logout
without next_page option renders the default template" self.login() response = self.client.get('/logout/') self.assertContains(response, 'Logged out') self.confirm_logged_out() def test_14377(self): # Bug 14377 self.login() response = self.client.get('/logout/') self.assertIn('site', response.context) def test_logout_with_overridden_redirect_url(self): # Bug 11223 self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertURLEqual(response.url, '/somewhere/') response = self.client.get('/logout/next_page/?next=/login/') self.assertEqual(response.status_code, 302) self.assertURLEqual(response.url, '/login/') self.confirm_logged_out() def test_logout_with_next_page_specified(self): "Logout with next_page option given redirects to specified resource" self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertURLEqual(response.url, '/somewhere/') self.confirm_logged_out() def test_logout_with_redirect_argument(self): "Logout with query string redirects to specified resource" self.login() response = self.client.get('/logout/?next=/login/') self.assertEqual(response.status_code, 302) self.assertURLEqual(response.url, '/login/') self.confirm_logged_out() def test_logout_with_custom_redirect_argument(self): "Logout with custom query string redirects to specified resource" self.login() response = self.client.get('/logout/custom_query/?follow=/somewhere/') self.assertEqual(response.status_code, 302) self.assertURLEqual(response.url, '/somewhere/') self.confirm_logged_out() def test_logout_with_named_redirect(self): "Logout resolves names or URLs passed as next_page." self.login() response = self.client.get('/logout/next_page/named/') self.assertEqual(response.status_code, 302) self.assertURLEqual(response.url, '/password_reset/') self.confirm_logged_out() def test_security_check(self, password='password'): logout_url = reverse('logout') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'http:///example.com', 'https://example.com', 'ftp://exampel.com', '///example.com', '//example.com', 'javascript:alert("XSS")'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urlquote(bad_url), } self.login() response = self.client.get(nasty_url) self.assertEqual(response.status_code, 302) self.assertNotIn(bad_url, response.url, "%s should be blocked" % bad_url) self.confirm_logged_out() # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https://testserver/', 'HTTPS://testserver/', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urlquote(good_url), } self.login() response = self.client.get(safe_url) self.assertEqual(response.status_code, 302) self.assertIn(good_url, response.url, "%s should be allowed" % good_url) self.confirm_logged_out() def test_logout_preserve_language(self): """Check that language stored in session is preserved after logout""" # Create a new session with language engine = import_module(settings.SESSION_ENGINE) session = engine.SessionStore() session[LANGUAGE_SESSION_KEY] = 'pl' session.save() self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key self.client.get('/logout/') 
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl') # Redirect in test_user_change_password will fail if session auth hash # isn't updated after password change (#21649) @modify_settings(MIDDLEWARE_CLASSES={ 'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', }) @override_settings( PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'], ROOT_URLCONF='auth_tests.urls_admin', ) class ChangelistTests(AuthViewsTestCase): def setUp(self): # Make me a superuser before logging in. User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True) self.login() self.admin = User.objects.get(pk=self.u1.pk) def get_user_data(self, user): return { 'username': user.username, 'password': user.password, 'email': user.email, 'is_active': user.is_active, 'is_staff': user.is_staff, 'is_superuser': user.is_superuser, 'last_login_0': user.last_login.strftime('%Y-%m-%d'), 'last_login_1': user.last_login.strftime('%H:%M:%S'), 'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'), 'initial-last_login_1': user.last_login.strftime('%H:%M:%S'), 'date_joined_0': user.date_joined.strftime('%Y-%m-%d'), 'date_joined_1': user.date_joined.strftime('%H:%M:%S'), 'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'), 'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'), 'first_name': user.first_name, 'last_name': user.last_name, } # #20078 - users shouldn't be allowed to guess password hashes via # repeated password__startswith queries. def test_changelist_disallows_password_lookups(self): # A lookup that tries to filter on password isn't OK with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls: response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$') self.assertEqual(response.status_code, 400) self.assertEqual(len(logger_calls), 1) def test_user_change_email(self): data = self.get_user_data(self.admin) data['email'] = 'new_' + data['email'] response = self.client.post( reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)), data ) self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist')) row = LogEntry.objects.latest('id') self.assertEqual(row.change_message, 'Changed email.') def test_user_not_change(self): response = self.client.post( reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)), self.get_user_data(self.admin) ) self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist')) row = LogEntry.objects.latest('id') self.assertEqual(row.change_message, 'No fields changed.') def test_user_change_password(self): user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)) password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,)) response = self.client.get(user_change_url) # Test the link inside password field help_text. 
rel_link = re.search( r'you can change the password using <a href="([^"]*)">this form</a>', force_text(response.content) ).groups()[0] self.assertEqual( os.path.normpath(user_change_url + rel_link), os.path.normpath(password_change_url) ) response = self.client.post( password_change_url, { 'password1': 'password1', 'password2': 'password1', } ) self.assertRedirects(response, user_change_url) row = LogEntry.objects.latest('id') self.assertEqual(row.change_message, 'Changed password.') self.logout() self.login(password='password1') def test_user_change_different_user_password(self): u = User.objects.get(email='[email protected]') response = self.client.post( reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)), { 'password1': 'password1', 'password2': 'password1', } ) self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,))) row = LogEntry.objects.latest('id') self.assertEqual(row.user_id, self.admin.pk) self.assertEqual(row.object_id, str(u.pk)) self.assertEqual(row.change_message, 'Changed password.') def test_password_change_bad_url(self): response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',))) self.assertEqual(response.status_code, 404) @override_settings( AUTH_USER_MODEL='auth.UUIDUser', ROOT_URLCONF='auth_tests.urls_custom_user_admin', ) class UUIDUserTests(TestCase): def test_admin_password_change(self): u = UUIDUser.objects.create_superuser(username='uuid', email='[email protected]', password='test') self.assertTrue(self.client.login(username='uuid', password='test')) user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,)) response = self.client.get(user_change_url) self.assertEqual(response.status_code, 200) password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,)) response = self.client.get(password_change_url) self.assertEqual(response.status_code, 200) # A LogEntry is created with pk=1 which breaks a FK constraint on MySQL with connection.constraint_checks_disabled(): response = self.client.post(password_change_url, { 'password1': 'password1', 'password2': 'password1', }) self.assertRedirects(response, user_change_url) row = LogEntry.objects.latest('id') self.assertEqual(row.user_id, 1) # hardcoded in CustomUserAdmin.log_change() self.assertEqual(row.object_id, str(u.pk)) self.assertEqual(row.change_message, 'Changed password.')
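# Illustrative sketch, not part of the test module above: the mechanism that
# ticket #21649 exercises. A password-change view keeps the current session
# alive by calling update_session_auth_hash() (a real django.contrib.auth
# helper); the view and form usage below are placeholders.
#
#   from django.contrib.auth import update_session_auth_hash
#   from django.contrib.auth.forms import PasswordChangeForm
#
#   def change_password(request):
#       form = PasswordChangeForm(request.user, request.POST)
#       if form.is_valid():
#           user = form.save()
#           update_session_auth_hash(request, user)  # session survives the change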
bsd-3-clause
Ingenico-ePayments/connect-sdk-python2
ingenico/connect/sdk/domain/definitions/fraugster_results.py
2
1783
# -*- coding: utf-8 -*- # # This class was auto-generated from the API references found at # https://epayments-api.developer-ingenico.com/s2sapi/v1/ # from ingenico.connect.sdk.data_object import DataObject class FraugsterResults(DataObject): __fraud_investigation_points = None __fraud_score = None @property def fraud_investigation_points(self): """ | Result of the Fraugster check | Contains the investigation points used during the evaluation Type: str """ return self.__fraud_investigation_points @fraud_investigation_points.setter def fraud_investigation_points(self, value): self.__fraud_investigation_points = value @property def fraud_score(self): """ | Result of the Fraugster check | Contains the overall Fraud score which is an integer between 0 and 99 Type: int """ return self.__fraud_score @fraud_score.setter def fraud_score(self, value): self.__fraud_score = value def to_dictionary(self): dictionary = super(FraugsterResults, self).to_dictionary() if self.fraud_investigation_points is not None: dictionary['fraudInvestigationPoints'] = self.fraud_investigation_points if self.fraud_score is not None: dictionary['fraudScore'] = self.fraud_score return dictionary def from_dictionary(self, dictionary): super(FraugsterResults, self).from_dictionary(dictionary) if 'fraudInvestigationPoints' in dictionary: self.fraud_investigation_points = dictionary['fraudInvestigationPoints'] if 'fraudScore' in dictionary: self.fraud_score = dictionary['fraudScore'] return self
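# Illustrative sketch, not part of the generated SDK file above: a minimal
# serialization round trip. The values 42 and 'IP_GEO' are placeholders; the
# SDK's DataObject base class is assumed to be importable as shown.
#
#   results = FraugsterResults()
#   results.fraud_score = 42                      # integer between 0 and 99
#   results.fraud_investigation_points = 'IP_GEO'
#   payload = results.to_dictionary()
#   # payload -> {'fraudInvestigationPoints': 'IP_GEO', 'fraudScore': 42}
#   restored = FraugsterResults().from_dictionary(payload)
#   assert restored.fraud_score == 42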
mit
brianjgeiger/osf.io
api_tests/taxonomies/views/test_taxonomy_list.py
15
4498
import pytest from django.db.models import BooleanField, Case, When from api.base.settings.defaults import API_BASE from osf.models import Subject from osf_tests.factories import SubjectFactory @pytest.mark.django_db class TestTaxonomy: @pytest.fixture(autouse=True) def subject(self): return SubjectFactory(text='A') @pytest.fixture(autouse=True) def subject_other(self): return SubjectFactory(text='Other Sub') @pytest.fixture(autouse=True) def subject_a(self): return SubjectFactory(text='Z') @pytest.fixture(autouse=True) def subject_child_one(self, subject): return SubjectFactory(parent=subject) @pytest.fixture(autouse=True) def subject_child_two(self, subject): return SubjectFactory(parent=subject) @pytest.fixture() def subjects(self): return Subject.objects.all().annotate(is_other=Case( When(text__istartswith='other', then=True), default=False, output_field=BooleanField() )).order_by('is_other', 'text') @pytest.fixture() def url_subject_list(self): return '/{}taxonomies/'.format(API_BASE) @pytest.fixture() def res_subject_list(self, app, url_subject_list): return app.get(url_subject_list) @pytest.fixture() def data_subject_list(self, app, res_subject_list): return res_subject_list.json['data'] def test_taxonomy_other_ordering(self, subject_other, data_subject_list): assert data_subject_list[-1]['id'] == subject_other._id def test_taxonomy_success( self, subject, subject_child_one, subject_child_two, subjects, res_subject_list): # make sure there are subjects to filter through assert len(subjects) > 0 assert res_subject_list.status_code == 200 assert res_subject_list.content_type == 'application/vnd.api+json' def test_taxonomy_text(self, subjects, data_subject_list): for index, subject in enumerate(subjects): if index >= len(data_subject_list): break # only iterate through first page of results assert data_subject_list[index]['attributes']['text'] == subject.text def test_taxonomy_parents(self, subjects, data_subject_list): for index, subject in enumerate(subjects): if index >= len(data_subject_list): break parents_ids = [] for parent in data_subject_list[index]['attributes']['parents']: parents_ids.append(parent['id']) if subject.parent: assert subject.parent._id in parents_ids def test_taxonomy_filter_top_level( self, app, subject, subject_child_one, subject_child_two, url_subject_list): top_level_subjects = Subject.objects.filter(parent__isnull=True) top_level_url = '{}?filter[parents]=null'.format(url_subject_list) res = app.get(top_level_url) assert res.status_code == 200 data = res.json['data'] assert len(top_level_subjects) == len(data) assert len(top_level_subjects) > 0 for subject in data: assert subject['attributes']['parents'] == [] def test_taxonomy_filter_by_parent(self, app, url_subject_list, subject): children_subjects = Subject.objects.filter(parent__id=subject.id) children_url = '{}?filter[parents]={}'.format( url_subject_list, subject._id) res = app.get(children_url) assert res.status_code == 200 data = res.json['data'] assert len(children_subjects) == len(data) for subject_ in data: parents_ids = [] for parent in subject_['attributes']['parents']: parents_ids.append(parent['id']) assert subject._id in parents_ids def test_is_deprecated(self, app, url_subject_list): res = app.get( '{}?version=2.6'.format(url_subject_list), expect_errors=True) assert res.status_code == 404 def test_taxonomy_path(self, data_subject_list): for item in data_subject_list: subj = Subject.objects.get(_id=item['id']) path_parts = item['attributes']['path'].split('|') assert path_parts[0] ==
subj.provider.share_title for index, text in enumerate( [s.text for s in subj.object_hierarchy]): assert path_parts[index + 1] == text
apache-2.0
rtindru/django
django/contrib/gis/gdal/geomtype.py
297
3228
from django.contrib.gis.gdal.error import GDALException from django.utils import six class OGRGeomType(object): "Encapsulates OGR Geometry Types." wkb25bit = -2147483648 # Dictionary of acceptable OGRwkbGeometryType values and their string names. _types = {0: 'Unknown', 1: 'Point', 2: 'LineString', 3: 'Polygon', 4: 'MultiPoint', 5: 'MultiLineString', 6: 'MultiPolygon', 7: 'GeometryCollection', 100: 'None', 101: 'LinearRing', 1 + wkb25bit: 'Point25D', 2 + wkb25bit: 'LineString25D', 3 + wkb25bit: 'Polygon25D', 4 + wkb25bit: 'MultiPoint25D', 5 + wkb25bit: 'MultiLineString25D', 6 + wkb25bit: 'MultiPolygon25D', 7 + wkb25bit: 'GeometryCollection25D', } # Reverse type dictionary, keyed by lower-case of the name. _str_types = {v.lower(): k for k, v in _types.items()} def __init__(self, type_input): "Figures out the correct OGR Type based upon the input." if isinstance(type_input, OGRGeomType): num = type_input.num elif isinstance(type_input, six.string_types): type_input = type_input.lower() if type_input == 'geometry': type_input = 'unknown' num = self._str_types.get(type_input) if num is None: raise GDALException('Invalid OGR String Type "%s"' % type_input) elif isinstance(type_input, int): if type_input not in self._types: raise GDALException('Invalid OGR Integer Type: %d' % type_input) num = type_input else: raise TypeError('Invalid OGR input type given.') # Setting the OGR geometry type number. self.num = num def __str__(self): "Returns the value of the name property." return self.name def __eq__(self, other): """ Does an equivalence test on the OGR type with the given other OGRGeomType, the short-hand string, or the integer. """ if isinstance(other, OGRGeomType): return self.num == other.num elif isinstance(other, six.string_types): return self.name.lower() == other.lower() elif isinstance(other, int): return self.num == other else: return False def __ne__(self, other): return not (self == other) @property def name(self): "Returns a short-hand string form of the OGR Geometry type." return self._types[self.num] @property def django(self): "Returns the Django GeometryField for this OGR Type." s = self.name.replace('25D', '') if s in ('LinearRing', 'None'): return None elif s == 'Unknown': s = 'Geometry' return s + 'Field' def to_multi(self): """ Transform Point, LineString, Polygon, and their 25D equivalents to their Multi... counterpart. """ if self.name.startswith(('Point', 'LineString', 'Polygon')): self.num += 3
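# Illustrative sketch, not part of the module above: OGRGeomType accepts a
# string, an integer, or another OGRGeomType, and __eq__ compares loosely
# against all three forms.
#
#   >>> gt = OGRGeomType('Point')
#   >>> gt.num, gt.name, gt.django
#   (1, 'Point', 'PointField')
#   >>> gt == 1 and gt == 'point' and gt == OGRGeomType(1)
#   True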
bsd-3-clause
Farkal/kivy
kivy/uix/gridlayout.py
12
14937
''' Grid Layout =========== .. only:: html .. image:: images/gridlayout.gif :align: right .. only:: latex .. image:: images/gridlayout.png :align: right .. versionadded:: 1.0.4 The :class:`GridLayout` arranges children in a matrix. It takes the available space and divides it into columns and rows, then adds widgets to the resulting "cells". .. versionchanged:: 1.0.7 The implementation has changed to use the widget size_hint for calculating column/row sizes. `uniform_width` and `uniform_height` have been removed and other properties have been added to give you more control. Background ---------- Unlike many other toolkits, you cannot explicitly place a widget in a specific column/row. Each child is automatically assigned a position determined by the layout configuration and the child's index in the children list. A GridLayout must always have at least one input constraint: :attr:`GridLayout.cols` or :attr:`GridLayout.rows`. If you do not specify cols or rows, the Layout will throw an exception. Column Width and Row Height --------------------------- The column width/row height are determined in 3 steps: - The initial size is given by the :attr:`col_default_width` and :attr:`row_default_height` properties. To customize the size of a single column or row, use :attr:`cols_minimum` or :attr:`rows_minimum`. - The `size_hint_x`/`size_hint_y` of the children are taken into account. If no widgets have a size hint, the maximum size is used for all children. - You can force the default size by setting the :attr:`col_force_default` or :attr:`row_force_default` property. This will force the layout to ignore the `width` and `size_hint` properties of children and use the default size. Using a GridLayout ------------------ In the example below, all widgets will have an equal size. By default, the `size_hint` is (1, 1), so a Widget will take the full size of the parent:: layout = GridLayout(cols=2) layout.add_widget(Button(text='Hello 1')) layout.add_widget(Button(text='World 1')) layout.add_widget(Button(text='Hello 2')) layout.add_widget(Button(text='World 2')) .. image:: images/gridlayout_1.jpg Now, let's fix the size of Hello buttons to 100px instead of using size_hint_x=1:: layout = GridLayout(cols=2) layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 1')) layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 2')) .. image:: images/gridlayout_2.jpg Next, let's fix the row height to a specific size:: layout = GridLayout(cols=2, row_force_default=True, row_default_height=40) layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 1')) layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 2')) .. image:: images/gridlayout_3.jpg ''' __all__ = ('GridLayout', 'GridLayoutException') from kivy.logger import Logger from kivy.uix.layout import Layout from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \ BoundedNumericProperty, ReferenceListProperty, VariableListProperty from math import ceil def nmax(*args): '''(internal) Implementation of a max() function that supports None. ''' # merge into one list args = [x for x in args if x is not None] return max(args) class GridLayoutException(Exception): '''Exception for errors if the grid layout manipulation fails. ''' pass class GridLayout(Layout): '''Grid layout class. See module documentation for more information.
''' spacing = VariableListProperty([0, 0], length=2) '''Spacing between children: [spacing_horizontal, spacing_vertical]. spacing also accepts a one argument form [spacing]. :attr:`spacing` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. ''' padding = VariableListProperty([0, 0, 0, 0]) '''Padding between the layout box and its children: [padding_left, padding_top, padding_right, padding_bottom]. padding also accepts a two argument form [padding_horizontal, padding_vertical] and a one argument form [padding]. .. versionchanged:: 1.7.0 Replaced NumericProperty with VariableListProperty. :attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0, 0, 0]. ''' cols = BoundedNumericProperty(None, min=0, allownone=True) '''Number of columns in the grid. .. versionchanged:: 1.0.8 Changed from a NumericProperty to BoundedNumericProperty. You can no longer set this to a negative value. :attr:`cols` is a :class:`~kivy.properties.BoundedNumericProperty` and defaults to None. ''' rows = BoundedNumericProperty(None, min=0, allownone=True) '''Number of rows in the grid. .. versionchanged:: 1.0.8 Changed from a NumericProperty to a BoundedNumericProperty. You can no longer set this to a negative value. :attr:`rows` is a :class:`~kivy.properties.BoundedNumericProperty` and defaults to None. ''' col_default_width = NumericProperty(0) '''Default minimum size to use for a column. .. versionadded:: 1.0.7 :attr:`col_default_width` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' row_default_height = NumericProperty(0) '''Default minimum size to use for a row. .. versionadded:: 1.0.7 :attr:`row_default_height` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' col_force_default = BooleanProperty(False) '''If True, ignore the width and size_hint_x of the child and use the default column width. .. versionadded:: 1.0.7 :attr:`col_force_default` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' row_force_default = BooleanProperty(False) '''If True, ignore the height and size_hint_y of the child and use the default row height. .. versionadded:: 1.0.7 :attr:`row_force_default` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' cols_minimum = DictProperty({}) '''Dict of minimum sizes for each column, keyed by column index. .. versionadded:: 1.0.7 :attr:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and defaults to {}. ''' rows_minimum = DictProperty({}) '''Dict of minimum sizes for each row, keyed by row index. .. versionadded:: 1.0.7 :attr:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and defaults to {}. ''' minimum_width = NumericProperty(0) '''Minimum width needed to contain all children. .. versionadded:: 1.0.8 :attr:`minimum_width` is a :class:`kivy.properties.NumericProperty` and defaults to 0. ''' minimum_height = NumericProperty(0) '''Minimum height needed to contain all children. .. versionadded:: 1.0.8 :attr:`minimum_height` is a :class:`kivy.properties.NumericProperty` and defaults to 0. ''' minimum_size = ReferenceListProperty(minimum_width, minimum_height) '''Minimum size needed to contain all children. .. versionadded:: 1.0.8 :attr:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of (:attr:`minimum_width`, :attr:`minimum_height`) properties.
''' def __init__(self, **kwargs): self._cols = self._rows = None super(GridLayout, self).__init__(**kwargs) self.bind( col_default_width=self._trigger_layout, row_default_height=self._trigger_layout, col_force_default=self._trigger_layout, row_force_default=self._trigger_layout, cols=self._trigger_layout, rows=self._trigger_layout, parent=self._trigger_layout, spacing=self._trigger_layout, padding=self._trigger_layout, children=self._trigger_layout, size=self._trigger_layout, pos=self._trigger_layout) def get_max_widgets(self): if self.cols and not self.rows: return None if self.rows and not self.cols: return None if not self.cols and not self.rows: return None return self.rows * self.cols def on_children(self, instance, value): # if this makes it impossible to construct things with deferred methods, # migrate this test into do_layout, and/or issue a warning. smax = self.get_max_widgets() if smax and len(value) > smax: raise GridLayoutException( 'Too many children in GridLayout. Increase rows/cols!') def update_minimum_size(self, *largs): # the goal here is to calculate the minimum size of every col/row # and determine if they have stretch or not current_cols = self.cols current_rows = self.rows children = self.children len_children = len(children) # if no cols or rows are set, we can't calculate the minimum size. # the grid must be constrained on at least one side if not current_cols and not current_rows: Logger.warning('%r has no cols or rows set, ' 'layout is not triggered.' % self) return None if current_cols is None: current_cols = int(ceil(len_children / float(current_rows))) elif current_rows is None: current_rows = int(ceil(len_children / float(current_cols))) current_cols = max(1, current_cols) current_rows = max(1, current_rows) cols = [self.col_default_width] * current_cols cols_sh = [None] * current_cols rows = [self.row_default_height] * current_rows rows_sh = [None] * current_rows # update minimum size from the dicts # FIXME index might be outside the bounds?
for index, value in self.cols_minimum.items(): cols[index] = value for index, value in self.rows_minimum.items(): rows[index] = value # calculate minimum size for each column and row i = len_children - 1 for row in range(current_rows): for col in range(current_cols): # don't go further if we don't have children left if i < 0: break # get initial information from the child c = children[i] shw = c.size_hint_x shh = c.size_hint_y w = c.width h = c.height # compute minimum size / maximum stretch needed if shw is None: cols[col] = nmax(cols[col], w) else: cols_sh[col] = nmax(cols_sh[col], shw) if shh is None: rows[row] = nmax(rows[row], h) else: rows_sh[row] = nmax(rows_sh[row], shh) # next child i = i - 1 # calculate minimum width/height needed, starting from padding + # spacing padding_x = self.padding[0] + self.padding[2] padding_y = self.padding[1] + self.padding[3] spacing_x, spacing_y = self.spacing width = padding_x + spacing_x * (current_cols - 1) height = padding_y + spacing_y * (current_rows - 1) # then add the cell size width += sum(cols) height += sum(rows) # remember for layout self._cols = cols self._rows = rows self._cols_sh = cols_sh self._rows_sh = rows_sh # finally, set the minimum size self.minimum_size = (width, height) def do_layout(self, *largs): self.update_minimum_size() if self._cols is None: return if self.cols is None and self.rows is None: raise GridLayoutException('Need at least cols or rows constraint.') children = self.children len_children = len(children) if len_children == 0: return # speedup padding_left = self.padding[0] padding_top = self.padding[1] spacing_x, spacing_y = self.spacing selfx = self.x selfw = self.width selfh = self.height # resolve size for each column if self.col_force_default: cols = [self.col_default_width] * len(self._cols) for index, value in self.cols_minimum.items(): cols[index] = value else: cols = self._cols[:] cols_sh = self._cols_sh cols_weight = sum([x for x in cols_sh if x]) stretch_w = max(0, selfw - self.minimum_width) for index in range(len(cols)): # if the col doesn't have stretch information, nothing to do col_stretch = cols_sh[index] if col_stretch is None: continue # calculate the column stretch, and take the maximum of the # minimum size and the calculated stretch col_width = cols[index] col_width = max(col_width, stretch_w * col_stretch / cols_weight) cols[index] = col_width # same algo for rows if self.row_force_default: rows = [self.row_default_height] * len(self._rows) for index, value in self.rows_minimum.items(): rows[index] = value else: rows = self._rows[:] rows_sh = self._rows_sh rows_weight = sum([x for x in rows_sh if x]) stretch_h = max(0, selfh - self.minimum_height) for index in range(len(rows)): # if the row doesn't have stretch information, nothing to do row_stretch = rows_sh[index] if row_stretch is None: continue # calculate the row stretch, and take the maximum of the minimum # size and the calculated stretch row_height = rows[index] row_height = max(row_height, stretch_h * row_stretch / rows_weight) rows[index] = row_height # reposition every child i = len_children - 1 y = self.top - padding_top for row_height in rows: x = selfx + padding_left for col_width in cols: if i < 0: break c = children[i] c.x = x c.y = y - row_height c.width = col_width c.height = row_height i = i - 1 x = x + col_width + spacing_x y -= row_height + spacing_y
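# Illustrative sketch, not part of the module above: the module docstring
# mentions cols_minimum/rows_minimum without an example, so here is a minimal
# one -- column 0 is guaranteed at least 200px while column 1 keeps its
# size_hint-driven width.
#
#   layout = GridLayout(cols=2, cols_minimum={0: 200})
#   layout.add_widget(Button(text='at least 200px wide'))
#   layout.add_widget(Button(text='stretches'))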
mit
fdslight/fdslight
freenet/handlers/tundev.py
1
5566
#!/usr/bin/env python3 import os, sys import pywind.evtframework.handlers.handler as handler import freenet.lib.fn_utils as fn_utils import freenet.lib.simple_qos as simple_qos try: import fcntl except ImportError: pass class tun_base(handler.handler): __creator_fd = None # IP packets queued to be written to the tun device ___ip_packets_for_write = [] # maximum number of IP packets that may be queued for writing to the tun device __MAX_WRITE_QUEUE_SIZE = 1024 # current number of IP packets waiting to be written to the tun device __current_write_queue_n = 0 __BLOCK_SIZE = 16 * 1024 __qos = None def __create_tun_dev(self, name): """Create the tun device :param name: :return fd: """ tun_fd = fn_utils.tuntap_create(name, fn_utils.IFF_TUN | fn_utils.IFF_NO_PI) fn_utils.interface_up(name) if tun_fd < 0: raise SystemError("can not create tun device, please check your root") return tun_fd @property def creator(self): return self.__creator_fd def init_func(self, creator_fd, tun_dev_name, *args, **kwargs): """ :param creator_fd: :param tun_dev_name: name of the tun device :param subnet: required only when running as the server """ tun_fd = self.__create_tun_dev(tun_dev_name) if tun_fd < 3: print("error:create tun device failed:%s" % tun_dev_name) sys.exit(-1) self.__creator_fd = creator_fd self.__qos = simple_qos.qos(simple_qos.QTYPE_DST) self.set_fileno(tun_fd) fcntl.fcntl(tun_fd, fcntl.F_SETFL, os.O_NONBLOCK) self.dev_init(tun_dev_name, *args, **kwargs) return tun_fd def dev_init(self, dev_name, *args, **kwargs): pass def evt_read(self): for i in range(32): try: ip_packet = os.read(self.fileno, self.__BLOCK_SIZE) except BlockingIOError: break self.__qos.add_to_queue(ip_packet) self.__qos_from_tundev() def task_loop(self): self.__qos_from_tundev() def __qos_from_tundev(self): results = self.__qos.get_queue() for ip_packet in results: self.handle_ip_packet_from_read(ip_packet) if not results: self.del_loop_task(self.fileno) else: self.add_to_loop_task(self.fileno) def evt_write(self): try: ip_packet = self.___ip_packets_for_write.pop(0) except IndexError: self.remove_evt_write(self.fileno) return self.__current_write_queue_n -= 1 try: os.write(self.fileno, ip_packet) except BlockingIOError: self.__current_write_queue_n += 1 self.___ip_packets_for_write.insert(0, ip_packet) return '''''' def handle_ip_packet_from_read(self, ip_packet): """Handle an IP packet read from the device; override this method :param ip_packet: :return None: """ pass def handle_ip_packet_for_write(self, ip_packet): """Handle an IP packet about to be written; override this method :param ip_packet: :return new_ip_packet: """ pass def error(self): self.dev_error() def dev_error(self): """Override this method :return: """ pass def timeout(self): self.dev_timeout() def dev_timeout(self): """Override this method :return: """ pass def delete(self): self.dev_delete() def dev_delete(self): """Override this method :return: """ pass def add_to_sent_queue(self, ip_packet): # drop packets that exceed the limit, to avoid excessive memory use n_ip_message = self.handle_ip_packet_for_write(ip_packet) if not n_ip_message: return if self.__current_write_queue_n == self.__MAX_WRITE_QUEUE_SIZE: # drop the first packet to keep the queue from growing too large self.__current_write_queue_n -= 1 self.___ip_packets_for_write.pop(0) return self.__current_write_queue_n += 1 self.___ip_packets_for_write.append(n_ip_message) class tundevs(tun_base): """tun data handling on the server side """ def dev_init(self, dev_name): self.register(self.fileno) self.add_evt_read(self.fileno) def handle_ip_packet_from_read(self, ip_packet): self.dispatcher.send_msg_to_tunnel_from_tun(ip_packet) def handle_ip_packet_for_write(self, ip_packet): return ip_packet def dev_delete(self): self.unregister(self.fileno) os.close(self.fileno) def dev_error(self): self.delete_handler(self.fileno) def dev_timeout(self): pass def handle_msg_from_tunnel(self, message): self.add_to_sent_queue(message)
self.add_evt_write(self.fileno) class tundevc(tun_base): def dev_init(self, dev_name): self.register(self.fileno) self.add_evt_read(self.fileno) def handle_ip_packet_from_read(self, ip_packet): self.dispatcher.handle_msg_from_tundev(ip_packet) def handle_ip_packet_for_write(self, ip_packet): return ip_packet def dev_delete(self): self.unregister(self.fileno) os.close(self.fileno) def dev_error(self): self.delete_handler(self.fileno) def dev_timeout(self): pass def msg_from_tunnel(self, message): self.add_to_sent_queue(message) self.add_evt_write(self.fileno)
bsd-2-clause
nuodb/nuodb-django
test/auth/tests/tokens.py
1
2747
import sys from datetime import date, timedelta from django.conf import settings from django.contrib.auth.models import User from django.contrib.auth.tokens import PasswordResetTokenGenerator from django.contrib.auth.tests.utils import skipIfCustomUser from django.test import TestCase from django.utils import unittest from django.test.testcases import skipIfDBFeature @skipIfCustomUser class TokenGeneratorTest(TestCase): def test_make_token(self): """ Ensure that we can make a token and that it is valid """ user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) self.assertTrue(p0.check_token(user, tk1)) @skipIfDBFeature('supports_transactions') def test_10265(self): """ Ensure that the token generated for a user created in the same request will work correctly. """ # See ticket #10265 user = User.objects.create_user('comebackkid', '[email protected]', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) reload = User.objects.get(username='comebackkid') tk2 = p0.make_token(reload) self.assertEqual(tk1, tk2) def test_timeout(self): """ Ensure we can use the token after n days, but no greater. """ # Uses a mocked version of PasswordResetTokenGenerator so we can change # the value of 'today' class Mocked(PasswordResetTokenGenerator): def __init__(self, today): self._today_val = today def _today(self): return self._today_val user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw') p0 = PasswordResetTokenGenerator() tk1 = p0.make_token(user) p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS)) self.assertTrue(p1.check_token(user, tk1)) p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1)) self.assertFalse(p2.check_token(user, tk1)) @unittest.skipIf(sys.version_info[:2] >= (3, 0), "Unnecessary test with Python 3") def test_date_length(self): """ Make sure we don't allow overly long dates, causing a potential DoS. """ user = User.objects.create_user('ima1337h4x0r', '[email protected]', 'p4ssw0rd') p0 = PasswordResetTokenGenerator() # This will put a 14-digit base36 timestamp into the token, which is too large. self.assertRaises(ValueError, p0._make_token_with_timestamp, user, 175455491841851871349)
bsd-3-clause
Evervolv/android_external_chromium_org
chrome/common/extensions/docs/server2/subversion_file_system.py
23
7618
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import posixpath import traceback import xml.dom.minidom as xml from xml.parsers.expat import ExpatError from appengine_url_fetcher import AppEngineUrlFetcher from docs_server_utils import StringIdentity from file_system import FileSystem, FileNotFoundError, StatInfo, ToUnicode from future import Future import svn_constants import url_constants def _ParseHTML(html): '''Unfortunately, the viewvc page has a stray </div> tag, so this takes care of all mismatched tags. ''' try: return xml.parseString(html) except ExpatError as e: return _ParseHTML('\n'.join( line for (i, line) in enumerate(html.split('\n')) if e.lineno != i + 1)) def _InnerText(node): '''Like node.innerText in JS DOM, but strips surrounding whitespace. ''' text = [] if node.nodeValue: text.append(node.nodeValue) if hasattr(node, 'childNodes'): for child_node in node.childNodes: text.append(_InnerText(child_node)) return ''.join(text).strip() def _CreateStatInfo(html): parent_version = None child_versions = {} # Try all of the tables until we find the ones that contain the data (the # directory and file versions are in different tables). for table in _ParseHTML(html).getElementsByTagName('table'): # Within the table there is a list of files. However, there may be some # things beforehand; a header, "parent directory" list, etc. We will deal # with that below by being generous and just ignoring such rows. rows = table.getElementsByTagName('tr') for row in rows: cells = row.getElementsByTagName('td') # The version of the directory will eventually appear in the soup of # table rows, like this: # # <tr> # <td>Directory revision:</td> # <td><a href=... title="Revision 214692">214692</a> (of...)</td> # </tr> # # So look out for that. if len(cells) == 2 and _InnerText(cells[0]) == 'Directory revision:': links = cells[1].getElementsByTagName('a') if len(links) != 2: raise ValueError('ViewVC assumption invalid: directory revision ' 'content did not have 2 <a> elements, instead %s' % _InnerText(cells[1])) this_parent_version = _InnerText(links[0]) int(this_parent_version) # sanity check if parent_version is not None: raise ValueError('There was already a parent version %s, and we ' 'just found a second at %s' % (parent_version, this_parent_version)) parent_version = this_parent_version # The version of each file is a list of rows with 5 cells: name, version, # age, author, and last log entry. Maybe the columns will change; we're # at the mercy of viewvc, but this constant can be easily updated. if len(cells) != 5: continue name_element, version_element, _, __, ___ = cells name = _InnerText(name_element) # note: will end in / for directories try: version = int(_InnerText(version_element)) except StandardError: continue child_versions[name] = str(version) if parent_version and child_versions: break return StatInfo(parent_version, child_versions) class _AsyncFetchFuture(object): def __init__(self, paths, fetcher, binary, args=None): def apply_args(path): return path if args is None else '%s?%s' % (path, args) # A list of tuples of the form (path, Future). self._fetches = [(path, fetcher.FetchAsync(apply_args(path))) for path in paths] self._value = {} self._error = None self._binary = binary def _ListDir(self, directory): dom = xml.parseString(directory) files = [elem.childNodes[0].data for elem in dom.getElementsByTagName('a')] if '..'
in files: files.remove('..') return files def Get(self): for path, future in self._fetches: try: result = future.Get() except Exception as e: raise FileNotFoundError( '%s fetching %s for Get: %s' % (e.__class__.__name__, path, e)) if result.status_code == 404: raise FileNotFoundError('Got 404 when fetching %s for Get' % path) elif path.endswith('/'): self._value[path] = self._ListDir(result.content) elif not self._binary: self._value[path] = ToUnicode(result.content) else: self._value[path] = result.content if self._error is not None: raise self._error return self._value class SubversionFileSystem(FileSystem): '''Class to fetch resources from src.chromium.org. ''' @staticmethod def Create(branch='trunk', revision=None): if branch == 'trunk': svn_path = 'trunk/src/%s' % svn_constants.EXTENSIONS_PATH else: svn_path = 'branches/%s/src/%s' % (branch, svn_constants.EXTENSIONS_PATH) return SubversionFileSystem( AppEngineUrlFetcher('%s/%s' % (url_constants.SVN_URL, svn_path)), AppEngineUrlFetcher('%s/%s' % (url_constants.VIEWVC_URL, svn_path)), svn_path, revision=revision) def __init__(self, file_fetcher, stat_fetcher, svn_path, revision=None): self._file_fetcher = file_fetcher self._stat_fetcher = stat_fetcher self._svn_path = svn_path self._revision = revision def Read(self, paths, binary=False): args = None if self._revision is not None: # |file_fetcher| gets from svn.chromium.org which uses p= for version. args = 'p=%s' % self._revision return Future(delegate=_AsyncFetchFuture(paths, self._file_fetcher, binary, args=args)) def Stat(self, path): directory, filename = posixpath.split(path) directory += '/' if self._revision is not None: # |stat_fetcher| uses viewvc which uses pathrev= for version. directory += '?pathrev=%s' % self._revision try: result = self._stat_fetcher.Fetch(directory) except Exception as e: # Convert all errors (typically some sort of DeadlineExceededError but # explicitly catching that seems not to work) to a FileNotFoundError to # reduce the exception-catching surface area of this class. raise FileNotFoundError( '%s fetching %s for Stat: %s' % (e.__class__.__name__, path, e)) if result.status_code != 200: raise FileNotFoundError('Got %s when fetching %s for Stat' % ( result.status_code, path)) stat_info = _CreateStatInfo(result.content) if stat_info.version is None: raise ValueError('Failed to find version of dir %s' % directory) if path.endswith('/'): return stat_info if filename not in stat_info.child_versions: raise FileNotFoundError( '%s from %s was not in child versions for Stat' % (filename, path)) return StatInfo(stat_info.child_versions[filename]) def GetIdentity(self): # NOTE: no revision here, since it would mess up the caching of reads. It # probably doesn't matter since all the caching classes will use the result # of Stat to decide whether to re-read - and Stat has a ceiling of the # revision - so when the revision changes, so might Stat. That is enough. return '@'.join((self.__class__.__name__, StringIdentity(self._svn_path)))
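# Illustrative sketch, not part of the file above; the paths are hypothetical
# and an App Engine fetch environment is assumed:
#
#   fs = SubversionFileSystem.Create(branch='trunk')
#   listing = fs.Read(['docs/']).Get()   # dict of path -> contents; a
#                                        # trailing '/' yields a dir listing
#   info = fs.Stat('docs/README')        # StatInfo, or FileNotFoundError on 404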
bsd-3-clause
sdague/home-assistant
homeassistant/scripts/ensure_config.py
18
1344
"""Script to ensure a configuration file exists.""" import argparse import asyncio import os import homeassistant.config as config_util from homeassistant.core import HomeAssistant # mypy: allow-untyped-calls, allow-untyped-defs def run(args): """Handle ensure config commandline script.""" parser = argparse.ArgumentParser( description=("Ensure a Home Assistant config exists, creates one if necessary.") ) parser.add_argument( "-c", "--config", metavar="path_to_config_dir", default=config_util.get_default_config_dir(), help="Directory that contains the Home Assistant configuration", ) parser.add_argument("--script", choices=["ensure_config"]) args = parser.parse_args() config_dir = os.path.join(os.getcwd(), args.config) # Test if configuration directory exists if not os.path.isdir(config_dir): print("Creating directory", config_dir) os.makedirs(config_dir) config_path = asyncio.run(async_run(config_dir)) print("Configuration file:", config_path) return 0 async def async_run(config_dir): """Make sure config exists.""" hass = HomeAssistant() hass.config.config_dir = config_dir path = await config_util.async_ensure_config_exists(hass) await hass.async_stop(force=True) return path
apache-2.0
joelddiaz/openshift-tools
ansible/roles/lib_zabbix/library/zbx_trigger.py
13
8058
#!/usr/bin/env python ''' ansible module for zabbix triggers ''' # vim: expandtab:tabstop=4:shiftwidth=4 # # Zabbix trigger ansible module # # # Copyright 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This is in place because each module looks similar to each other. # These need duplicate code as their behavior is very similar # but different for each zabbix class. # pylint: disable=duplicate-code # pylint: disable=import-error from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection def exists(content, key='result'): ''' Check if key exists in content or the size of content[key] > 0 ''' if not content.has_key(key): return False if not content[key]: return False return True def get_priority(priority): ''' determine priority ''' prior = 0 if 'info' in priority: prior = 1 elif 'warn' in priority: prior = 2 elif 'avg' == priority or 'ave' in priority: prior = 3 elif 'high' in priority: prior = 4 elif 'dis' in priority: prior = 5 return prior def get_deps(zapi, deps): ''' get trigger dependencies ''' results = [] for desc in deps: content = zapi.get_content('trigger', 'get', {'filter': {'description': desc}, 'expandExpression': True, 'selectDependencies': 'triggerid', }) if content.has_key('result'): results.append({'triggerid': content['result'][0]['triggerid']}) return results def get_trigger_status(inc_status): ''' Determine the trigger's status 0 is enabled 1 is disabled ''' r_status = 0 if inc_status == 'disabled': r_status = 1 return r_status def get_template_id(zapi, template_name): ''' get related templates ''' template_ids = [] app_ids = {} # Fetch templates by name content = zapi.get_content('template', 'get', {'search': {'host': template_name}, 'selectApplications': ['applicationid', 'name']}) if content.has_key('result'): template_ids.append(content['result'][0]['templateid']) for app in content['result'][0]['applications']: app_ids[app['name']] = app['applicationid'] return template_ids, app_ids def main(): ''' Create a trigger in zabbix Example: "params": { "description": "Processor load is too high on {HOST.NAME}", "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", "dependencies": [ { "triggerid": "14062" } ] }, ''' module = AnsibleModule( argument_spec=dict( zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), zbx_debug=dict(default=False, type='bool'), expression=dict(default=None, type='str'), name=dict(default=None, type='str'), description=dict(default=None, type='str'), dependencies=dict(default=[], type='list'), priority=dict(default='avg', type='str'), url=dict(default=None, type='str'), status=dict(default=None, type='str'), state=dict(default='present', type='str'), template_name=dict(default=None, type='str'), hostgroup_name=dict(default=None, type='str'), query_type=dict(default='filter', choices=['filter', 'search'], type='str'), ), 
#supports_check_mode=True ) zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], module.params['zbx_user'], module.params['zbx_password'], module.params['zbx_debug'])) #Set the instance and the template for the rest of the calls zbx_class_name = 'trigger' idname = "triggerid" state = module.params['state'] tname = module.params['name'] templateid = None if module.params['template_name']: templateid, _ = get_template_id(zapi, module.params['template_name']) content = zapi.get_content(zbx_class_name, 'get', {module.params['query_type']: {'description': tname}, 'expandExpression': True, 'selectDependencies': 'triggerid', 'templateids': templateid, 'group': module.params['hostgroup_name'], }) # Get if state == 'list': module.exit_json(changed=False, results=content['result'], state="list") # Delete if state == 'absent': if not exists(content): module.exit_json(changed=False, state="absent") content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) module.exit_json(changed=True, results=content['result'], state="absent") # Create and Update if state == 'present': params = {'description': tname, 'comments': module.params['description'], 'expression': module.params['expression'], 'dependencies': get_deps(zapi, module.params['dependencies']), 'priority': get_priority(module.params['priority']), 'url': module.params['url'], 'status': get_trigger_status(module.params['status']), } # Remove any None valued params _ = [params.pop(key, None) for key in params.keys() if params[key] is None] #******# # CREATE #******# if not exists(content): # if we didn't find it, create it content = zapi.get_content(zbx_class_name, 'create', params) if content.has_key('error'): module.fail_json(msg=content['error']) module.exit_json(changed=True, results=content['result'], state='present') ######## # UPDATE ######## differences = {} zab_results = content['result'][0] for key, value in params.items(): if zab_results[key] != value and zab_results[key] != str(value): differences[key] = value if not differences: module.exit_json(changed=False, results=zab_results, state="present") # We have differences and need to update differences[idname] = zab_results[idname] content = zapi.get_content(zbx_class_name, 'update', differences) if content.has_key('error'): module.fail_json(msg=content['error']) module.exit_json(changed=True, results=content['result'], state="present") module.exit_json(failed=True, changed=False, results='Unknown state passed. %s' % state, state="unknown") # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled # import module snippets. This are required from ansible.module_utils.basic import * main()
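# Illustrative playbook task, not part of the module above. Host, credentials
# and trigger values are placeholders; the parameters mirror the argument_spec
# in main():
#
#   - zbx_trigger:
#       zbx_server: https://zabbix.example.com/zabbix/api_jsonrpc.php
#       zbx_user: "{{ zbx_user }}"
#       zbx_password: "{{ zbx_password }}"
#       name: 'Processor load is too high on {HOST.NAME}'
#       expression: '{Linux server:system.cpu.load[percpu,avg1].last()}>5'
#       priority: high
#       state: present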
apache-2.0
JuliBakagianni/CEF-ELRC
lib/python2.7/site-packages/django_jenkins/tasks/run_pyflakes.py
3
1798
# -*- coding: utf-8 -*- import os import re import sys from pyflakes.scripts import pyflakes from cStringIO import StringIO from django_jenkins.functions import relpath from django_jenkins.tasks import BaseTask, get_apps_locations class Task(BaseTask): def __init__(self, test_labels, options): super(Task, self).__init__(test_labels, options) self.test_all = options['test_all'] if options.get('pyflakes_file_output', True): output_dir = options['output_dir'] if not os.path.exists(output_dir): os.makedirs(output_dir) self.output = open(os.path.join(output_dir, 'pyflakes.report'), 'w') else: self.output = sys.stdout def teardown_test_environment(self, **kwargs): locations = get_apps_locations(self.test_labels, self.test_all) # run pyflakes tool with captured output old_stdout, pyflakes_output = sys.stdout, StringIO() sys.stdout = pyflakes_output try: for location in locations: if os.path.isdir(location): for dirpath, dirnames, filenames in os.walk(relpath(location)): for filename in filenames: if filename.endswith('.py'): pyflakes.checkPath(os.path.join(dirpath, filename)) else: pyflakes.checkPath(relpath(location)) finally: sys.stdout = old_stdout # save report pyflakes_output.reset() while True: line = pyflakes_output.readline() if not line: break message = re.sub(r': ', r': [E] PYFLAKES:', line) self.output.write(message) self.output.close()
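# Illustrative wiring, following the usual django-jenkins convention (the app
# name is a placeholder): enabling this task in settings.py makes
# `manage.py jenkins` run pyflakes and write <output_dir>/pyflakes.report.
#
#   PROJECT_APPS = ('myapp',)
#   JENKINS_TASKS = ('django_jenkins.tasks.run_pyflakes',)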
bsd-3-clause
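The core trick in the pyflakes task above is swapping `sys.stdout` for an in-memory buffer so pyflakes' print-based reporting can be captured and post-processed. The same pattern in isolation, using Python 3's `io.StringIO` in place of the `cStringIO` the task imports:

```python
import sys
from io import StringIO

def capture_stdout(func, *args, **kwargs):
    # Swap sys.stdout for a buffer, run the callable, then restore it
    # in a finally block so an exception can't leave stdout redirected.
    old_stdout, buf = sys.stdout, StringIO()
    sys.stdout = buf
    try:
        func(*args, **kwargs)
    finally:
        sys.stdout = old_stdout
    return buf.getvalue()

report = capture_stdout(print, "example pyflakes output")
```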
pjdelport/django
tests/modeltests/update_only_fields/tests.py
7
9587
from __future__ import absolute_import from django.db.models.signals import pre_save, post_save from django.test import TestCase from .models import Person, Employee, ProxyEmployee, Profile, Account class UpdateOnlyFieldsTests(TestCase): def test_update_fields_basic(self): s = Person.objects.create(name='Sara', gender='F') self.assertEqual(s.gender, 'F') s.gender = 'M' s.name = 'Ian' s.save(update_fields=['name']) s = Person.objects.get(pk=s.pk) self.assertEqual(s.gender, 'F') self.assertEqual(s.name, 'Ian') def test_update_fields_deferred(self): s = Person.objects.create(name='Sara', gender='F', pid=22) self.assertEqual(s.gender, 'F') s1 = Person.objects.defer("gender", "pid").get(pk=s.pk) s1.name = "Emily" s1.gender = "M" with self.assertNumQueries(1): s1.save() s2 = Person.objects.get(pk=s1.pk) self.assertEqual(s2.name, "Emily") self.assertEqual(s2.gender, "M") def test_update_fields_only_1(self): s = Person.objects.create(name='Sara', gender='F') self.assertEqual(s.gender, 'F') s1 = Person.objects.only('name').get(pk=s.pk) s1.name = "Emily" s1.gender = "M" with self.assertNumQueries(1): s1.save() s2 = Person.objects.get(pk=s1.pk) self.assertEqual(s2.name, "Emily") self.assertEqual(s2.gender, "M") def test_update_fields_only_2(self): s = Person.objects.create(name='Sara', gender='F', pid=22) self.assertEqual(s.gender, 'F') s1 = Person.objects.only('name').get(pk=s.pk) s1.name = "Emily" s1.gender = "M" with self.assertNumQueries(2): s1.save(update_fields=['pid']) s2 = Person.objects.get(pk=s1.pk) self.assertEqual(s2.name, "Sara") self.assertEqual(s2.gender, "F") def test_update_fields_only_repeated(self): s = Person.objects.create(name='Sara', gender='F') self.assertEqual(s.gender, 'F') s1 = Person.objects.only('name').get(pk=s.pk) s1.gender = 'M' with self.assertNumQueries(1): s1.save() # Test that the deferred class does not remember that gender was # set, instead the instace should remember this. s1 = Person.objects.only('name').get(pk=s.pk) with self.assertNumQueries(1): s1.save() def test_update_fields_inheritance_defer(self): profile_boss = Profile.objects.create(name='Boss', salary=3000) e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss) e1 = Employee.objects.only('name').get(pk=e1.pk) e1.name = 'Linda' with self.assertNumQueries(1): e1.save() self.assertEqual(Employee.objects.get(pk=e1.pk).name, 'Linda') def test_update_fields_fk_defer(self): profile_boss = Profile.objects.create(name='Boss', salary=3000) profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000) e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss) e1 = Employee.objects.only('profile').get(pk=e1.pk) e1.profile = profile_receptionist with self.assertNumQueries(1): e1.save() self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist) e1.profile_id = profile_boss.pk with self.assertNumQueries(1): e1.save() self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss) def test_select_related_only_interaction(self): profile_boss = Profile.objects.create(name='Boss', salary=3000) e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss) e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk) profile_boss.name = 'Clerk' profile_boss.salary = 1000 profile_boss.save() # The loaded salary of 3000 gets saved, the name of 'Clerk' isn't # overwritten. 
with self.assertNumQueries(1): e1.profile.save() reloaded_profile = Profile.objects.get(pk=profile_boss.pk) self.assertEqual(reloaded_profile.name, profile_boss.name) self.assertEqual(reloaded_profile.salary, 3000) def test_update_fields_m2m(self): profile_boss = Profile.objects.create(name='Boss', salary=3000) e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss) a1 = Account.objects.create(num=1) a2 = Account.objects.create(num=2) e1.accounts = [a1,a2] with self.assertRaises(ValueError): e1.save(update_fields=['accounts']) def test_update_fields_inheritance(self): profile_boss = Profile.objects.create(name='Boss', salary=3000) profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000) e1 = Employee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss) e1.name = 'Ian' e1.gender = 'M' e1.save(update_fields=['name']) e2 = Employee.objects.get(pk=e1.pk) self.assertEqual(e2.name, 'Ian') self.assertEqual(e2.gender, 'F') self.assertEqual(e2.profile, profile_boss) e2.profile = profile_receptionist e2.name = 'Sara' e2.save(update_fields=['profile']) e3 = Employee.objects.get(pk=e1.pk) self.assertEqual(e3.name, 'Ian') self.assertEqual(e3.profile, profile_receptionist) with self.assertNumQueries(1): e3.profile = profile_boss e3.save(update_fields=['profile_id']) e4 = Employee.objects.get(pk=e3.pk) self.assertEqual(e4.profile, profile_boss) self.assertEqual(e4.profile_id, profile_boss.pk) def test_update_fields_inheritance_with_proxy_model(self): profile_boss = Profile.objects.create(name='Boss', salary=3000) profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000) e1 = ProxyEmployee.objects.create(name='Sara', gender='F', employee_num=1, profile=profile_boss) e1.name = 'Ian' e1.gender = 'M' e1.save(update_fields=['name']) e2 = ProxyEmployee.objects.get(pk=e1.pk) self.assertEqual(e2.name, 'Ian') self.assertEqual(e2.gender, 'F') self.assertEqual(e2.profile, profile_boss) e2.profile = profile_receptionist e2.name = 'Sara' e2.save(update_fields=['profile']) e3 = ProxyEmployee.objects.get(pk=e1.pk) self.assertEqual(e3.name, 'Ian') self.assertEqual(e3.profile, profile_receptionist) def test_update_fields_signals(self): p = Person.objects.create(name='Sara', gender='F') pre_save_data = [] def pre_save_receiver(**kwargs): pre_save_data.append(kwargs['update_fields']) pre_save.connect(pre_save_receiver) post_save_data = [] def post_save_receiver(**kwargs): post_save_data.append(kwargs['update_fields']) post_save.connect(post_save_receiver) p.save(update_fields=['name']) self.assertEqual(len(pre_save_data), 1) self.assertEqual(len(pre_save_data[0]), 1) self.assertTrue('name' in pre_save_data[0]) self.assertEqual(len(post_save_data), 1) self.assertEqual(len(post_save_data[0]), 1) self.assertTrue('name' in post_save_data[0]) def test_update_fields_incorrect_params(self): s = Person.objects.create(name='Sara', gender='F') with self.assertRaises(ValueError): s.save(update_fields=['first_name']) with self.assertRaises(ValueError): s.save(update_fields="name") def test_empty_update_fields(self): s = Person.objects.create(name='Sara', gender='F') pre_save_data = [] def pre_save_receiver(**kwargs): pre_save_data.append(kwargs['update_fields']) pre_save.connect(pre_save_receiver) post_save_data = [] def post_save_receiver(**kwargs): post_save_data.append(kwargs['update_fields']) post_save.connect(post_save_receiver) # Save is skipped. 
with self.assertNumQueries(0): s.save(update_fields=[]) # Signals were skipped, too... self.assertEqual(len(pre_save_data), 0) self.assertEqual(len(post_save_data), 0) def test_num_queries_inheritance(self): s = Employee.objects.create(name='Sara', gender='F') s.employee_num = 1 s.name = 'Emily' with self.assertNumQueries(1): s.save(update_fields=['employee_num']) s = Employee.objects.get(pk=s.pk) self.assertEqual(s.employee_num, 1) self.assertEqual(s.name, 'Sara') s.employee_num = 2 s.name = 'Emily' with self.assertNumQueries(1): s.save(update_fields=['name']) s = Employee.objects.get(pk=s.pk) self.assertEqual(s.name, 'Emily') self.assertEqual(s.employee_num, 1) # A little sanity check that we actually did updates... self.assertEqual(Employee.objects.count(), 1) self.assertEqual(Person.objects.count(), 1) with self.assertNumQueries(2): s.save(update_fields=['name', 'employee_num'])
bsd-3-clause
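The behavior this suite exercises: passing `update_fields` to `save()` restricts the UPDATE statement to those columns, so writes to other fields are never sent. A sketch of the round trip using the suite's own `Person` model (the SQL in the comment is illustrative):

```python
p = Person.objects.create(name='Sara', gender='F')
p.name, p.gender = 'Ian', 'M'
p.save(update_fields=['name'])   # roughly: UPDATE ... SET name='Ian' WHERE id=...
p = Person.objects.get(pk=p.pk)
assert (p.name, p.gender) == ('Ian', 'F')  # the gender write was never issued
```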
sserrot/champion_relationships
venv/Lib/site-packages/pygments/styles/vim.py
4
1976
# -*- coding: utf-8 -*- """ pygments.styles.vim ~~~~~~~~~~~~~~~~~~~ A highlighting style for Pygments, inspired by vim. :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Token class VimStyle(Style): """ Styles somewhat like vim 7.0 """ background_color = "#000000" highlight_color = "#222222" default_style = "#cccccc" styles = { Token: "#cccccc", Whitespace: "", Comment: "#000080", Comment.Preproc: "", Comment.Special: "bold #cd0000", Keyword: "#cdcd00", Keyword.Declaration: "#00cd00", Keyword.Namespace: "#cd00cd", Keyword.Pseudo: "", Keyword.Type: "#00cd00", Operator: "#3399cc", Operator.Word: "#cdcd00", Name: "", Name.Class: "#00cdcd", Name.Builtin: "#cd00cd", Name.Exception: "bold #666699", Name.Variable: "#00cdcd", String: "#cd0000", Number: "#cd00cd", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#cd0000", Generic.Inserted: "#00cd00", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #000080", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "border:#FF0000" }
mit
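Pygments styles like this one are plugged in by handing the class (or its registered name, here `'vim'`) to a formatter. A minimal usage sketch:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from pygments.styles.vim import VimStyle

# HtmlFormatter accepts either the style class or the registered name 'vim'.
html = highlight('print("hello")', PythonLexer(), HtmlFormatter(style=VimStyle))
```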
darkleons/lama
addons/procurement/procurement.py
44
15869
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from psycopg2 import OperationalError from openerp import SUPERUSER_ID from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.tools.translate import _ import openerp PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')] class procurement_group(osv.osv): ''' The procurement group class is used to group products together when computing procurements. (tasks, physical products, ...) The goal is that when you have one sale order of several products and the products are pulled from the same or several location(s), to keep having the moves grouped into pickings that represent the sale order. Used in: sales order (to group delivery order lines like the so), pull/push rules (to pack like the delivery order), on orderpoints (e.g. for wave picking all the similar products together). Grouping is made only if the source and the destination is the same. Suppose you have 4 lines on a picking from Output where 2 lines will need to come from Input (crossdock) and 2 lines coming from Stock -> Output As the four procurement orders will have the same group ids from the SO, the move from input will have a stock.picking with 2 grouped lines and the move from stock will have 2 grouped lines also. The name is usually the name of the original document (sale order) or a sequence computed if created manually. ''' _name = 'procurement.group' _description = 'Procurement Requisition' _order = "id desc" _columns = { 'name': fields.char('Reference', required=True), 'move_type': fields.selection([ ('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True), 'procurement_ids': fields.one2many('procurement.order', 'group_id', 'Procurements'), } _defaults = { 'name': lambda self, cr, uid, c: self.pool.get('ir.sequence').get(cr, uid, 'procurement.group') or '', 'move_type': lambda self, cr, uid, c: 'direct' } class procurement_rule(osv.osv): ''' A rule describe what a procurement should do; produce, buy, move, ... 
''' _name = 'procurement.rule' _description = "Procurement Rule" _order = "name" def _get_action(self, cr, uid, context=None): return [] _columns = { 'name': fields.char('Name', required=True, help="This field will fill the packing origin and the name of its moves"), 'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."), 'group_propagation_option': fields.selection([('none', 'Leave Empty'), ('propagate', 'Propagate'), ('fixed', 'Fixed')], string="Propagation of Procurement Group"), 'group_id': fields.many2one('procurement.group', 'Fixed Procurement Group'), 'action': fields.selection(selection=lambda s, cr, uid, context=None: s._get_action(cr, uid, context=context), string='Action', required=True), 'sequence': fields.integer('Sequence'), 'company_id': fields.many2one('res.company', 'Company'), } _defaults = { 'group_propagation_option': 'propagate', 'sequence': 20, 'active': True, } class procurement_order(osv.osv): """ Procurement Orders """ _name = "procurement.order" _description = "Procurement" _order = 'priority desc, date_planned, id asc' _inherit = ['mail.thread'] _log_create = False _columns = { 'name': fields.text('Description', required=True), 'origin': fields.char('Source Document', help="Reference of the document that created this Procurement.\n" "This is automatically completed by Odoo."), 'company_id': fields.many2one('res.company', 'Company', required=True), # These two fields are used for shceduling 'priority': fields.selection(PROCUREMENT_PRIORITIES, 'Priority', required=True, select=True, track_visibility='onchange'), 'date_planned': fields.datetime('Scheduled Date', required=True, select=True, track_visibility='onchange'), 'group_id': fields.many2one('procurement.group', 'Procurement Group'), 'rule_id': fields.many2one('procurement.rule', 'Rule', track_visibility='onchange', help="Chosen rule for the procurement resolution. 
Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior."), 'product_id': fields.many2one('product.product', 'Product', required=True, states={'confirmed': [('readonly', False)]}, readonly=True), 'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'confirmed': [('readonly', False)]}, readonly=True), 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'confirmed': [('readonly', False)]}, readonly=True), 'product_uos_qty': fields.float('UoS Quantity', states={'confirmed': [('readonly', False)]}, readonly=True), 'product_uos': fields.many2one('product.uom', 'Product UoS', states={'confirmed': [('readonly', False)]}, readonly=True), 'state': fields.selection([ ('cancel', 'Cancelled'), ('confirmed', 'Confirmed'), ('exception', 'Exception'), ('running', 'Running'), ('done', 'Done') ], 'Status', required=True, track_visibility='onchange', copy=False), } _defaults = { 'state': 'confirmed', 'priority': '1', 'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c) } def unlink(self, cr, uid, ids, context=None): procurements = self.read(cr, uid, ids, ['state'], context=context) unlink_ids = [] for s in procurements: if s['state'] == 'cancel': unlink_ids.append(s['id']) else: raise osv.except_osv(_('Invalid Action!'), _('Cannot delete Procurement Order(s) which are in %s state.') % s['state']) return osv.osv.unlink(self, cr, uid, unlink_ids, context=context) def do_view_procurements(self, cr, uid, ids, context=None): ''' This function returns an action that display existing procurement orders of same procurement group of given ids. ''' act_obj = self.pool.get('ir.actions.act_window') action_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'procurement.do_view_procurements', raise_if_not_found=True) result = act_obj.read(cr, uid, [action_id], context=context)[0] group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id]) result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]" return result def onchange_product_id(self, cr, uid, ids, product_id, context=None): """ Finds UoM and UoS of changed product. @param product_id: Changed id of product. @return: Dictionary of values. 
""" if product_id: w = self.pool.get('product.product').browse(cr, uid, product_id, context=context) v = { 'product_uom': w.uom_id.id, 'product_uos': w.uos_id and w.uos_id.id or w.uom_id.id } return {'value': v} return {} def get_cancel_ids(self, cr, uid, ids, context=None): return [proc.id for proc in self.browse(cr, uid, ids, context=context) if proc.state != 'done'] def cancel(self, cr, uid, ids, context=None): #cancel only the procurements that aren't done already to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context) if to_cancel_ids: return self.write(cr, uid, to_cancel_ids, {'state': 'cancel'}, context=context) def reset_to_confirmed(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'confirmed'}, context=context) def run(self, cr, uid, ids, autocommit=False, context=None): for procurement_id in ids: #we intentionnaly do the browse under the for loop to avoid caching all ids which would be resource greedy #and useless as we'll make a refresh later that will invalidate all the cache (and thus the next iteration #will fetch all the ids again) procurement = self.browse(cr, uid, procurement_id, context=context) if procurement.state not in ("running", "done"): try: if self._assign(cr, uid, procurement, context=context): procurement.refresh() res = self._run(cr, uid, procurement, context=context or {}) if res: self.write(cr, uid, [procurement.id], {'state': 'running'}, context=context) else: self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context) else: self.message_post(cr, uid, [procurement.id], body=_('No rule matching this procurement'), context=context) self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context) if autocommit: cr.commit() except OperationalError: if autocommit: cr.rollback() continue else: raise return True def check(self, cr, uid, ids, autocommit=False, context=None): done_ids = [] for procurement in self.browse(cr, uid, ids, context=context): try: result = self._check(cr, uid, procurement, context=context) if result: done_ids.append(procurement.id) if autocommit: cr.commit() except OperationalError: if autocommit: cr.rollback() continue else: raise if done_ids: self.write(cr, uid, done_ids, {'state': 'done'}, context=context) return done_ids # # Method to overwrite in different procurement modules # def _find_suitable_rule(self, cr, uid, procurement, context=None): '''This method returns a procurement.rule that depicts what to do with the given procurement in order to complete its needs. It returns False if no suiting rule is found. :param procurement: browse record :rtype: int or False ''' return False def _assign(self, cr, uid, procurement, context=None): '''This method check what to do with the given procurement in order to complete its needs. It returns False if no solution is found, otherwise it stores the matching rule (if any) and returns True. 
:param procurement: browse record :rtype: boolean ''' #if the procurement already has a rule assigned, we keep it (it has a higher priority as it may have been chosen manually) if procurement.rule_id: return True elif procurement.product_id.type != 'service': rule_id = self._find_suitable_rule(cr, uid, procurement, context=context) if rule_id: self.write(cr, uid, [procurement.id], {'rule_id': rule_id}, context=context) return True return False def _run(self, cr, uid, procurement, context=None): '''This method implements the resolution of the given procurement :param procurement: browse record :returns: True if the resolution of the procurement was a success, False otherwise to set it in exception ''' return True def _check(self, cr, uid, procurement, context=None): '''Returns True if the given procurement is fulfilled, False otherwise :param procurement: browse record :rtype: boolean ''' return False # # Scheduler # def run_scheduler(self, cr, uid, use_new_cursor=False, company_id = False, context=None): ''' Call the scheduler to check the procurement order. This is intented to be done for all existing companies at the same time, so we're running all the methods as SUPERUSER to avoid intercompany and access rights issues. @param self: The object pointer @param cr: The current row, from the database cursor, @param uid: The current user ID for security checks @param ids: List of selected IDs @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement. This is appropriate for batch jobs only. @param context: A standard dictionary for contextual values @return: Dictionary of values ''' if context is None: context = {} try: if use_new_cursor: cr = openerp.registry(cr.dbname).cursor() # Run confirmed procurements dom = [('state', '=', 'confirmed')] if company_id: dom += [('company_id', '=', company_id)] prev_ids = [] while True: ids = self.search(cr, SUPERUSER_ID, dom, context=context) if not ids or prev_ids == ids: break else: prev_ids = ids self.run(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context) if use_new_cursor: cr.commit() # Check if running procurements are done offset = 0 dom = [('state', '=', 'running')] if company_id: dom += [('company_id', '=', company_id)] prev_ids = [] while True: ids = self.search(cr, SUPERUSER_ID, dom, offset=offset, context=context) if not ids or prev_ids == ids: break else: prev_ids = ids self.check(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context) if use_new_cursor: cr.commit() finally: if use_new_cursor: try: cr.close() except Exception: pass return {} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
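`run_scheduler` in the procurement module above repeatedly searches and processes until a pass returns no ids or the same ids as the previous pass, a fixed-point loop that is easy to miss in the flattened source. The same control flow in isolation (function names are mine):

```python
def run_until_stable(search, process):
    # Keep processing matching records until a pass returns nothing,
    # or the same ids as last time (i.e. no further progress was made).
    prev_ids = []
    while True:
        ids = search()
        if not ids or ids == prev_ids:
            break
        prev_ids = ids
        process(ids)
```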
durden/dayonetools
dayonetools/services/habit_list.py
1
10350
""" This module provides a way to import data from the Habit List iPhone application (http://habitlist.com/) into Day One Journal (http://dayoneapp.com/) entries. To use this module you must first do a manual export of your data from Habit list. This can be done by the following: - Open Habit List iPhone app - Click the 'gear' icon for settings at the bottom of the main 'Today' view - Choose the 'Export Data' option - E-mail the data to yourself - Copy and paste the e-mail contents into a file of your choosing - Remove the 'sent from iPhone' line at the end of your e-mail. This will cause the script to NOT process the JSON data. - DO NOT REMOVE THE LAST TWO EMPTY LINES OF THE E-MAIL. WE CURRENTLY HAVE A BUG THAT EXPECTS THESE LINES. - You can choose to optionally remove the first few lines of the e-mail that are not JSON data, everything up to the first '[' character. - Again, this is optional because this module will attempt to ignore any non-JSON data at the START of a file. At this point, you are ready to do the actual conversion from JSON to Day One entires. So, you should check all the 'settings' in this module for things you would like to change: - HEADER_FOR_DAY_ONE_ENTRIES - DAYONE_ENTRIES - ENTRY_TEMPLATE - TIMEZONE - Make sure to choose the timezone of your iPhone because the Habit List app stores all timezones in UTC and you'll want to convert this to the timezone your iPhone used at the time you completed the habit. This will ensure your Day One entries match the time you completed the task and also prevent a habit from showing up more than once per day which can happen with UTC time if you complete a habit late in one day and early in the next, etc. - You can find a list of available timezone strings here: - http://en.wikipedia.org/wiki/List_of_tz_database_time_zones Next, you can run this module with your exported JSON data as an argument like so: - python services/habit_list.py -f habit_list_data.json -t Also, it's encouraged to run this with the '-t' option first so that all your Day One entires will be created in a local directory called 'test.' This will allow you to inspect the conversion. You can manually copy a few select entries into your Day One 'entries/' folder to ensure you approve of the formatting and can easily make any formatting adjustments. Then, you can run this module again without the '-t' to fully import Habit List entries into Day One. """ import argparse import collections from datetime import datetime import json import os import re import uuid from dateutil import tz from dayonetools.services import convert_to_dayone_date_string DAYONE_ENTRIES = '/Users/durden/Dropbox/Apps/Day One/Journal.dayone/entries/' # This text will be inserted into the first line of all entries created, set to # '' to remove this completely. HEADER_FOR_DAYONE_ENTRIES = 'Habit List entry' # Note the strange lack of indentation on the {entry_text} b/c day one will # display special formatting to text that is indented, which we want to avoid. 
ENTRY_TEMPLATE = """ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>Creation Date</key> <date>{date}</date> <key>Entry Text</key> <string> {entry_title} <![CDATA[ {habits}]]> #habits #habit_list </string> <key>Starred</key> <false/> <key>Tags</key> <array> <string>habits</string> <string>habit_list</string> </array> <key>UUID</key> <string>{uuid_str}</string> </dict> </plist> """ TIMEZONE = 'America/Chicago' def _parse_args(): """Parse sys.argv arguments""" parser = argparse.ArgumentParser( description='Export Habit List data to Day One') parser.add_argument('-f', '--file', action='store', dest='input_file', required=True, help='JSON file to import from') parser.add_argument('-v', '--verbose', default=False, action='store_true', dest='verbose', required=False, help='Verbose debugging information') parser.add_argument('-t', '--test', default=False, action='store_true', dest='test', required=False, help=('Test import by creating Day one files in local ' 'directory for inspect')) def _datetime(str_): """Convert date string in YYYY-MM-DD format to datetime object""" if not str_: return None try: date = datetime.strptime(str_, '%Y-%m-%d') except ValueError: msg = 'Invalid date format, should be YYYY-MM-DD' raise argparse.ArgumentTypeError(msg) return date.replace(tzinfo=_user_time_zone()) parser.add_argument('-s', '--since', type=_datetime, help=('Only process entries starting with YYYY-MM-DD ' 'and newer')) return vars(parser.parse_args()) def _user_time_zone(): """Get default timezone for user""" try: return tz.gettz(TIMEZONE) except Exception as err: print 'Failed getting timezone, check your TIMEZONE variable' raise def _user_time_zone_date(dt, user_time_zone, utc_time_zone): """ Convert given datetime string into a yyyy-mm-dd string taking into account the user time zone Keep in mind that this conversion might change the actual day if the habit was entered 'early' or 'late' in the day. This is correct because the user entered the habit in their own timezone, but the app stores this internally (and exports) in utc. So, here we are effectively converting the time back to when the user actually entered it, based on the timezone the user claims they were in. """ # We know habit list stores in UTC so don't need the timezone info dt = dt.split('+')[0].strip() dtime_obj = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S') # Tell native datetime object we are using UTC, then we need to convert # that UTC time into the user's timezone BEFORE stripping off the time # to make sure the year, month, and date take into account timezone # differences. utc = dtime_obj.replace(tzinfo=utc_time_zone) return utc.astimezone(user_time_zone) def _habits_to_markdown(habits): """Create markdown list of habits""" # FIXME: This is inefficient but not sure of a good way to use join since # we want to add a chacter to the beginning and end of each string in list. markdown = '' for habit, dt_obj in habits: markdown += '- [%02d:%02d] %s\n' % (dt_obj.hour, dt_obj.minute, habit) return markdown def create_habitlist_entry(directory, day_str, habits, verbose): """Create day one file entry for given habits, date pair""" # Create unique uuid without any specific machine information # (uuid() vs. uuid()) and strip any '-' characters to be # consistent with dayone format. 
uuid_str = re.sub('-', '', str(uuid.uuid4())) file_name = '%s.doentry' % (uuid_str) full_file_name = os.path.join(directory, file_name) date = convert_to_dayone_date_string(day_str) habits = _habits_to_markdown(habits) entry = {'entry_title': HEADER_FOR_DAYONE_ENTRIES, 'habits': habits,'date': date, 'uuid_str': uuid_str} with open(full_file_name, 'w') as file_obj: text = ENTRY_TEMPLATE.format(**entry) file_obj.write(text) if verbose: print 'Created entry for %s: %s' % (date, file_name) def parse_habits_file(filename, start_date=None): """ Parse habits json file and return dict of data organized by day start_date can be a datetime object used only to return habits that were started on or after start_date """ with open(filename, 'r') as file_obj: # FIXME: This expects 3 lines of junk at the beginning of the file, but # we could just read until we find '[' and ignore up until that point. junk = file_obj.readline() junk = file_obj.readline() junk = file_obj.readline() # FIXME: For my sample this is about 27kb of memory _json = file_obj.read() # FIXME: Downside here is that we assume the user was in the same timezone # for every habit. However, it's possible that some of the habits were # entered while the user was traveling in a different timezone, etc. iphone_time_zone = _user_time_zone() utc_time_zone = tz.gettz('UTC') # Use a set b/c we can only do each habit once a day habits = collections.defaultdict(set) # FIXME: Maybe optimize this to not hold it all in memory # We have to parse all json and return it b/c the data is organized by # habit and we need it organized by date. So, we can't use a generator or # anything to yield values as they come b/c we won't know if we've parsed # the entire day until all JSON is parsed. # FIXME: Should have something to catch ValueError exceptions around this # so we can show the line with the error if something is wrong. for habit in json.loads(_json): name = habit['name'] for dt in habit['completed']: dt_obj = _user_time_zone_date(dt, iphone_time_zone, utc_time_zone) if start_date is None or dt_obj >= start_date: # Habits will be organized by day then each one will have it's # own time. day_str = dt_obj.strftime('%Y-%m-%d') habits[day_str].add((name, dt_obj)) return habits def main(): args = _parse_args() if args['test']: directory = './test' try: os.mkdir(directory) except OSError as err: print 'Warning: %s' % (err) else: directory = DAYONE_ENTRIES habits = parse_habits_file(args['input_file'], args['since']) for day_str, days_habits in habits.iteritems(): create_habitlist_entry(directory, day_str, days_habits, args['verbose']) if __name__ == '__main__': main()
mit
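The subtle part of the Habit List importer is `_user_time_zone_date`: timestamps are exported in UTC, so the calendar date must be taken only after converting to the user's zone. A runnable reduction of that logic (the sample timestamp is made up):

```python
from datetime import datetime
from dateutil import tz

def utc_export_to_local(dt_string, zone='America/Chicago'):
    # Drop the '+0000' suffix as the importer does, mark the naive
    # datetime as UTC, then convert so the day boundary is local.
    naive = datetime.strptime(dt_string.split('+')[0].strip(),
                              '%Y-%m-%d %H:%M:%S')
    return naive.replace(tzinfo=tz.gettz('UTC')).astimezone(tz.gettz(zone))

# 03:12 UTC is still the previous evening in Chicago:
print(utc_export_to_local('2013-05-01 03:12:00 +0000').strftime('%Y-%m-%d'))
# -> 2013-04-30
```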
memtoko/django
django/template/loaders/cached.py
20
3031
""" Wrapper class that takes a list of template loaders as an argument and attempts to load templates from them in order, caching the result. """ import hashlib from django.template.base import Template, TemplateDoesNotExist from django.utils.encoding import force_bytes from .base import Loader as BaseLoader class Loader(BaseLoader): def __init__(self, engine, loaders): self.template_cache = {} self.find_template_cache = {} self.loaders = engine.get_template_loaders(loaders) super(Loader, self).__init__(engine) def cache_key(self, template_name, template_dirs): if template_dirs: # If template directories were specified, use a hash to differentiate return '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()]) else: return template_name def find_template(self, name, dirs=None): """ Helper method. Lookup the template :param name: in all the configured loaders """ key = self.cache_key(name, dirs) try: result = self.find_template_cache[key] except KeyError: result = None for loader in self.loaders: try: template, display_name = loader(name, dirs) except TemplateDoesNotExist: pass else: origin = self.engine.make_origin(display_name, loader, name, dirs) result = template, origin break self.find_template_cache[key] = result if result: return result else: self.template_cache[key] = TemplateDoesNotExist raise TemplateDoesNotExist(name) def load_template(self, template_name, template_dirs=None): key = self.cache_key(template_name, template_dirs) template_tuple = self.template_cache.get(key) # A cached previous failure: if template_tuple is TemplateDoesNotExist: raise TemplateDoesNotExist elif template_tuple is None: template, origin = self.find_template(template_name, template_dirs) if not hasattr(template, 'render'): try: template = Template(template, origin, template_name, self.engine) except TemplateDoesNotExist: # If compiling the template we found raises TemplateDoesNotExist, # back off to returning the source and display name for the template # we were asked to load. This allows for correct identification (later) # of the actual template that does not exist. self.template_cache[key] = (template, origin) self.template_cache[key] = (template, None) return self.template_cache[key] def reset(self): "Empty the template cache." self.template_cache.clear() self.find_template_cache.clear()
bsd-3-clause
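In the Django versions this loader targets, it is enabled by wrapping the other loaders in settings; the wrapped loaders are only consulted on a cache miss:

```python
# settings.py (pre-Django-1.8 style, matching the loader above)
TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)
```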
jakevdp/pelican-plugins
static_comments/static_comments.py
72
1216
# -*- coding: utf-8 -*- import codecs import logging import markdown import os logger = logging.getLogger(__name__) from pelican import signals def initialized(pelican): from pelican.settings import DEFAULT_CONFIG DEFAULT_CONFIG.setdefault('STATIC_COMMENTS', False) DEFAULT_CONFIG.setdefault('STATIC_COMMENTS_DIR', 'comments') if pelican: pelican.settings.setdefault('STATIC_COMMENTS', False) pelican.settings.setdefault('STATIC_COMMENTS_DIR', 'comments') def add_static_comments(gen, metadata): if not gen.settings['STATIC_COMMENTS']: return if 'slug' not in metadata: logger.warning("static_comments: " "can't locate comments file without slug tag in the article") return fname = os.path.join(gen.settings['STATIC_COMMENTS_DIR'], metadata['slug'] + ".md") if not os.path.exists(fname): return input_file = codecs.open(fname, mode="r", encoding="utf-8") text = input_file.read() html = markdown.markdown(text) metadata['static_comments'] = html def register(): signals.initialized.connect(initialized) signals.article_generator_context.connect(add_static_comments)
agpl-3.0
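Wiring the plugin up takes two settings and one Markdown file per article, named after the article's slug. A sketch of the Pelican configuration (values are examples; the plugin name assumes the directory layout shown in the record's path):

```python
# pelicanconf.py
PLUGINS = ['static_comments']
STATIC_COMMENTS = True
STATIC_COMMENTS_DIR = 'comments'   # comments/<slug>.md is rendered to HTML and
                                   # exposed as the article's `static_comments`
```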
log2timeline/dfvfs
dfvfs/vfs/apfs_file_entry.py
2
8402
# -*- coding: utf-8 -*- """The APFS file entry implementation.""" from dfdatetime import apfs_time as dfdatetime_apfs_time from dfvfs.lib import definitions from dfvfs.lib import errors from dfvfs.path import apfs_path_spec from dfvfs.vfs import file_entry class APFSDirectory(file_entry.Directory): """File system directory that uses pyfsapfs.""" def _EntriesGenerator(self): """Retrieves directory entries. Since a directory can contain a vast number of entries using a generator is more memory efficient. Yields: APFSPathSpec: APFS path specification. """ try: fsapfs_file_entry = self._file_system.GetAPFSFileEntryByPathSpec( self.path_spec) except errors.PathSpecError: return location = getattr(self.path_spec, 'location', None) for fsapfs_sub_file_entry in fsapfs_file_entry.sub_file_entries: directory_entry = fsapfs_sub_file_entry.name if not location or location == self._file_system.PATH_SEPARATOR: directory_entry = self._file_system.JoinPath([directory_entry]) else: directory_entry = self._file_system.JoinPath([ location, directory_entry]) yield apfs_path_spec.APFSPathSpec( identifier=fsapfs_sub_file_entry.identifier, location=directory_entry, parent=self.path_spec.parent) class APFSFileEntry(file_entry.FileEntry): """File system file entry that uses pyfsapfs.""" TYPE_INDICATOR = definitions.TYPE_INDICATOR_APFS # Mappings of APFS file types to dfVFS file entry types. _ENTRY_TYPES = { 0x1000: definitions.FILE_ENTRY_TYPE_PIPE, 0x2000: definitions.FILE_ENTRY_TYPE_DEVICE, 0x4000: definitions.FILE_ENTRY_TYPE_DIRECTORY, 0x6000: definitions.FILE_ENTRY_TYPE_DEVICE, 0x8000: definitions.FILE_ENTRY_TYPE_FILE, 0xa000: definitions.FILE_ENTRY_TYPE_LINK, 0xc000: definitions.FILE_ENTRY_TYPE_SOCKET, 0xe000: definitions.FILE_ENTRY_TYPE_WHITEOUT} def __init__( self, resolver_context, file_system, path_spec, fsapfs_file_entry=None, is_root=False, is_virtual=False): """Initializes a file entry. Args: resolver_context (Context): resolver context. file_system (FileSystem): file system. path_spec (PathSpec): path specification. fsapfs_file_entry (Optional[pyfsapfs.file_entry]): APFS file entry. is_root (Optional[bool]): True if the file entry is the root file entry of the corresponding file system. is_virtual (Optional[bool]): True if the file entry is a virtual file entry emulated by the corresponding file system. Raises: BackEndError: if the pyfsapfs file entry is missing. """ if not fsapfs_file_entry: fsapfs_file_entry = file_system.GetAPFSFileEntryByPathSpec(path_spec) if not fsapfs_file_entry: raise errors.BackEndError('Missing pyfsapfs file entry.') super(APFSFileEntry, self).__init__( resolver_context, file_system, path_spec, is_root=is_root, is_virtual=is_virtual) self._fsapfs_file_entry = fsapfs_file_entry self.entry_type = self._ENTRY_TYPES.get( fsapfs_file_entry.file_mode & 0xf000, None) def _GetDirectory(self): """Retrieves a directory. Returns: APFSDirectory: a directory. """ if self._directory is None: self._directory = APFSDirectory(self._file_system, self.path_spec) return self._directory def _GetLink(self): """Retrieves the link. Returns: str: path of the linked file. """ if self._link is None: self._link = self._fsapfs_file_entry.symbolic_link_target if self._link and self._link[0] != self._file_system.PATH_SEPARATOR: # TODO: make link absolute. self._link = '/{0:s}'.format(self._link) return self._link def _GetStat(self): """Retrieves information about the file entry. Returns: VFSStat: a stat object. 
""" stat_object = super(APFSFileEntry, self)._GetStat() # Ownership and permissions stat information. stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff stat_object.uid = self._fsapfs_file_entry.owner_identifier stat_object.gid = self._fsapfs_file_entry.group_identifier # Other stat information. stat_object.ino = self._fsapfs_file_entry.identifier stat_object.fs_type = 'APFS' return stat_object def _GetSubFileEntries(self): """Retrieves a sub file entries generator. Yields: APFSFileEntry: a sub file entry. """ if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY: directory = self._GetDirectory() for path_spec in directory.entries: yield APFSFileEntry( self._resolver_context, self._file_system, path_spec) @property def access_time(self): """dfdatetime.DateTimeValues: access time or None if not available.""" timestamp = self._fsapfs_file_entry.get_access_time_as_integer() return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) @property def added_time(self): """dfdatetime.DateTimeValues: added time or None if not available.""" timestamp = self._fsapfs_file_entry.get_added_time_as_integer() if timestamp is None: return None return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) @property def change_time(self): """dfdatetime.DateTimeValues: change time or None if not available.""" timestamp = self._fsapfs_file_entry.get_inode_change_time_as_integer() return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) @property def creation_time(self): """dfdatetime.DateTimeValues: creation time or None if not available.""" timestamp = self._fsapfs_file_entry.get_creation_time_as_integer() return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) @property def modification_time(self): """dfdatetime.DateTimeValues: modification time or None if not available.""" timestamp = self._fsapfs_file_entry.get_modification_time_as_integer() return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) @property def name(self): """str: name of the file entry, which does not include the full path.""" # The root directory file name is typically 'root', dfVFS however uses ''. if self._is_root: return '' return self._fsapfs_file_entry.name @property def size(self): """int: size of the file entry in bytes or None if not available.""" return self._fsapfs_file_entry.size def GetAPFSFileEntry(self): """Retrieves the APFS file entry. Returns: pyfsapfs.file_entry: APFS file entry. """ return self._fsapfs_file_entry def GetLinkedFileEntry(self): """Retrieves the linked file entry, e.g. for a symbolic link. Returns: APFSFileEntry: linked file entry or None if not available. """ link = self._GetLink() if not link: return None # TODO: is there a way to determine the identifier here? link_identifier = None parent_path_spec = getattr(self.path_spec, 'parent', None) path_spec = apfs_path_spec.APFSPathSpec( location=link, parent=parent_path_spec) is_root = bool( link == self._file_system.LOCATION_ROOT or link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER) return APFSFileEntry( self._resolver_context, self._file_system, path_spec, is_root=is_root) def GetParentFileEntry(self): """Retrieves the parent file entry. Returns: APFSFileEntry: parent file entry or None if not available. 
""" parent_location = None location = getattr(self.path_spec, 'location', None) if location is not None: parent_location = self._file_system.DirnamePath(location) if parent_location == '': parent_location = self._file_system.PATH_SEPARATOR parent_identifier = self._fsapfs_file_entry.parent_identifier if parent_identifier is None: return None parent_path_spec = getattr(self.path_spec, 'parent', None) path_spec = apfs_path_spec.APFSPathSpec( location=parent_location, identifier=parent_identifier, parent=parent_path_spec) is_root = bool( parent_location == self._file_system.LOCATION_ROOT or parent_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER) return APFSFileEntry( self._resolver_context, self._file_system, path_spec, is_root=is_root)
apache-2.0
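The `_ENTRY_TYPES` table above keys off the upper four bits of the POSIX file mode, which is why the lookup masks with `0xf000`. The mapping in isolation, with a couple of sanity checks:

```python
ENTRY_TYPES = {0x4000: 'directory', 0x8000: 'file', 0xa000: 'link'}

def entry_type(file_mode):
    # The type bits live in the top nibble of the 16-bit mode word;
    # the permission bits (0x0fff) are masked away first.
    return ENTRY_TYPES.get(file_mode & 0xf000)

assert entry_type(0o100644) == 'file'       # regular file, rw-r--r--
assert entry_type(0o040755) == 'directory'
```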
theflofly/tensorflow
tensorflow/python/keras/keras_parameterized.py
10
12005
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for unit-testing Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import itertools import unittest from absl.testing import parameterized from tensorflow.python import keras from tensorflow.python import tf2 from tensorflow.python.eager import context from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test from tensorflow.python.util import nest class TestCase(test.TestCase, parameterized.TestCase): def tearDown(self): keras.backend.clear_session() super(TestCase, self).tearDown() # TODO(kaftan): Possibly enable 'subclass_custom_build' when tests begin to pass # it. Or perhaps make 'subclass' always use a custom build method. def run_with_all_model_types( test_or_class=None, exclude_models=None): """Execute the decorated test with all Keras model types. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once for each Keras model type. The Keras model types are: ['functional', 'subclass', 'sequential'] Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. Various methods in `testing_utils` to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models. For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_with_all_model_types( exclude_models = ['sequential']) def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` This test tries building a small mlp as both a functional model and as a subclass model. 
We can also annotate the whole class if we want this to apply to all tests in the class: ```python @testing_utils.run_with_all_model_types(exclude_models = ['sequential']) class MyTests(testing_utils.KerasTestCase): def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. exclude_models: A collection of Keras model types to not run. (May also be a single model type not wrapped in a collection). Defaults to None. Returns: Returns a decorator that will run the decorated test method multiple times: once for each desired Keras model type. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency. """ model_types = ['functional', 'subclass', 'sequential'] params = [('_%s' % model, model) for model in model_types if model not in nest.flatten(exclude_models)] def single_method_decorator(f): """Decorator that constructs the test cases.""" # Use named_parameters so it can be individually run from the command line @parameterized.named_parameters(*params) @functools.wraps(f) def decorated(self, model_type, *args, **kwargs): """A run of a single test case w/ the specified model type.""" if model_type == 'functional': _test_functional_model_type(f, self, *args, **kwargs) elif model_type == 'subclass': _test_subclass_model_type(f, self, *args, **kwargs) elif model_type == 'sequential': _test_sequential_model_type(f, self, *args, **kwargs) else: raise ValueError('Unknown model type: %s' % (model_type,)) return decorated return _test_or_class_decorator(test_or_class, single_method_decorator) def _test_functional_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('functional'): f(test_or_class, *args, **kwargs) def _test_subclass_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('subclass'): f(test_or_class, *args, **kwargs) def _test_sequential_model_type(f, test_or_class, *args, **kwargs): with testing_utils.model_type_scope('sequential'): f(test_or_class, *args, **kwargs) def run_all_keras_modes( test_or_class=None, config=None, always_skip_v1=False): """Execute the decorated test with all keras execution modes. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once executing in legacy graph mode, once running eagerly and with `should_run_eagerly` returning True, and once running eagerly with `should_run_eagerly` returning False. If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and the test will only run twice. Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. 
For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_all_keras_modes def test_foo(self): model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == "__main__": tf.test.main() ``` This test will try compiling & fitting the small functional mlp using all three Keras execution modes. Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. config: An optional config_pb2.ConfigProto to use to configure the session when executing graphs. always_skip_v1: If True, does not try running the legacy graph mode even when Tensorflow v2 behavior is not enabled. Returns: Returns a decorator that will run the decorated test method multiple times. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency. """ params = [('_v2_eager', 'v2_eager'), ('_v2_function', 'v2_function')] if not (always_skip_v1 or tf2.enabled()): params.append(('_v1_graph', 'v1_graph')) def single_method_decorator(f): """Decorator that constructs the test cases.""" # Use named_parameters so it can be individually run from the command line @parameterized.named_parameters(*params) @functools.wraps(f) def decorated(self, run_mode, *args, **kwargs): """A run of a single test case w/ specified run mode.""" if run_mode == 'v1_graph': _v1_graph_test(f, self, config, *args, **kwargs) elif run_mode == 'v2_function': _v2_graph_functions_test(f, self, *args, **kwargs) elif run_mode == 'v2_eager': _v2_eager_test(f, self, *args, **kwargs) else: return ValueError('Unknown run mode %s' % run_mode) return decorated return _test_or_class_decorator(test_or_class, single_method_decorator) def _v1_graph_test(f, test_or_class, config, *args, **kwargs): with context.graph_mode(), testing_utils.run_eagerly_scope(False): with test_or_class.test_session(use_gpu=True, config=config): f(test_or_class, *args, **kwargs) def _v2_graph_functions_test(f, test_or_class, *args, **kwargs): with context.eager_mode(): with testing_utils.run_eagerly_scope(False): f(test_or_class, *args, **kwargs) def _v2_eager_test(f, test_or_class, *args, **kwargs): with context.eager_mode(): with testing_utils.run_eagerly_scope(True): f(test_or_class, *args, **kwargs) def _test_or_class_decorator(test_or_class, single_method_decorator): """Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. 
Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result. """ def _decorate_test_or_class(obj): if isinstance(obj, collections.Iterable): return itertools.chain.from_iterable( single_method_decorator(method) for method in obj) if isinstance(obj, type): cls = obj for name, value in cls.__dict__.copy().items(): if callable(value) and name.startswith( unittest.TestLoader.testMethodPrefix): setattr(cls, name, single_method_decorator(value)) cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy()) return cls return single_method_decorator(obj) if test_or_class is not None: return _decorate_test_or_class(test_or_class) return _decorate_test_or_class
apache-2.0
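Both decorators above ultimately lean on absl's `parameterized.named_parameters` to fan one test method out into per-mode cases. A toy standalone version of that mechanism (no TensorFlow required; the class and method names are mine):

```python
from absl.testing import parameterized

class ToyTests(parameterized.TestCase):
    @parameterized.named_parameters(('_functional', 'functional'),
                                    ('_subclass', 'subclass'),
                                    ('_sequential', 'sequential'))
    def test_build(self, model_type):
        # Runs three times, once per named case; each case is individually
        # selectable on the command line, e.g. as test_build_functional.
        self.assertIn(model_type, ('functional', 'subclass', 'sequential'))
```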
proversity-org/edx-platform
lms/djangoapps/class_dashboard/urls.py
18
1499
""" Class Dashboard API endpoint urls. """ from django.conf import settings from django.conf.urls import url import class_dashboard.views import class_dashboard.dashboard_data COURSE_ID_PATTERN = settings.COURSE_ID_PATTERN urlpatterns = [ # Json request data for metrics for entire course url(r'^{}/all_sequential_open_distrib$'.format(settings.COURSE_ID_PATTERN), class_dashboard.views.all_sequential_open_distrib, name="all_sequential_open_distrib"), url(r'^{}/all_problem_grade_distribution$'.format(settings.COURSE_ID_PATTERN), class_dashboard.views.all_problem_grade_distribution, name="all_problem_grade_distribution"), # Json request data for metrics for particular section url(r'^{}/problem_grade_distribution/(?P<section>\d+)$'.format(settings.COURSE_ID_PATTERN), class_dashboard.views.section_problem_grade_distrib, name="section_problem_grade_distrib"), # For listing students that opened a sub-section url(r'^get_students_opened_subsection$', class_dashboard.dashboard_data.get_students_opened_subsection, name="get_students_opened_subsection"), # For listing of students' grade per problem url(r'^get_students_problem_grades$', class_dashboard.dashboard_data.get_students_problem_grades, name="get_students_problem_grades"), # For generating metrics data as a csv url(r'^post_metrics_data_csv_url', class_dashboard.dashboard_data.post_metrics_data_csv, name="post_metrics_data_csv"), ]
agpl-3.0
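These named patterns are meant to be reversed rather than hard-coded. A hypothetical reversal, assuming `COURSE_ID_PATTERN` captures a named `course_id` group as elsewhere in edx-platform (the course id value is an example):

```python
from django.core.urlresolvers import reverse  # Django 1.x import path, matching this codebase

url = reverse('all_problem_grade_distribution',
              kwargs={'course_id': 'course-v1:edX+DemoX+2019'})
```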
patrikryd/kernel
tools/perf/scripts/python/net_dropmonitor.py
4235
1554
# Monitor the system for dropped packets and produce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms: if (i['loc'] >= loc): return (i['name'], i['loc']-loc) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a corresponding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
gpl-2.0
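`get_sym` above walks the sorted kallsyms table linearly on every lookup. A bisect-based variant of the same idea, resolving an address to the nearest preceding symbol over a pre-split table (names and data layout are mine):

```python
import bisect

def get_sym_fast(addr, locs, names):
    # locs is the sorted list of symbol start addresses and names the
    # parallel list of symbol names; find the last symbol starting at
    # or before addr and report the offset into it.
    i = bisect.bisect_right(locs, addr) - 1
    if i < 0:
        return None, 0
    return names[i], addr - locs[i]
```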
jezdez/kuma
vendor/packages/translate/storage/test_monolingual.py
22
1421
#!/usr/bin/env python # # -*- coding: utf-8 -*- # # These test classes should be used as superclasses of test classes for # classes that don't support the target property from translate.storage import base, test_base class TestMonolingualUnit(test_base.TestTranslationUnit): UnitClass = base.TranslationUnit def test_target(self): pass def test_rich_get(self): pass def test_rich_set(self): pass class TestMonolingualStore(test_base.TestTranslationStore): StoreClass = base.TranslationStore def test_translate(self): pass def test_markup(self): pass def test_nonascii(self): pass def check_equality(self, store1, store2): """Check that store1 and store2 are the same.""" assert len(store1.units) == len(store2.units) for n, store1unit in enumerate(store1.units): store2unit = store2.units[n] if str(store1unit) != str(store2unit): print("match failed between elements %d of %d" % (n+1, len(store1.units))) print("store1:") print(str(store1)) print("store2:") print(str(store2)) print("store1.units[%d].__dict__:" % n, store1unit.__dict__) print("store2.units[%d].__dict__:" % n, store2unit.__dict__) assert str(store1unit) == str(store2unit)
mpl-2.0
ehfeng/pipet
pipet/api/views.py
2
1357
from flask import Blueprint, request # Organization is assumed to live alongside the other models in pipet.sources from pipet.sources import SCHEMANAME, Event, Group, Identity, Organization, Page from pipet.sources.api.tasks import process_event, process_page blueprint = Blueprint(SCHEMANAME, __name__) @blueprint.route('/identity', methods=['PUT']) def identity(): organization = Organization.query.filter_by(name=request.authorization.username, api_key=request.authorization.password).first() return '', 204 @blueprint.route('/group', methods=['PUT']) def group(): organization = Organization.query.filter_by(name=request.authorization.username, api_key=request.authorization.password).first() return '', 204 @blueprint.route('/event', methods=['POST']) def event(): organization = Organization.query.filter_by(name=request.authorization.username, api_key=request.authorization.password).first() data = request.get_json() process_event.delay(organization.id, data) return '', 204 @blueprint.route('/page', methods=['POST']) def page(): organization = Organization.query.filter_by(name=request.authorization.username, api_key=request.authorization.password).first() data = request.get_json() # page events should go through the page task, not the event task process_page.delay(organization.id, data) return '', 204
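# A hedged usage sketch of the /event endpoint; the host, organization name
# and API key below are hypothetical:
#
#   curl -X POST http://localhost:5000/event \
#        -u myorg:secret-api-key \
#        -H 'Content-Type: application/json' \
#        -d '{"name": "signup", "user_id": 42}'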
mit
gtcasl/eiger
Eiger.py
1
20400
#!/usr/bin/python # # \file Eiger.py # \author Eric Anger <[email protected]> # \date July 6, 2012 # # \brief Command line interface into Eiger modeling framework # # \changes Added more plot functionality; Benjamin Allan, SNL 5/2013 # import argparse import matplotlib.pyplot as plt import numpy as np import math import tempfile import shutil import os from ast import literal_eval import json import sys from collections import namedtuple from tabulate import tabulate from sklearn.cluster import KMeans from eiger import database, PCA, LinearRegression Model = namedtuple('Model', ['metric_names', 'means', 'stdevs', 'rotation_matrix', 'kmeans', 'models']) def import_model(args): database.addModelFromFile(args.database, args.file, args.source_name, args.description) def export_model(args): database.dumpModelToFile(args.database, args.file, args.id) def list_models(args): all_models = database.getModels(args.database) print tabulate(all_models, headers=['ID', 'Description', 'Created', 'Source']) def trainModel(args): print "Training the model..." training_DC = database.DataCollection(args.training_dc, args.database) try: performance_metric_id = [m[0] for m in training_DC.metrics].index(args.target) except ValueError: print "Unable to find target metric '%s', " \ "please specify a valid one: " % (args.target,) for (my_name,my_desc,my_type) in training_DC.metrics: print "\t%s" % (my_name,) return training_performance = training_DC.profile[:,performance_metric_id] metric_names = [m[0] for m in training_DC.metrics if m[0] != args.target] if args.predictor_metrics != None: metric_names = filter(lambda x: x in args.predictor_metrics, metric_names) metric_ids = [[m[0] for m in training_DC.metrics].index(n) for n in metric_names] if not metric_ids: print "Unable to make model for empty data collection. Aborting..." return training_profile = training_DC.profile[:,metric_ids] #pca training_pca = PCA.PCA(training_profile) nonzero_components = training_pca.nonzeroComponents() rotation_matrix = training_pca.components[:,nonzero_components] rotated_training_profile = np.dot(training_profile, rotation_matrix) #kmeans n_clusters = args.clusters kmeans = KMeans(n_clusters) means = np.mean(rotated_training_profile, axis=0) stdevs = np.std(rotated_training_profile - means, axis=0, ddof=1) stdevs[stdevs==0.0] = 1.0 clusters = kmeans.fit_predict((rotated_training_profile - means)/stdevs) # reserve a vector for each model created per cluster models = [0] * len(clusters) print "Modeling..." 
for i in range(n_clusters): cluster_profile = rotated_training_profile[clusters==i,:] cluster_performance = training_performance[clusters==i] regression = LinearRegression.LinearRegression(cluster_profile, cluster_performance) pool = [LinearRegression.identityFunction()] for col in range(cluster_profile.shape[1]): if('inv_quadratic' in args.regressor_functions): pool.append(LinearRegression.powerFunction(col, -2)) if('inv_linear' in args.regressor_functions): pool.append(LinearRegression.powerFunction(col, -1)) if('inv_sqrt' in args.regressor_functions): pool.append(LinearRegression.powerFunction(col, -.5)) if('sqrt' in args.regressor_functions): pool.append(LinearRegression.powerFunction(col, .5)) if('linear' in args.regressor_functions): pool.append(LinearRegression.powerFunction(col, 1)) if('quadratic' in args.regressor_functions): pool.append(LinearRegression.powerFunction(col, 2)) if('log' in args.regressor_functions): pool.append(LinearRegression.logFunction(col)) if('cross' in args.regressor_functions): for xcol in range(col, cluster_profile.shape[1]): pool.append(LinearRegression.crossFunction(col, xcol)) if('div' in args.regressor_functions): for xcol in range(col, cluster_profile.shape[1]): pool.append(LinearRegression.divFunction(col,xcol)) pool.append(LinearRegression.divFunction(xcol,col)) (models[i], r_squared, r_squared_adj) = regression.select(pool, threshold=args.threshold, folds=args.nfolds) print "Index\tMetric Name" print '\n'.join("%s\t%s" % metric for metric in enumerate(metric_names)) print "PCA matrix:" print rotation_matrix print "Model:\n" + str(models[i]) print "Finished modeling cluster %s:" % (i,) print "r squared = %s" % (r_squared,) print "adjusted r squared = %s" % (r_squared_adj,) model = Model(metric_names, means, stdevs, rotation_matrix, kmeans, models) # if we want to save the model file, copy it now outfilename = training_DC.name + '.model' if args.output == None else args.output if args.json == True: writeToFileJSON(model, outfilename) else: writeToFile(model, outfilename) if args.test_fit: args.experiment_dc = args.training_dc args.model = outfilename testModel(args) def dumpCSV(args): training_DC = database.DataCollection(args.training_dc, args.database) names = [met[0] for met in training_DC.metrics] if args.metrics != None: names = args.metrics header = ','.join(names) idxs = training_DC.metricIndexByName(names) profile = training_DC.profile[:,idxs] outfile = sys.stdout if args.output == None else args.output np.savetxt(outfile, profile, delimiter=',', header=header, comments='') def testModel(args): print "Testing the model fit..." test_DC = database.DataCollection(args.experiment_dc, args.database) model = readFile(args.model) _runExperiment(model.kmeans, model.means, model.stdevs, model.models, model.rotation_matrix, test_DC, args, model.metric_names) def readFile(infile): with open(infile, 'r') as modelfile: first_char = modelfile.readline()[0] if first_char == '{': return readJSONFile(infile) else: return readBespokeFile(infile) def plotModel(args): print "Plotting model..." model = readFile(args.model) if args.plot_pcs_per_metric: PCA.PlotPCsPerMetric(rotation_matrix, metric_names, title="PCs Per Metric") if args.plot_metrics_per_pc: PCA.PlotMetricsPerPC(rotation_matrix, metric_names, title="Metrics Per PC") def _stringToArray(string): """ Parse string of form [len](number,number,number,...) to a numpy array. 
""" length = string[:string.find('(')] values = string[string.find('('):] arr = np.array(literal_eval(values)) return np.reshape(arr, literal_eval(length)) def _runExperiment(kmeans, means, stdevs, models, rotation_matrix, experiment_DC, args, metric_names): unordered_metric_ids = experiment_DC.metricIndexByType('deterministic', 'nondeterministic') unordered_metric_names = [experiment_DC.metrics[mid][0] for mid in unordered_metric_ids] # make sure all metric_names are in experiment_DC.metrics[:][0] have_metrics = [x in unordered_metric_names for x in metric_names] if not all(have_metrics): print("Experiment DC does not have matching metrics. Aborting...") return # set the correct ordering expr_metric_ids = [unordered_metric_ids[unordered_metric_names.index(name)] for name in metric_names] for idx,metric in enumerate(experiment_DC.metrics): if(metric[0] == args.target): performance_metric_id = idx performance = experiment_DC.profile[:,performance_metric_id] profile = experiment_DC.profile[:,expr_metric_ids] rotated_profile = np.dot(profile, rotation_matrix) means = np.mean(rotated_profile, axis=0) stdevs = np.std(rotated_profile - means, axis=0, ddof=1) stdevs = np.nan_to_num(stdevs) stdevs[stdevs==0.0] = 1.0 clusters = kmeans.predict((rotated_profile - means)/stdevs) prediction = np.empty_like(performance) for i in range(len(kmeans.cluster_centers_)): prediction[clusters==i] = abs(models[i].poll(rotated_profile[clusters==i])) if args.show_prediction: print "Actual\t\tPredicted" print '\n'.join("%s\t%s" % x for x in zip(performance,prediction)) mse = sum([(a-p)**2 for a,p in zip(performance, prediction)]) / len(performance) rmse = math.sqrt(mse) mape = 100 * sum([abs((a-p)/a) for a,p in zip(performance,prediction)]) / len(performance) print "Number of experiment trials: %s" % len(performance) print "Mean Average Percent Error: %s" % mape print "Mean Squared Error: %s" % mse print "Root Mean Squared Error: %s" % rmse def writeToFileJSON(model, outfile): # Let's assume model has all the attributes we care about json_root = {} json_root["metric_names"] = [name for name in model.metric_names] json_root["means"] = [mean for mean in model.means.tolist()] json_root["std_devs"] = [stdev for stdev in model.stdevs.tolist()] json_root["rotation_matrix"] = [[elem for elem in row] for row in model.rotation_matrix.tolist()] json_root["clusters"] = [] for i in range(len(model.kmeans.cluster_centers_)): json_cluster = {} json_cluster["center"] = [center for center in model.kmeans.cluster_centers_[i].tolist()] # get models in json format json_cluster["regressors"] = model.models[i].toJSONObject() json_root["clusters"].append(json_cluster) with open(outfile, 'w') as out: json.dump(json_root, out, indent=4) def readJSONFile(infile): with open(infile, 'r') as modelfile: json_root = json.load(modelfile) metric_names = json_root['metric_names'] means = np.array(json_root['means']) stdevs = np.array(json_root['std_devs']) rotation_matrix = np.array(json_root['rotation_matrix']) empty_kmeans = KMeans(n_clusters=len(json_root['clusters']), n_init=1) centers = [] models = [] for cluster in json_root['clusters']: centers.append(np.array(cluster['center'])) models.append(LinearRegression.Model.fromJSONObject(cluster['regressors'])) kmeans = empty_kmeans.fit(centers) return Model(metric_names, means, stdevs, rotation_matrix, kmeans, models) def writeToFile(model, outfile): with open(outfile, 'w') as modelfile: # For printing the original model file encoding modelfile.write("%s\n%s\n" % (len(model.metric_names), 
'\n'.join(model.metric_names))) modelfile.write("[%s](%s)\n" % (len(model.means), ','.join([str(mean) for mean in model.means.tolist()]))) modelfile.write("[%s](%s)\n" % (len(model.stdevs), ','.join([str(stdev) for stdev in model.stdevs.tolist()]))) modelfile.write("[%s,%s]" % model.rotation_matrix.shape) modelfile.write("(%s)\n" % ','.join(["(%s)" % ','.join([str(elem) for elem in row]) for row in model.rotation_matrix.tolist()])) for i in range(len(model.kmeans.cluster_centers_)): modelfile.write('Model %s\n' % i) modelfile.write("[%s](%s)\n" % (model.rotation_matrix.shape[1], ','.join([str(center) for center in model.kmeans.cluster_centers_[i].tolist()]))) modelfile.write(repr(model.models[i])) modelfile.write('\n') # need a trailing newline def readBespokeFile(infile): """Returns a Model namedtuple with all the model parts""" with open(infile, 'r') as modelfile: lines = iter(modelfile.read().splitlines()) n_params = int(lines.next()) metric_names = [lines.next() for i in range(n_params)] means = _stringToArray(lines.next()) stdevs = _stringToArray(lines.next()) rotation_matrix = _stringToArray(lines.next()) models = [] centroids = [] try: while True: name = lines.next() # kill a line centroids.append(_stringToArray(lines.next())) weights = _stringToArray(lines.next()) functions = [LinearRegression.stringToFunction(lines.next()) for i in range(weights.shape[0])] models.append(LinearRegression.Model(functions, weights)) except StopIteration: pass kmeans = KMeans(len(centroids)) kmeans.cluster_centers_ = np.array(centroids) return Model(metric_names, means, stdevs, rotation_matrix, kmeans, models) def convert(args): print "Converting model..." with open(args.input, 'r') as modelfile: first_char = modelfile.readline()[0] if first_char == '{': model = readJSONFile(args.input) writeToFile(model, args.output) else: model = readBespokeFile(args.input) writeToFileJSON(model, args.output) if __name__ == "__main__": parser = argparse.ArgumentParser(description = \ 'Command line interface into Eiger performance modeling framework \ for all model generation, polling, and serialization tasks.', argument_default=None, fromfile_prefix_chars='@') subparsers = parser.add_subparsers(title='subcommands') train_parser = subparsers.add_parser('train', help='train a model with data from the database', description='Train a model with data from the database') train_parser.set_defaults(func=trainModel) dump_parser = subparsers.add_parser('dump', help='dump data collection to CSV', description='Dump data collection as CSV') dump_parser.set_defaults(func=dumpCSV) test_parser = subparsers.add_parser('test', help='test how well a model predicts a data collection', description='Test how well a model predicts a data collection') test_parser.set_defaults(func=testModel) plot_parser = subparsers.add_parser('plot', help='plot the behavior of a model', description='Plot the behavior of a model') plot_parser.set_defaults(func=plotModel) convert_parser = subparsers.add_parser('convert', help='transform a model into a different file format', description='Transform a model into a different file format') convert_parser.set_defaults(func=convert) list_model_parser = subparsers.add_parser('list', help='list available models in the Eiger DB', description='List available models in the Eiger DB') list_model_parser.set_defaults(func=list_models) import_model_parser = subparsers.add_parser('import', help='import model file into the Eiger DB', description='Import model file into the Eiger DB') 
import_model_parser.set_defaults(func=import_model) export_model_parser = subparsers.add_parser('export', help='export model from Eiger DB to file', description='Export model from Eiger DB to file') export_model_parser.set_defaults(func=export_model) """TRAINING ARGUMENTS""" train_parser.add_argument('database', type=str, help='Name of the database file') train_parser.add_argument('training_dc', type=str, help='Name of the training data collection') train_parser.add_argument('target', type=str, help='Name of the target metric to predict') train_parser.add_argument('--test-fit', action='store_true', default=False, help='If set will test the model fit against the training data.') train_parser.add_argument('--show-prediction', action='store_true', default=False, help='If set, send the actual and predicted values to stdout.') train_parser.add_argument('--predictor-metrics', nargs='*', help='Only use these metrics when building a model.') train_parser.add_argument('--output', type=str, help='Filename to output file to, otherwise use "<training_dc>.model"') train_parser.add_argument('--clusters', '-k', type=int, default=1, help='Number of clusters for kmeans') train_parser.add_argument('--threshold', type=float, help='Cutoff threshold of increase in adjusted R-squared value when' ' adding new predictors to the model') train_parser.add_argument('--nfolds', type=int, help='Number of folds to use in k-fold cross validation.') train_parser.add_argument('--regressor-functions', nargs='*', default=['inv_quadratic', 'inv_linear', 'inv_sqrt', 'sqrt', 'linear', 'quadratic', 'log', 'cross', 'div'], help='Regressor functions to use. Options are linear, quadratic, ' 'sqrt, inv_linear, inv_quadratic, inv_sqrt, log, cross, and div. ' 'Defaults to all.') train_parser.add_argument('--json', action='store_true', default=False, help='Output model in JSON format, rather than bespoke') """DUMP CSV ARGUMENTS""" dump_parser.add_argument('database', type=str, help='Name of the database file') dump_parser.add_argument('training_dc', type=str, help='Name of the data collection to dump') dump_parser.add_argument('--metrics', nargs='*', help='Only dump these metrics.') dump_parser.add_argument('--output', type=str, help='Name of file to dump CSV to') """TEST ARGUMENTS""" test_parser.add_argument('database', type=str, help='Name of the database file') test_parser.add_argument('experiment_dc', type=str, help='Name of the data collection to experiment on') test_parser.add_argument('model', type=str, help='Name of the model to use') test_parser.add_argument('target', type=str, help='Name of the target metric to predict') test_parser.add_argument('--show-prediction', action='store_true', default=False, help='If set, send the actual and predicted values to stdout.') """PLOT ARGUMENTS""" plot_parser.add_argument('model', type=str, help='Name of the model to use') plot_parser.add_argument('--plot-pcs-per-metric', action='store_true', default=False, help='If set, plots the breakdown of principal components per metric.') plot_parser.add_argument('--plot-metrics-per-pc', action='store_true', default=False, help='If set, plots the breakdown of metrics per principal component.') """CONVERT ARGUMENTS""" convert_parser.add_argument('input', type=str, help='Name of input model to convert from') convert_parser.add_argument('output', type=str, help='Name of output model to convert to') """LIST ARGUMENTS""" list_model_parser.add_argument('database', type=str, help='Name of the database file') """IMPORT ARGUMENTS""" 
import_model_parser.add_argument('database', type=str, help='Name of the database file') import_model_parser.add_argument('file', type=str, help='Name of the model file to import') import_model_parser.add_argument('source_name', type=str, help='Name of the source of the model (ie Eiger)') import_model_parser.add_argument('--description', type=str, default='', help='String to describe the model') """EXPORT ARGUMENTS""" export_model_parser.add_argument('database', type=str, help='Name of the database file') export_model_parser.add_argument('id', type=int, help='ID number identifying which model in the database to export ') export_model_parser.add_argument('file', type=str, help='Name of the file to export into') args = parser.parse_args() args.func(args) print "Done."
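# A minimal usage sketch of the CLI defined above; the database, data
# collection and metric names are hypothetical:
#
#   python Eiger.py train perf.db training_runs runtime --clusters 4 --json
#   python Eiger.py test perf.db experiment_runs training_runs.model runtime
#   python Eiger.py convert training_runs.model training_runs.json
#   python Eiger.py list perf.db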
bsd-3-clause
akhilari7/pa-dude
lib/python2.7/site-packages/pymongo/common.py
9
18768
# Copyright 2011-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. """Functions and classes common to multiple pymongo modules.""" import collections import warnings from bson.binary import (STANDARD, PYTHON_LEGACY, JAVA_LEGACY, CSHARP_LEGACY) from bson.codec_options import CodecOptions from bson.py3compat import string_type, integer_types, iteritems from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern from pymongo.read_preferences import (read_pref_mode_from_name, _ServerMode) from pymongo.ssl_support import validate_cert_reqs from pymongo.write_concern import WriteConcern # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024 ** 2) MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 MAX_WRITE_BATCH_SIZE = 1000 # What this version of PyMongo supports. MIN_SUPPORTED_WIRE_VERSION = 0 MAX_SUPPORTED_WIRE_VERSION = 3 # Frequency to call ismaster on servers, in seconds. HEARTBEAT_FREQUENCY = 10 # Frequency to process kill-cursors, in seconds. See MongoClient.close_cursor. KILL_CURSOR_FREQUENCY = 1 # How long to wait, in seconds, for a suitable server to be found before # aborting an operation. For example, if the client attempts an insert # during a replica set election, SERVER_SELECTION_TIMEOUT governs the # longest it is willing to wait for a new primary to be found. SERVER_SELECTION_TIMEOUT = 30 # Spec requires at least 500ms between ismaster calls. MIN_HEARTBEAT_INTERVAL = 0.5 # Default connectTimeout in seconds. CONNECT_TIMEOUT = 20.0 # Default value for maxPoolSize. MAX_POOL_SIZE = 100 # Default value for localThresholdMS. LOCAL_THRESHOLD_MS = 15 # mongod/s 2.6 and above return code 59 when a # command doesn't exist. mongod versions previous # to 2.6 and mongos 2.4.x return no error code # when a command does exist. mongos versions previous # to 2.4.0 return code 13390 when a command does not # exist. COMMAND_NOT_FOUND_CODES = (59, 13390, None) # Error codes to ignore if GridFS calls createIndex on a secondary UNAUTHORIZED_CODES = (13, 16547, 16548) def partition_node(node): """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 idx = node.rfind(':') if idx != -1: host, port = node[:idx], int(node[idx + 1:]) if host.startswith('['): host = host[1:-1] return host, port def clean_node(node): """Split and normalize a node name from an ismaster response.""" host, port = partition_node(node) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the ismaster response. 
return host.lower(), port def raise_config_error(key, dummy): """Raise ConfigurationError with the given key name.""" raise ConfigurationError("Unknown option %s" % (key,)) # Mapping of URI uuid representation options to valid subtypes. _UUID_REPRESENTATIONS = { 'standard': STANDARD, 'pythonLegacy': PYTHON_LEGACY, 'javaLegacy': JAVA_LEGACY, 'csharpLegacy': CSHARP_LEGACY } def validate_boolean(option, value): """Validates that 'value' is True or False.""" if isinstance(value, bool): return value raise TypeError("%s must be True or False" % (option,)) def validate_boolean_or_string(option, value): """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, string_type): if value not in ('true', 'false'): raise ValueError("The value of %s must be " "'true' or 'false'" % (option,)) return value == 'true' return validate_boolean(option, value) def validate_integer(option, value): """Validates that 'value' is an integer (or basestring representation). """ if isinstance(value, integer_types): return value elif isinstance(value, string_type): if not value.isdigit(): raise ValueError("The value of %s must be " "an integer" % (option,)) return int(value) raise TypeError("Wrong type for %s, value must be an integer" % (option,)) def validate_positive_integer(option, value): """Validate that 'value' is a positive integer, which does not include 0. """ val = validate_integer(option, value) if val <= 0: raise ValueError("The value of %s must be " "a positive integer" % (option,)) return val def validate_non_negative_integer(option, value): """Validate that 'value' is a positive integer or 0. """ val = validate_integer(option, value) if val < 0: raise ValueError("The value of %s must be " "a non negative integer" % (option,)) return val def validate_readable(option, value): """Validates that 'value' is file-like and readable. """ if value is None: return value # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) open(value, 'r').close() return value def validate_positive_integer_or_none(option, value): """Validate that 'value' is a positive integer or None. """ if value is None: return value return validate_positive_integer(option, value) def validate_non_negative_integer_or_none(option, value): """Validate that 'value' is a positive integer or 0 or None. """ if value is None: return value return validate_non_negative_integer(option, value) def validate_string(option, value): """Validates that 'value' is an instance of `basestring` for Python 2 or `str` for Python 3. """ if isinstance(value, string_type): return value raise TypeError("Wrong type for %s, value must be " "an instance of %s" % (option, string_type.__name__)) def validate_string_or_none(option, value): """Validates that 'value' is an instance of `basestring` or `None`. """ if value is None: return value return validate_string(option, value) def validate_int_or_basestring(option, value): """Validates that 'value' is an integer or string. """ if isinstance(value, integer_types): return value elif isinstance(value, string_type): if value.isdigit(): return int(value) return value raise TypeError("Wrong type for %s, value must be an " "integer or a string" % (option,)) def validate_positive_float(option, value): """Validates that 'value' is a float, or can be converted to one, and is positive. 
""" errmsg = "%s must be an integer or float" % (option,) try: value = float(value) except ValueError: raise ValueError(errmsg) except TypeError: raise TypeError(errmsg) # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: raise ValueError("%s must be greater than 0 and " "less than one billion" % (option,)) return value def validate_positive_float_or_zero(option, value): """Validates that 'value' is 0 or a positive float, or can be converted to 0 or a positive float. """ if value == 0 or value == "0": return 0 return validate_positive_float(option, value) def validate_timeout_or_none(option, value): """Validates a timeout specified in milliseconds returning a value in floating point seconds. """ if value is None: return value return validate_positive_float(option, value) / 1000.0 def validate_timeout_or_zero(option, value): """Validates a timeout specified in milliseconds returning a value in floating point seconds for the case where None is an error and 0 is valid. Setting the timeout to nothing in the URI string is a config error. """ if value is None: raise ConfigurationError("%s cannot be None" % (option, )) if value == 0 or value == "0": return 0 return validate_positive_float(option, value) / 1000.0 def validate_read_preference(dummy, value): """Validate a read preference. """ if not isinstance(value, _ServerMode): raise TypeError("%r is not a read preference." % (value,)) return value def validate_read_preference_mode(dummy, name): """Validate read preference mode for a MongoReplicaSetClient. """ try: return read_pref_mode_from_name(name) except ValueError: raise ValueError("%s is not a valid read preference" % (name,)) def validate_auth_mechanism(option, value): """Validate the authMechanism URI option. """ # CRAM-MD5 is for server testing only. Undocumented, # unsupported, may be removed at any time. You have # been warned. if value not in MECHANISMS and value != 'CRAM-MD5': raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) return value def validate_uuid_representation(dummy, value): """Validate the uuid representation option selected in the URI. """ try: return _UUID_REPRESENTATIONS[value] except KeyError: raise ValueError("%s is an invalid UUID representation. " "Must be one of " "%s" % (value, tuple(_UUID_REPRESENTATIONS))) def validate_read_preference_tags(name, value): """Parse readPreferenceTags if passed as a client kwarg. """ if not isinstance(value, list): value = [value] tag_sets = [] for tag_set in value: if tag_set == '': tag_sets.append({}) continue try: tag_sets.append(dict([tag.split(":") for tag in tag_set.split(",")])) except Exception: raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) return tag_sets _MECHANISM_PROPS = frozenset(['SERVICE_NAME']) def validate_auth_mechanism_properties(option, value): """Validate authMechanismProperties.""" value = validate_string(option, value) props = {} for opt in value.split(','): try: key, val = opt.split(':') except ValueError: raise ValueError("auth mechanism properties must be " "key:value pairs like SERVICE_NAME:" "mongodb, not %s." % (opt,)) if key not in _MECHANISM_PROPS: raise ValueError("%s is not a supported auth " "mechanism property. Must be one of " "%s." 
% (key, tuple(_MECHANISM_PROPS))) props[key] = val return props def validate_document_class(option, value): """Validate the document_class option.""" if not issubclass(value, (collections.MutableMapping, RawBSONDocument)): raise TypeError("%s must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " "sublass of collections.MutableMapping" % (option,)) return value def validate_is_mapping(option, value): """Validate the type of method arguments that expect a document.""" if not isinstance(value, collections.Mapping): raise TypeError("%s must be an instance of dict, bson.son.SON, or " "other type that inherits from " "collections.Mapping" % (option,)) def validate_is_document_type(option, value): """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (collections.MutableMapping, RawBSONDocument)): raise TypeError("%s must be an instance of dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or " "a type that inherits from " "collections.MutableMapping" % (option,)) def validate_ok_for_replace(replacement): """Validate a replacement document.""" validate_is_mapping("replacement", replacement) # Replacement can be {} if replacement and not isinstance(replacement, RawBSONDocument): first = next(iter(replacement)) if first.startswith('$'): raise ValueError('replacement can not include $ operators') def validate_ok_for_update(update): """Validate an update document.""" validate_is_mapping("update", update) # Update can not be {} if not update: raise ValueError('update only works with $ operators') first = next(iter(update)) if not first.startswith('$'): raise ValueError('update only works with $ operators') # journal is an alias for j, # wtimeoutms is an alias for wtimeout, VALIDATORS = { 'replicaset': validate_string_or_none, 'w': validate_int_or_basestring, 'wtimeout': validate_integer, 'wtimeoutms': validate_integer, 'fsync': validate_boolean_or_string, 'j': validate_boolean_or_string, 'journal': validate_boolean_or_string, 'connecttimeoutms': validate_timeout_or_none, 'maxpoolsize': validate_positive_integer_or_none, 'socketkeepalive': validate_boolean_or_string, 'sockettimeoutms': validate_timeout_or_none, 'waitqueuetimeoutms': validate_timeout_or_none, 'waitqueuemultiple': validate_non_negative_integer_or_none, 'ssl': validate_boolean_or_string, 'ssl_keyfile': validate_readable, 'ssl_certfile': validate_readable, 'ssl_cert_reqs': validate_cert_reqs, 'ssl_ca_certs': validate_readable, 'ssl_match_hostname': validate_boolean_or_string, 'readconcernlevel': validate_string_or_none, 'read_preference': validate_read_preference, 'readpreference': validate_read_preference_mode, 'readpreferencetags': validate_read_preference_tags, 'localthresholdms': validate_positive_float_or_zero, 'serverselectiontimeoutms': validate_timeout_or_zero, 'authmechanism': validate_auth_mechanism, 'authsource': validate_string, 'authmechanismproperties': validate_auth_mechanism_properties, 'document_class': validate_document_class, 'tz_aware': validate_boolean_or_string, 'uuidrepresentation': validate_uuid_representation, 'connect': validate_boolean, 'event_listeners': _validate_event_listeners } _AUTH_OPTIONS = frozenset(['authmechanismproperties']) def validate_auth_option(option, value): """Validate optional authentication parameters. 
""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: raise ConfigurationError('Unknown ' 'authentication option: %s' % (option,)) return lower, value def validate(option, value): """Generic validation function. """ lower = option.lower() validator = VALIDATORS.get(lower, raise_config_error) value = validator(option, value) return lower, value def get_validated_options(options): """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed """ validated_options = {} for opt, value in iteritems(options): lower = opt.lower() try: validator = VALIDATORS.get(lower, raise_config_error) value = validator(opt, value) except (ValueError, ConfigurationError) as exc: warnings.warn(str(exc)) else: validated_options[lower] = value return validated_options WRITE_CONCERN_OPTIONS = frozenset([ 'w', 'wtimeout', 'wtimeoutms', 'fsync', 'j', 'journal' ]) class BaseObject(object): """A base class that provides attributes and methods common to multiple pymongo classes. SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. """ def __init__(self, codec_options, read_preference, write_concern, read_concern): if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of " "bson.codec_options.CodecOptions") self.__codec_options = codec_options if not isinstance(read_preference, _ServerMode): raise TypeError("%r is not valid for read_preference. See " "pymongo.read_preferences for valid " "options." % (read_preference,)) self.__read_preference = read_preference if not isinstance(write_concern, WriteConcern): raise TypeError("write_concern must be an instance of " "pymongo.write_concern.WriteConcern") self.__write_concern = write_concern if not isinstance(read_concern, ReadConcern): raise TypeError("read_concern must be an instance of " "pymongo.read_concern.ReadConcern") self.__read_concern = read_concern @property def codec_options(self): """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. """ return self.__codec_options @property def write_concern(self): """Read only access to the :class:`~pymongo.write_concern.WriteConcern` of this instance. .. versionchanged:: 3.0 The :attr:`write_concern` attribute is now read only. """ return self.__write_concern @property def read_preference(self): """Read only access to the read preference of this instance. .. versionchanged:: 3.0 The :attr:`read_preference` attribute is now read only. """ return self.__read_preference @property def read_concern(self): """Read only access to the read concern of this instance. .. versionadded:: 3.2 """ return self.__read_concern
mit
gskachkov/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
119
31715
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from mod_pywebsocket import common from mod_pywebsocket import util from mod_pywebsocket.http_header_util import quote_if_necessary _available_processors = {} _compression_extension_names = [] class ExtensionProcessorInterface(object): def __init__(self, request): self._request = request self._active = True def request(self): return self._request def name(self): return None def check_consistency_with_other_processors(self, processors): pass def set_active(self, active): self._active = active def is_active(self): return self._active def _get_extension_response_internal(self): return None def get_extension_response(self): if self._active: response = self._get_extension_response_internal() if response is None: self._active = False return response return None def _setup_stream_options_internal(self, stream_options): pass def setup_stream_options(self, stream_options): if self._active: self._setup_stream_options_internal(stream_options) def _log_outgoing_compression_ratio( logger, original_bytes, filtered_bytes, average_ratio): # Print inf when ratio is not available. ratio = float('inf') if original_bytes != 0: ratio = float(filtered_bytes) / original_bytes logger.debug('Outgoing compression ratio: %f (average: %f)' % (ratio, average_ratio)) def _log_incoming_compression_ratio( logger, received_bytes, filtered_bytes, average_ratio): # Print inf when ratio is not available. ratio = float('inf') if filtered_bytes != 0: ratio = float(received_bytes) / filtered_bytes logger.debug('Incoming compression ratio: %f (average: %f)' % (ratio, average_ratio)) def _parse_window_bits(bits): """Return parsed integer value iff the given string conforms to the grammar of the window bits extension parameters. """ if bits is None: raise ValueError('Value is required') # For non integer values such as "10.0", ValueError will be raised. int_bits = int(bits) # First condition is to drop leading zero case e.g. "08". 
if bits != str(int_bits) or int_bits < 8 or int_bits > 15: raise ValueError('Invalid value: %r' % bits) return int_bits class _AverageRatioCalculator(object): """Stores total bytes of original and result data, and calculates average result / original ratio. """ def __init__(self): self._total_original_bytes = 0 self._total_result_bytes = 0 def add_original_bytes(self, value): self._total_original_bytes += value def add_result_bytes(self, value): self._total_result_bytes += value def get_average_ratio(self): if self._total_original_bytes != 0: return (float(self._total_result_bytes) / self._total_original_bytes) else: return float('inf') class DeflateFrameExtensionProcessor(ExtensionProcessorInterface): """deflate-frame extension processor. Specification: http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate """ _WINDOW_BITS_PARAM = 'max_window_bits' _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover' def __init__(self, request): ExtensionProcessorInterface.__init__(self, request) self._logger = util.get_class_logger(self) self._response_window_bits = None self._response_no_context_takeover = False self._bfinal = False # Calculates # (Total outgoing bytes supplied to this filter) / # (Total bytes sent to the network after applying this filter) self._outgoing_average_ratio_calculator = _AverageRatioCalculator() # Calculates # (Total bytes received from the network) / # (Total incoming bytes obtained after applying this filter) self._incoming_average_ratio_calculator = _AverageRatioCalculator() def name(self): return common.DEFLATE_FRAME_EXTENSION def _get_extension_response_internal(self): # Any unknown parameter will be just ignored. window_bits = None if self._request.has_parameter(self._WINDOW_BITS_PARAM): window_bits = self._request.get_parameter_value( self._WINDOW_BITS_PARAM) try: window_bits = _parse_window_bits(window_bits) except ValueError, e: return None no_context_takeover = self._request.has_parameter( self._NO_CONTEXT_TAKEOVER_PARAM) if (no_context_takeover and self._request.get_parameter_value( self._NO_CONTEXT_TAKEOVER_PARAM) is not None): return None self._rfc1979_deflater = util._RFC1979Deflater( window_bits, no_context_takeover) self._rfc1979_inflater = util._RFC1979Inflater() self._compress_outgoing = True response = common.ExtensionParameter(self._request.name()) if self._response_window_bits is not None: response.add_parameter( self._WINDOW_BITS_PARAM, str(self._response_window_bits)) if self._response_no_context_takeover: response.add_parameter( self._NO_CONTEXT_TAKEOVER_PARAM, None) self._logger.debug( 'Enable %s extension (' 'request: window_bits=%s; no_context_takeover=%r, ' 'response: window_wbits=%s; no_context_takeover=%r)' % (self._request.name(), window_bits, no_context_takeover, self._response_window_bits, self._response_no_context_takeover)) return response def _setup_stream_options_internal(self, stream_options): class _OutgoingFilter(object): def __init__(self, parent): self._parent = parent def filter(self, frame): self._parent._outgoing_filter(frame) class _IncomingFilter(object): def __init__(self, parent): self._parent = parent def filter(self, frame): self._parent._incoming_filter(frame) stream_options.outgoing_frame_filters.append( _OutgoingFilter(self)) stream_options.incoming_frame_filters.insert( 0, _IncomingFilter(self)) def set_response_window_bits(self, value): self._response_window_bits = value def set_response_no_context_takeover(self, value): self._response_no_context_takeover = value def set_bfinal(self, value): 
self._bfinal = value def enable_outgoing_compression(self): self._compress_outgoing = True def disable_outgoing_compression(self): self._compress_outgoing = False def _outgoing_filter(self, frame): """Transform outgoing frames. This method is called only by an _OutgoingFilter instance. """ original_payload_size = len(frame.payload) self._outgoing_average_ratio_calculator.add_original_bytes( original_payload_size) if (not self._compress_outgoing or common.is_control_opcode(frame.opcode)): self._outgoing_average_ratio_calculator.add_result_bytes( original_payload_size) return frame.payload = self._rfc1979_deflater.filter( frame.payload, bfinal=self._bfinal) frame.rsv1 = 1 filtered_payload_size = len(frame.payload) self._outgoing_average_ratio_calculator.add_result_bytes( filtered_payload_size) _log_outgoing_compression_ratio( self._logger, original_payload_size, filtered_payload_size, self._outgoing_average_ratio_calculator.get_average_ratio()) def _incoming_filter(self, frame): """Transform incoming frames. This method is called only by an _IncomingFilter instance. """ received_payload_size = len(frame.payload) self._incoming_average_ratio_calculator.add_result_bytes( received_payload_size) if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode): self._incoming_average_ratio_calculator.add_original_bytes( received_payload_size) return frame.payload = self._rfc1979_inflater.filter(frame.payload) frame.rsv1 = 0 filtered_payload_size = len(frame.payload) self._incoming_average_ratio_calculator.add_original_bytes( filtered_payload_size) _log_incoming_compression_ratio( self._logger, received_payload_size, filtered_payload_size, self._incoming_average_ratio_calculator.get_average_ratio()) _available_processors[common.DEFLATE_FRAME_EXTENSION] = ( DeflateFrameExtensionProcessor) _compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION) _available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = ( DeflateFrameExtensionProcessor) _compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION) def _parse_compression_method(data): """Parses the value of "method" extension parameter.""" return common.parse_extensions(data, allow_quoted_string=True) def _create_accepted_method_desc(method_name, method_params): """Creates accepted-method-desc from given method name and parameters""" extension = common.ExtensionParameter(method_name) for name, value in method_params: extension.add_parameter(name, value) return common.format_extension(extension) class CompressionExtensionProcessorBase(ExtensionProcessorInterface): """Base class for perframe-compress and permessage-compress extension.""" _METHOD_PARAM = 'method' def __init__(self, request): ExtensionProcessorInterface.__init__(self, request) self._logger = util.get_class_logger(self) self._compression_method_name = None self._compression_processor = None self._compression_processor_hook = None def name(self): return '' def _lookup_compression_processor(self, method_desc): return None def _get_compression_processor_response(self): """Looks up the compression processor based on the self._request and returns the compression processor's response. """ method_list = self._request.get_parameter_value(self._METHOD_PARAM) if method_list is None: return None methods = _parse_compression_method(method_list) if methods is None: return None comression_processor = None # The current implementation tries only the first method that matches # supported algorithm. Following methods aren't tried even if the # first one is rejected. 
# TODO(bashi): Need to clarify this behavior. for method_desc in methods: compression_processor = self._lookup_compression_processor( method_desc) if compression_processor is not None: self._compression_method_name = method_desc.name() break if compression_processor is None: return None if self._compression_processor_hook: self._compression_processor_hook(compression_processor) processor_response = compression_processor.get_extension_response() if processor_response is None: return None self._compression_processor = compression_processor return processor_response def _get_extension_response_internal(self): processor_response = self._get_compression_processor_response() if processor_response is None: return None response = common.ExtensionParameter(self._request.name()) accepted_method_desc = _create_accepted_method_desc( self._compression_method_name, processor_response.get_parameters()) response.add_parameter(self._METHOD_PARAM, accepted_method_desc) self._logger.debug( 'Enable %s extension (method: %s)' % (self._request.name(), self._compression_method_name)) return response def _setup_stream_options_internal(self, stream_options): if self._compression_processor is None: return self._compression_processor.setup_stream_options(stream_options) def set_compression_processor_hook(self, hook): self._compression_processor_hook = hook def get_compression_processor(self): return self._compression_processor class PerFrameCompressExtensionProcessor(CompressionExtensionProcessorBase): """perframe-compress processor. Specification: http://tools.ietf.org/html/draft-ietf-hybi-websocket-perframe-compression """ _DEFLATE_METHOD = 'deflate' def __init__(self, request): CompressionExtensionProcessorBase.__init__(self, request) def name(self): return common.PERFRAME_COMPRESSION_EXTENSION def _lookup_compression_processor(self, method_desc): if method_desc.name() == self._DEFLATE_METHOD: return DeflateFrameExtensionProcessor(method_desc) return None _available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = ( PerFrameCompressExtensionProcessor) _compression_extension_names.append(common.PERFRAME_COMPRESSION_EXTENSION) class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface): """permessage-deflate extension processor. It's also used for permessage-compress extension when the deflate method is chosen. Specification: http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08 """ _S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits' _S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover' _C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits' _C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover' def __init__(self, request, draft08=True): """Construct PerMessageDeflateExtensionProcessor Args: draft08: Follow the constraints on the parameters that were not specified for permessage-compress but are specified for permessage-deflate as on draft-ietf-hybi-permessage-compression-08. """ ExtensionProcessorInterface.__init__(self, request) self._logger = util.get_class_logger(self) self._c2s_max_window_bits = None self._c2s_no_context_takeover = False self._draft08 = draft08 def name(self): return 'deflate' def _get_extension_response_internal(self): if self._draft08: for name in self._request.get_parameter_names(): if name not in [self._S2C_MAX_WINDOW_BITS_PARAM, self._S2C_NO_CONTEXT_TAKEOVER_PARAM, self._C2S_MAX_WINDOW_BITS_PARAM]: self._logger.debug('Unknown parameter: %r', name) return None else: # Any unknown parameter will be just ignored. 
pass s2c_max_window_bits = None if self._request.has_parameter(self._S2C_MAX_WINDOW_BITS_PARAM): s2c_max_window_bits = self._request.get_parameter_value( self._S2C_MAX_WINDOW_BITS_PARAM) try: s2c_max_window_bits = _parse_window_bits(s2c_max_window_bits) except ValueError, e: self._logger.debug('Bad %s parameter: %r', self._S2C_MAX_WINDOW_BITS_PARAM, e) return None s2c_no_context_takeover = self._request.has_parameter( self._S2C_NO_CONTEXT_TAKEOVER_PARAM) if (s2c_no_context_takeover and self._request.get_parameter_value( self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None): self._logger.debug('%s parameter must not have a value: %r', self._S2C_NO_CONTEXT_TAKEOVER_PARAM, s2c_no_context_takeover) return None c2s_max_window_bits = self._request.has_parameter( self._C2S_MAX_WINDOW_BITS_PARAM) if (self._draft08 and c2s_max_window_bits and self._request.get_parameter_value( self._C2S_MAX_WINDOW_BITS_PARAM) is not None): self._logger.debug('%s parameter must not have a value in a ' 'client\'s opening handshake: %r', self._C2S_MAX_WINDOW_BITS_PARAM, c2s_max_window_bits) return None self._rfc1979_deflater = util._RFC1979Deflater( s2c_max_window_bits, s2c_no_context_takeover) self._rfc1979_inflater = util._RFC1979Inflater() self._framer = _PerMessageDeflateFramer( s2c_max_window_bits, s2c_no_context_takeover) self._framer.set_bfinal(False) self._framer.set_compress_outgoing_enabled(True) response = common.ExtensionParameter(self._request.name()) if s2c_max_window_bits is not None: response.add_parameter( self._S2C_MAX_WINDOW_BITS_PARAM, str(s2c_max_window_bits)) if s2c_no_context_takeover: response.add_parameter( self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None) if self._c2s_max_window_bits is not None: if self._draft08 and c2s_max_window_bits: self._logger.debug('Processor is configured to use %s but ' 'the client cannot accept it', self._C2S_MAX_WINDOW_BITS_PARAM) return None response.add_parameter( self._C2S_MAX_WINDOW_BITS_PARAM, str(self._c2s_max_window_bits)) if self._c2s_no_context_takeover: response.add_parameter( self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None) self._logger.debug( 'Enable %s extension (' 'request: s2c_max_window_bits=%s; s2c_no_context_takeover=%r, ' 'response: c2s_max_window_bits=%s; c2s_no_context_takeover=%r)' % (self._request.name(), s2c_max_window_bits, s2c_no_context_takeover, self._c2s_max_window_bits, self._c2s_no_context_takeover)) return response def _setup_stream_options_internal(self, stream_options): self._framer.setup_stream_options(stream_options) def set_c2s_max_window_bits(self, value): """If this option is specified, this class adds the c2s_max_window_bits extension parameter to the handshake response, but doesn't reduce the LZ77 sliding window size of its inflater. I.e., you can use this for testing client implementation but cannot reduce memory usage of this class. If this method has been called with True and an offer without the c2s_max_window_bits extension parameter is received, - (When processing the permessage-deflate extension) this processor declines the request. - (When processing the permessage-compress extension) this processor accepts the request. """ self._c2s_max_window_bits = value def set_c2s_no_context_takeover(self, value): """If this option is specified, this class adds the c2s_no_context_takeover extension parameter to the handshake response, but doesn't reset inflater for each message. I.e., you can use this for testing client implementation but cannot reduce memory usage of this class. 
""" self._c2s_no_context_takeover = value def set_bfinal(self, value): self._framer.set_bfinal(value) def enable_outgoing_compression(self): self._framer.set_compress_outgoing_enabled(True) def disable_outgoing_compression(self): self._framer.set_compress_outgoing_enabled(False) class _PerMessageDeflateFramer(object): """A framer for extensions with per-message DEFLATE feature.""" def __init__(self, deflate_max_window_bits, deflate_no_context_takeover): self._logger = util.get_class_logger(self) self._rfc1979_deflater = util._RFC1979Deflater( deflate_max_window_bits, deflate_no_context_takeover) self._rfc1979_inflater = util._RFC1979Inflater() self._bfinal = False self._compress_outgoing_enabled = False # True if a message is fragmented and compression is ongoing. self._compress_ongoing = False # Calculates # (Total outgoing bytes supplied to this filter) / # (Total bytes sent to the network after applying this filter) self._outgoing_average_ratio_calculator = _AverageRatioCalculator() # Calculates # (Total bytes received from the network) / # (Total incoming bytes obtained after applying this filter) self._incoming_average_ratio_calculator = _AverageRatioCalculator() def set_bfinal(self, value): self._bfinal = value def set_compress_outgoing_enabled(self, value): self._compress_outgoing_enabled = value def _process_incoming_message(self, message, decompress): if not decompress: return message received_payload_size = len(message) self._incoming_average_ratio_calculator.add_result_bytes( received_payload_size) message = self._rfc1979_inflater.filter(message) filtered_payload_size = len(message) self._incoming_average_ratio_calculator.add_original_bytes( filtered_payload_size) _log_incoming_compression_ratio( self._logger, received_payload_size, filtered_payload_size, self._incoming_average_ratio_calculator.get_average_ratio()) return message def _process_outgoing_message(self, message, end, binary): if not binary: message = message.encode('utf-8') if not self._compress_outgoing_enabled: return message original_payload_size = len(message) self._outgoing_average_ratio_calculator.add_original_bytes( original_payload_size) message = self._rfc1979_deflater.filter( message, flush=end, bfinal=self._bfinal) filtered_payload_size = len(message) self._outgoing_average_ratio_calculator.add_result_bytes( filtered_payload_size) _log_outgoing_compression_ratio( self._logger, original_payload_size, filtered_payload_size, self._outgoing_average_ratio_calculator.get_average_ratio()) if not self._compress_ongoing: self._outgoing_frame_filter.set_compression_bit() self._compress_ongoing = not end return message def _process_incoming_frame(self, frame): if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode): self._incoming_message_filter.decompress_next_message() frame.rsv1 = 0 def _process_outgoing_frame(self, frame, compression_bit): if (not compression_bit or common.is_control_opcode(frame.opcode)): return frame.rsv1 = 1 def setup_stream_options(self, stream_options): """Creates filters and sets them to the StreamOptions.""" class _OutgoingMessageFilter(object): def __init__(self, parent): self._parent = parent def filter(self, message, end=True, binary=False): return self._parent._process_outgoing_message( message, end, binary) class _IncomingMessageFilter(object): def __init__(self, parent): self._parent = parent self._decompress_next_message = False def decompress_next_message(self): self._decompress_next_message = True def filter(self, message): message = 
self._parent._process_incoming_message( message, self._decompress_next_message) self._decompress_next_message = False return message self._outgoing_message_filter = _OutgoingMessageFilter(self) self._incoming_message_filter = _IncomingMessageFilter(self) stream_options.outgoing_message_filters.append( self._outgoing_message_filter) stream_options.incoming_message_filters.append( self._incoming_message_filter) class _OutgoingFrameFilter(object): def __init__(self, parent): self._parent = parent self._set_compression_bit = False def set_compression_bit(self): self._set_compression_bit = True def filter(self, frame): self._parent._process_outgoing_frame( frame, self._set_compression_bit) self._set_compression_bit = False class _IncomingFrameFilter(object): def __init__(self, parent): self._parent = parent def filter(self, frame): self._parent._process_incoming_frame(frame) self._outgoing_frame_filter = _OutgoingFrameFilter(self) self._incoming_frame_filter = _IncomingFrameFilter(self) stream_options.outgoing_frame_filters.append( self._outgoing_frame_filter) stream_options.incoming_frame_filters.append( self._incoming_frame_filter) stream_options.encode_text_message_to_utf8 = False _available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = ( PerMessageDeflateExtensionProcessor) # TODO(tyoshino): Reorganize class names. _compression_extension_names.append('deflate') class PerMessageCompressExtensionProcessor( CompressionExtensionProcessorBase): """permessage-compress extension processor. Specification: http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression """ _DEFLATE_METHOD = 'deflate' def __init__(self, request): CompressionExtensionProcessorBase.__init__(self, request) def name(self): return common.PERMESSAGE_COMPRESSION_EXTENSION def _lookup_compression_processor(self, method_desc): if method_desc.name() == self._DEFLATE_METHOD: return PerMessageDeflateExtensionProcessor(method_desc, False) return None _available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = ( PerMessageCompressExtensionProcessor) _compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION) class MuxExtensionProcessor(ExtensionProcessorInterface): """WebSocket multiplexing extension processor.""" _QUOTA_PARAM = 'quota' def __init__(self, request): ExtensionProcessorInterface.__init__(self, request) self._quota = 0 self._extensions = [] def name(self): return common.MUX_EXTENSION def check_consistency_with_other_processors(self, processors): before_mux = True for processor in processors: name = processor.name() if name == self.name(): before_mux = False continue if not processor.is_active(): continue if before_mux: # Mux extension cannot be used after extensions # that depend on frame boundary, extension data field, or any # reserved bits which are attributed to each frame. if (name == common.PERFRAME_COMPRESSION_EXTENSION or name == common.DEFLATE_FRAME_EXTENSION or name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION): self.set_active(False) return else: # Mux extension should not be applied before any history-based # compression extension. 
if (name == common.PERFRAME_COMPRESSION_EXTENSION or name == common.DEFLATE_FRAME_EXTENSION or name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or name == common.PERMESSAGE_COMPRESSION_EXTENSION or name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION): self.set_active(False) return def _get_extension_response_internal(self): self._active = False quota = self._request.get_parameter_value(self._QUOTA_PARAM) if quota is not None: try: quota = int(quota) except ValueError, e: return None if quota < 0 or quota >= 2 ** 32: return None self._quota = quota self._active = True return common.ExtensionParameter(common.MUX_EXTENSION) def _setup_stream_options_internal(self, stream_options): pass def set_quota(self, quota): self._quota = quota def quota(self): return self._quota def set_extensions(self, extensions): self._extensions = extensions def extensions(self): return self._extensions _available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor def get_extension_processor(extension_request): processor_class = _available_processors.get(extension_request.name()) if processor_class is None: return None return processor_class(extension_request) def is_compression_extension(extension_name): return extension_name in _compression_extension_names # vi:sts=4 sw=4 et
bsd-3-clause
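The _PerMessageDeflateFramer above routes its compression-ratio logging through _AverageRatioCalculator instances whose definition falls outside this excerpt. A minimal sketch of the interface implied by the calls here (add_original_bytes, add_result_bytes, get_average_ratio), assuming the conventional result-over-original ratio; the None guard is this sketch's choice, not necessarily the module's behavior:

class AverageRatioCalculator(object):
    """Accumulates original and filtered byte counts for ratio logging."""

    def __init__(self):
        self._total_original_bytes = 0
        self._total_result_bytes = 0

    def add_original_bytes(self, n):
        self._total_original_bytes += n

    def add_result_bytes(self, n):
        self._total_result_bytes += n

    def get_average_ratio(self):
        # Running ratio of bytes after filtering to bytes before filtering.
        if self._total_original_bytes == 0:
            return None  # sketch's choice: no data seen yet
        return float(self._total_result_bytes) / self._total_original_bytes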
beblount/Steer-Clear-Backend-Web
env/Lib/encodings/__init__.py
406
5698
""" Standard "encodings" Package Standard Python encoding modules are stored in this package directory. Codec modules must have names corresponding to normalized encoding names as defined in the normalize_encoding() function below, e.g. 'utf-8' must be implemented by the module 'utf_8.py'. Each codec module must export the following interface: * getregentry() -> codecs.CodecInfo object The getregentry() API must a CodecInfo object with encoder, decoder, incrementalencoder, incrementaldecoder, streamwriter and streamreader atttributes which adhere to the Python Codec Interface Standard. In addition, a module may optionally also define the following APIs which are then used by the package's codec search function: * getaliases() -> sequence of encoding name strings to use as aliases Alias names returned by getaliases() must be normalized encoding names as defined by normalize_encoding(). Written by Marc-Andre Lemburg ([email protected]). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import codecs from encodings import aliases import __builtin__ _cache = {} _unknown = '--unknown--' _import_tail = ['*'] _norm_encoding_map = (' . ' '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ ' ' abcdefghijklmnopqrstuvwxyz ' ' ' ' ' ' ') _aliases = aliases.aliases class CodecRegistryError(LookupError, SystemError): pass def normalize_encoding(encoding): """ Normalize an encoding name. Normalization works as follows: all non-alphanumeric characters except the dot used for Python package names are collapsed and replaced with a single underscore, e.g. ' -;#' becomes '_'. Leading and trailing underscores are removed. Note that encoding names should be ASCII only; if they do use non-ASCII characters, these must be Latin-1 compatible. """ # Make sure we have an 8-bit string, because .translate() works # differently for Unicode strings. if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode): # Note that .encode('latin-1') does *not* use the codec # registry, so this call doesn't recurse. (See unicodeobject.c # PyUnicode_AsEncodedString() for details) encoding = encoding.encode('latin-1') return '_'.join(encoding.translate(_norm_encoding_map).split()) def search_function(encoding): # Cache lookup entry = _cache.get(encoding, _unknown) if entry is not _unknown: return entry # Import the module: # # First try to find an alias for the normalized encoding # name and lookup the module using the aliased name, then try to # lookup the module using the standard import scheme, i.e. first # try in the encodings package, then at top-level. # norm_encoding = normalize_encoding(encoding) aliased_encoding = _aliases.get(norm_encoding) or \ _aliases.get(norm_encoding.replace('.', '_')) if aliased_encoding is not None: modnames = [aliased_encoding, norm_encoding] else: modnames = [norm_encoding] for modname in modnames: if not modname or '.' in modname: continue try: # Import is absolute to prevent the possibly malicious import of a # module with side-effects that is not in the 'encodings' package. mod = __import__('encodings.' 
+ modname, fromlist=_import_tail, level=0) except ImportError: pass else: break else: mod = None try: getregentry = mod.getregentry except AttributeError: # Not a codec module mod = None if mod is None: # Cache misses _cache[encoding] = None return None # Now ask the module for the registry entry entry = getregentry() if not isinstance(entry, codecs.CodecInfo): if not 4 <= len(entry) <= 7: raise CodecRegistryError,\ 'module "%s" (%s) failed to register' % \ (mod.__name__, mod.__file__) if not hasattr(entry[0], '__call__') or \ not hasattr(entry[1], '__call__') or \ (entry[2] is not None and not hasattr(entry[2], '__call__')) or \ (entry[3] is not None and not hasattr(entry[3], '__call__')) or \ (len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \ (len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')): raise CodecRegistryError,\ 'incompatible codecs in module "%s" (%s)' % \ (mod.__name__, mod.__file__) if len(entry)<7 or entry[6] is None: entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) entry = codecs.CodecInfo(*entry) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if alias not in _aliases: _aliases[alias] = modname # Return the registry entry return entry # Register the search_function in the Python codec registry codecs.register(search_function)
mit
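A short illustration of how the search function above behaves in practice: codecs.lookup() lower-cases its argument first, then normalize_encoding() collapses the remaining punctuation, so all of the spellings below resolve to the same module, encodings.utf_8:

import codecs

# Every spelling normalizes to 'utf_8' and imports the same codec module.
for name in ('utf-8', 'UTF 8', 'utf_8'):
    print(name, '->', codecs.lookup(name).name)  # prints 'utf-8' each time

# Results are memoized in the module-level _cache dict, so the import
# machinery in search_function() runs at most once per normalized name.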
vesellov/bitdust.devel
customer/data_sender.py
1
14665
#!/usr/bin/python # data_sender.py # # Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io # # This file (data_sender.py) is part of BitDust Software. # # BitDust is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # BitDust Software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with BitDust Software. If not, see <http://www.gnu.org/licenses/>. # # Please contact us if you have any questions at [email protected] # # # # """ .. module:: data_sender. .. raw:: html <a href="https://bitdust.io/automats/data_sender/data_sender.png" target="_blank"> <img src="https://bitdust.io/automats/data_sender/data_sender.png" style="max-width:100%;"> </a> A state machine to manage data sending process, acts very simple: 1) when new local data is created it tries to send it to the correct supplier 2) wait while ``p2p.io_throttle`` is doing some data transmission to remote suppliers 3) calls ``p2p.backup_matrix.ScanBlocksToSend()`` to get a list of pieces needs to be send 4) this machine is restarted every minute to check if some more data needs to be send 5) also can be restarted at any time when it is needed EVENTS: * :red:`block-acked` * :red:`block-failed` * :red:`init` * :red:`new-data` * :red:`restart` * :red:`scan-done` * :red:`timer-1min` * :red:`timer-1sec` """ #------------------------------------------------------------------------------ from __future__ import absolute_import from io import open #------------------------------------------------------------------------------ _Debug = True _DebugLevel = 12 #------------------------------------------------------------------------------ import os import time #------------------------------------------------------------------------------ from logs import lg from automats import automat from automats import global_state from lib import misc from lib import packetid from contacts import contactsdb from userid import my_id from main import settings from p2p import contact_status from . import io_throttle #------------------------------------------------------------------------------ _DataSender = None _ShutdownFlag = False #------------------------------------------------------------------------------ def A(event=None, arg=None): """ Access method to interact with the state machine. """ global _DataSender if _DataSender is None: _DataSender = DataSender( name='data_sender', state='READY', debug_level=_DebugLevel, log_events=_Debug, log_transitions=_Debug, ) if event is not None: _DataSender.automat(event, arg) return _DataSender def Destroy(): """ Destroy the state machine and remove the instance from memory. """ global _DataSender if _DataSender is None: return _DataSender.destroy() del _DataSender _DataSender = None class DataSender(automat.Automat): """ A class to manage process of sending data packets to remote suppliers. 
""" timers = { 'timer-1min': (60, ['READY']), 'timer-1min': (60, ['READY']), 'timer-1sec': (1.0, ['SENDING']), } statistic = {} def state_changed(self, oldstate, newstate, event, arg): global_state.set_global_state('DATASEND ' + newstate) def A(self, event, arg): #---READY--- if self.state == 'READY': if event == 'new-data' or event == 'timer-1min' or event == 'restart': self.state = 'SCAN_BLOCKS' self.doScanAndQueue(arg) elif event == 'init': pass #---SCAN_BLOCKS--- elif self.state == 'SCAN_BLOCKS': if event == 'scan-done' and self.isQueueEmpty(arg): self.state = 'READY' self.doRemoveUnusedFiles(arg) elif event == 'scan-done' and not self.isQueueEmpty(arg): self.state = 'SENDING' #---SENDING--- elif self.state == 'SENDING': if event == 'restart' or ( ( event == 'timer-1sec' or event == 'block-acked' or event == 'block-failed' or event == 'new-data' ) and self.isQueueEmpty(arg) ): self.state = 'SCAN_BLOCKS' self.doScanAndQueue(arg) return None def isQueueEmpty(self, arg): if not arg: return io_throttle.IsSendingQueueEmpty() remoteID, _ = arg return io_throttle.OkToSend(remoteID) def doScanAndQueue(self, arg): global _ShutdownFlag if _Debug: lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag) if _Debug: log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w') log.write(u'doScanAndQueue %s\n' % time.asctime()) # .decode('utf-8') if _ShutdownFlag: if _Debug: log.write(u'doScanAndQueue _ShutdownFlag is True\n') self.automat('scan-done') if _Debug: log.flush() log.close() return for customer_idurl in contactsdb.known_customers(): if '' not in contactsdb.suppliers(customer_idurl): from storage import backup_matrix for backupID in misc.sorted_backup_ids( list(backup_matrix.local_files().keys()), True): this_customer_idurl = packetid.CustomerIDURL(backupID) if this_customer_idurl != customer_idurl: continue packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID) if _Debug: log.write(u'%s\n' % packetsBySupplier) for supplierNum in packetsBySupplier.keys(): supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl) if not supplier_idurl: lg.warn('unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%s' % ( supplierNum, backupID, customer_idurl)) continue for packetID in packetsBySupplier[supplierNum]: backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID) if backupID_ != backupID: lg.warn('unexpected backupID supplierNum=%s for %s, customer_idurl=%s' % ( packetID, backupID, customer_idurl)) continue if supplierNum_ != supplierNum: lg.warn('unexpected supplierNum %s for %s, customer_idurl=%s' % ( packetID, backupID, customer_idurl)) continue if io_throttle.HasPacketInSendQueue( supplier_idurl, packetID): if _Debug: log.write(u'%s already in sending queue for %s\n' % (packetID, supplier_idurl)) continue if not io_throttle.OkToSend(supplier_idurl): if _Debug: log.write(u'skip, not ok to send %s\n' % supplier_idurl) continue customerGlobalID, pathID = packetid.SplitPacketID(packetID) # tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, []) # if len(tranByID) > 3: # log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID))) # continue customerGlobalID, pathID = packetid.SplitPacketID(packetID) filename = os.path.join( settings.getLocalBackupsDir(), customerGlobalID, pathID, ) if not os.path.isfile(filename): if _Debug: log.write(u'%s is not a file\n' % filename) continue if io_throttle.QueueSendFile( filename, packetID, supplier_idurl, my_id.getLocalID(), self._packetAcked, self._packetFailed, ): if 
_Debug: log.write(u'io_throttle.QueueSendFile %s\n' % packetID) else: if _Debug: log.write(u'io_throttle.QueueSendFile FAILED %s\n' % packetID) # lg.out(6, ' %s for %s' % (packetID, backupID)) # DEBUG # break self.automat('scan-done') if _Debug: log.flush() log.close() # def doPrintStats(self, arg): # """ # """ # if lg.is_debug(18): # transfers = transport_control.current_transfers() # bytes_stats = transport_control.current_bytes_transferred() # s = '' # for info in transfers: # s += '%s ' % (diskspace.MakeStringFromBytes(bytes_stats[info.transfer_id]).replace(' ', '').replace('bytes', 'b')) # lg.out(0, 'transfers: ' + s[:120]) def doRemoveUnusedFiles(self, arg): # we want to remove files for this block # because we only need them during rebuilding if settings.getBackupsKeepLocalCopies() is True: # if user set this in settings - he want to keep the local files return # ... user do not want to keep local backups if settings.getGeneralWaitSuppliers() is True: from customer import fire_hire # but he want to be sure - all suppliers are green for a long time if len(contact_status.listOfflineSuppliers()) > 0 or time.time( ) - fire_hire.GetLastFireTime() < 24 * 60 * 60: # some people are not there or we do not have stable team yet # do not remove the files because we need it to rebuild return count = 0 from storage import backup_matrix from storage import restore_monitor from storage import backup_rebuilder if _Debug: lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles') for backupID in misc.sorted_backup_ids( list(backup_matrix.local_files().keys())): if restore_monitor.IsWorking(backupID): if _Debug: lg.out( _DebugLevel, ' %s : SKIP, because restoring' % backupID) continue if backup_rebuilder.IsBackupNeedsWork(backupID): if _Debug: lg.out( _DebugLevel, ' %s : SKIP, because needs rebuilding' % backupID) continue if not backup_rebuilder.ReadStoppedFlag(): if backup_rebuilder.A().currentBackupID is not None: if backup_rebuilder.A().currentBackupID == backupID: if _Debug: lg.out( _DebugLevel, ' %s : SKIP, because rebuilding is in process' % backupID) continue packets = backup_matrix.ScanBlocksToRemove( backupID, settings.getGeneralWaitSuppliers()) for packetID in packets: customer, pathID = packetid.SplitPacketID(packetID) filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID) if os.path.isfile(filename): try: os.remove(filename) # lg.out(6, ' ' + os.path.basename(filename)) except: lg.exc() continue count += 1 if _Debug: lg.out(_DebugLevel, ' %d files were removed' % count) backup_matrix.ReadLocalFiles() def _packetAcked(self, packet, ownerID, packetID): from storage import backup_matrix backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(packetID) backup_matrix.RemoteFileReport( backupID, blockNum, supplierNum, dataORparity, True) if ownerID not in self.statistic: self.statistic[ownerID] = [0, 0] self.statistic[ownerID][0] += 1 self.automat('block-acked', (ownerID, packetID)) def _packetFailed(self, remoteID, packetID, why): from storage import backup_matrix backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp( packetID) backup_matrix.RemoteFileReport( backupID, blockNum, supplierNum, dataORparity, False) if remoteID not in self.statistic: self.statistic[remoteID] = [0, 0] self.statistic[remoteID][1] += 1 self.automat('block-failed', (remoteID, packetID)) def statistic(): """ The ``data_sender()`` keeps track of sending results with every supplier. This is used by ``fire_hire()`` to decide how reliable is given supplier. 
""" global _DataSender if _DataSender is None: return {} return _DataSender.statistic def SetShutdownFlag(): """ Set flag to indicate that no need to send anything anymore. """ global _ShutdownFlag _ShutdownFlag = True
agpl-3.0
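One detail worth flagging in the DataSender class above: its timers dict literal lists the 'timer-1min' entry twice. Python does not reject duplicate keys in a dict literal; the last occurrence silently wins, so the duplicate is harmless here but is almost certainly an editing leftover:

timers = {
    'timer-1min': (60, ['READY']),
    'timer-1min': (60, ['READY']),   # duplicate key: silently replaces the first
    'timer-1sec': (1.0, ['SENDING']),
}
print(len(timers))  # 2, not 3 -- only one 'timer-1min' entry survives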
ondra-novak/chromium.src
build/android/pylib/host_driven/setup.py
48
6473
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Setup for instrumentation host-driven tests.""" import logging import os import sys import types from pylib.host_driven import test_case from pylib.host_driven import test_info_collection from pylib.host_driven import test_runner def _GetPythonFiles(root, files): """Returns all files from |files| that end in 'Test.py'. Args: root: A directory name with python files. files: A list of file names. Returns: A list with all python files that match the testing naming scheme. """ return [os.path.join(root, f) for f in files if f.endswith('Test.py')] def _InferImportNameFromFile(python_file): """Given a file, infer the import name for that file. Example: /usr/foo/bar/baz.py -> baz. Args: python_file: Path to the Python file, ostensibly to import later. Returns: The module name for the given file. """ return os.path.splitext(os.path.basename(python_file))[0] def _GetTestModules(host_driven_test_root, is_official_build): """Retrieve a list of python modules that match the testing naming scheme. Walks the location of host-driven tests, imports them, and provides the list of imported modules to the caller. Args: host_driven_test_root: The path to walk, looking for the pythonDrivenTests or host_driven_tests directory is_official_build: Whether to run only those tests marked 'official' Returns: A list of python modules under |host_driven_test_root| which match the testing naming scheme. Each module should define one or more classes that derive from HostDrivenTestCase. """ # By default run all host-driven tests under pythonDrivenTests or # host_driven_tests. host_driven_test_file_list = [] for root, _, files in os.walk(host_driven_test_root): if (root.endswith('host_driven_tests') or root.endswith('pythonDrivenTests') or (is_official_build and (root.endswith('pythonDrivenTests/official') or root.endswith('host_driven_tests/official')))): host_driven_test_file_list += _GetPythonFiles(root, files) host_driven_test_file_list.sort() test_module_list = [_GetModuleFromFile(test_file) for test_file in host_driven_test_file_list] return test_module_list def _GetModuleFromFile(python_file): """Gets the python module associated with a file by importing it. Args: python_file: File to import. Returns: The module object. """ sys.path.append(os.path.dirname(python_file)) import_name = _InferImportNameFromFile(python_file) return __import__(import_name) def _GetTestsFromClass(test_case_class, **kwargs): """Returns one test object for each test method in |test_case_class|. Test methods are methods on the class which begin with 'test'. Args: test_case_class: Class derived from HostDrivenTestCase which contains zero or more test methods. kwargs: Keyword args to pass into the constructor of test cases. Returns: A list of test case objects, each initialized for a particular test method. """ test_names = [m for m in dir(test_case_class) if _IsTestMethod(m, test_case_class)] return [test_case_class(name, **kwargs) for name in test_names] def _GetTestsFromModule(test_module, **kwargs): """Gets a list of test objects from |test_module|. Args: test_module: Module from which to get the set of test methods. kwargs: Keyword args to pass into the constructor of test cases. Returns: A list of test case objects each initialized for a particular test method defined in |test_module|. 
""" tests = [] for name in dir(test_module): attr = getattr(test_module, name) if _IsTestCaseClass(attr): tests.extend(_GetTestsFromClass(attr, **kwargs)) return tests def _IsTestCaseClass(test_class): return (type(test_class) is types.TypeType and issubclass(test_class, test_case.HostDrivenTestCase) and test_class is not test_case.HostDrivenTestCase) def _IsTestMethod(attrname, test_case_class): """Checks whether this is a valid test method. Args: attrname: The method name. test_case_class: The test case class. Returns: True if test_case_class.'attrname' is callable and it starts with 'test'; False otherwise. """ attr = getattr(test_case_class, attrname) return callable(attr) and attrname.startswith('test') def _GetAllTests(test_root, is_official_build, **kwargs): """Retrieve a list of host-driven tests defined under |test_root|. Args: test_root: Path which contains host-driven test files. is_official_build: Whether this is an official build. kwargs: Keyword args to pass into the constructor of test cases. Returns: List of test case objects, one for each available test method. """ if not test_root: return [] all_tests = [] test_module_list = _GetTestModules(test_root, is_official_build) for module in test_module_list: all_tests.extend(_GetTestsFromModule(module, **kwargs)) return all_tests def InstrumentationSetup(host_driven_test_root, official_build, instrumentation_options): """Creates a list of host-driven instrumentation tests and a runner factory. Args: host_driven_test_root: Directory where the host-driven tests are. official_build: True if this is an official build. instrumentation_options: An InstrumentationOptions object. Returns: A tuple of (TestRunnerFactory, tests). """ test_collection = test_info_collection.TestInfoCollection() all_tests = _GetAllTests( host_driven_test_root, official_build, instrumentation_options=instrumentation_options) test_collection.AddTests(all_tests) available_tests = test_collection.GetAvailableTests( instrumentation_options.annotations, instrumentation_options.exclude_annotations, instrumentation_options.test_filter) logging.debug('All available tests: ' + str( [t.tagged_name for t in available_tests])) def TestRunnerFactory(device, shard_index): return test_runner.HostDrivenTestRunner( device, shard_index, instrumentation_options.tool, instrumentation_options.push_deps, instrumentation_options.cleanup_test_files) return (TestRunnerFactory, available_tests)
bsd-3-clause
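A quick usage sketch of the discovery rules above, showing how a file path becomes an import name and how test methods are selected; the class and file names here are hypothetical stand-ins, not part of the pylib API:

import os

def infer_import_name(python_file):
    # Mirrors _InferImportNameFromFile: /usr/foo/bar/baz.py -> 'baz'
    return os.path.splitext(os.path.basename(python_file))[0]

print(infer_import_name('/tests/host_driven_tests/FooTest.py'))  # 'FooTest'

class FakeTestCase(object):  # hypothetical stand-in for HostDrivenTestCase
    def testOne(self): pass
    def testTwo(self): pass
    def helper(self): pass

# Same selection rule as _IsTestMethod: callable attributes named 'test*'.
names = [m for m in dir(FakeTestCase)
         if m.startswith('test') and callable(getattr(FakeTestCase, m))]
print(names)  # ['testOne', 'testTwo']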
agacek/camkes-tool
camkes/internal/version.py
1
1813
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#

'''Versioning functionality.

This computes a version identifier based on the current source code state.
It was decided this was more reliable while the tool is under active
development. Note that any extraneous files in your source directory that
match the version filters will be accumulated in the version computation.'''

from memoization import memoized
import hashlib, os, re

@memoized
def version():
    # Files to consider relevant. Each entry should be a pair of (path, filter)
    # where 'path' is relative to the directory of this file and 'filter' is a
    # regex describing which filenames to match under the given path.
    SOURCES = [
        ('../', r'^.*\.py$'),    # Python sources
        ('../templates', r'.*'), # Templates
    ]

    my_path = os.path.dirname(os.path.abspath(__file__))
    sources = set()

    # Accumulate all relevant source files.
    for s in SOURCES:
        path = os.path.join(my_path, s[0])
        regex = re.compile(s[1])
        for root, _, files in os.walk(path):
            for f in files:
                if regex.match(f):
                    sources.add(os.path.abspath(os.path.join(root, f)))

    # Hash each file and hash a concatenation of these hashes. Note, hashing a
    # hash is not good practice for cryptography, but it's fine for this
    # purpose.
    hfinal = hashlib.sha1() #pylint: disable=E1101
    for s in sources:
        with open(s, 'r') as f:
            h = hashlib.sha1(f.read()).hexdigest() #pylint: disable=E1101
        hfinal.update('%s|' % h) #pylint: disable=E1101
    return hfinal.hexdigest()
bsd-2-clause
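One subtlety in version() above: it iterates over a set of paths, and set iteration order is not guaranteed by the language, so a digest built this way is only reproducible by accident. A small sketch of the same idea with the paths sorted first, which pins the result down; digest_of_files is this sketch's name, not part of the tool:

import hashlib

def digest_of_files(paths):
    hfinal = hashlib.sha1()
    for p in sorted(paths):  # fixed order -> reproducible digest
        with open(p, 'rb') as f:
            hfinal.update(hashlib.sha1(f.read()).hexdigest().encode())
        hfinal.update(b'|')
    return hfinal.hexdigest()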
greatmazinger/or-tools
examples/python/set_covering.py
34
2650
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Set covering in Google CP Solver.

Placing of firestations, from Winston 'Operations Research', page 486.

Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/set_covering.mzn
* ECLiPSe : http://www.hakank.org/eclipse/set_covering.ecl
* Comet   : http://www.hakank.org/comet/set_covering.co
* Gecode  : http://www.hakank.org/gecode/set_covering.cpp
* SICStus : http://www.hakank.org/sicstus/set_covering.pl

This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp


def main(unused_argv):
  # Create the solver.
  solver = pywrapcp.Solver("Set covering")

  #
  # data
  #
  min_distance = 15
  num_cities = 6

  distance = [
      [0, 10, 20, 30, 30, 20],
      [10, 0, 25, 35, 20, 10],
      [20, 25, 0, 15, 30, 20],
      [30, 35, 15, 0, 15, 25],
      [30, 20, 30, 15, 0, 14],
      [20, 10, 20, 25, 14, 0]
  ]

  #
  # declare variables
  #
  x = [solver.IntVar(0, 1, "x[%i]" % i) for i in range(num_cities)]

  #
  # constraints
  #

  # objective to minimize
  z = solver.Sum(x)

  # ensure that all cities are covered
  for i in range(num_cities):
    b = [x[j] for j in range(num_cities) if distance[i][j] <= min_distance]
    solver.Add(solver.SumGreaterOrEqual(b, 1))

  objective = solver.Minimize(z, 1)

  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add(x)
  solution.AddObjective(z)

  collector = solver.LastSolutionCollector(solution)
  solver.Solve(solver.Phase(x + [z],
                            solver.INT_VAR_DEFAULT,
                            solver.INT_VALUE_DEFAULT),
               [collector, objective])

  print "z:", collector.ObjectiveValue(0)
  print "x:", [collector.Value(0, x[i]) for i in range(num_cities)]
  print "failures:", solver.Failures()
  print "branches:", solver.Branches()
  print "WallTime:", solver.WallTime()


if __name__ == "__main__":
  main("cp sample")
apache-2.0
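The model above targets the legacy constraint solver and Python 2 print statements. For readers on current OR-Tools, the same firestation set-covering problem is a few lines in CP-SAT; this is a sketch under that assumption, not a drop-in replacement for the example:

from ortools.sat.python import cp_model

def solve_set_covering(distance, min_distance=15):
    n = len(distance)
    model = cp_model.CpModel()
    x = [model.NewBoolVar('x[%i]' % i) for i in range(n)]
    for i in range(n):
        # Every city must have at least one station within min_distance.
        model.Add(sum(x[j] for j in range(n)
                      if distance[i][j] <= min_distance) >= 1)
    model.Minimize(sum(x))  # minimize the number of stations
    solver = cp_model.CpSolver()
    if solver.Solve(model) == cp_model.OPTIMAL:
        return [solver.Value(v) for v in x]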
mafiya69/sympy
sympy/polys/tests/test_injections.py
126
1795
"""Tests for functions that inject symbols into the global namespace. """ from sympy.polys.rings import vring from sympy.polys.fields import vfield from sympy.polys.domains import QQ from sympy.utilities.pytest import raises # make r1 with call-depth = 1 def _make_r1(): return vring("r1", QQ) # make r2 with call-depth = 2 def __make_r2(): return vring("r2", QQ) def _make_r2(): return __make_r2() def test_vring(): R = vring("r", QQ) assert r == R.gens[0] R = vring("rb rbb rcc rzz _rx", QQ) assert rb == R.gens[0] assert rbb == R.gens[1] assert rcc == R.gens[2] assert rzz == R.gens[3] assert _rx == R.gens[4] R = vring(['rd', 're', 'rfg'], QQ) assert rd == R.gens[0] assert re == R.gens[1] assert rfg == R.gens[2] # see if vring() really injects into global namespace raises(NameError, lambda: r1) R = _make_r1() assert r1 == R.gens[0] raises(NameError, lambda: r2) R = _make_r2() assert r2 == R.gens[0] # make f1 with call-depth = 1 def _make_f1(): return vfield("f1", QQ) # make f2 with call-depth = 2 def __make_f2(): return vfield("f2", QQ) def _make_f2(): return __make_f2() def test_vfield(): F = vfield("f", QQ) assert f == F.gens[0] F = vfield("fb fbb fcc fzz _fx", QQ) assert fb == F.gens[0] assert fbb == F.gens[1] assert fcc == F.gens[2] assert fzz == F.gens[3] assert _fx == F.gens[4] F = vfield(['fd', 'fe', 'ffg'], QQ) assert fd == F.gens[0] assert fe == F.gens[1] assert ffg == F.gens[2] # see if vfield() really injects into global namespace raises(NameError, lambda: f1) F = _make_f1() assert f1 == F.gens[0] raises(NameError, lambda: f2) F = _make_f2() assert f2 == F.gens[0]
bsd-3-clause
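For context, this is what the injection being tested looks like from user code: vring returns the ring and, as a side effect, binds each generator name in the caller's global namespace, so the bare names work without assignment. A minimal sketch:

from sympy.polys.rings import vring
from sympy.polys.domains import QQ

R = vring("x y z", QQ)   # injects x, y, z into the calling namespace
p = x**2 + y*z           # the bare generator names are now defined
assert (x, y, z) == R.gens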
ForgottenKahz/CloudOPC
venv/Lib/site-packages/sqlalchemy/testing/suite/test_update_delete.py
203
1582
from .. import fixtures, config
from ..assertions import eq_

from sqlalchemy import Integer, String
from ..schema import Table, Column


class SimpleUpdateDeleteTest(fixtures.TablesTest):
    run_deletes = 'each'
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table('plain_pk', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', String(50))
              )

    @classmethod
    def insert_data(cls):
        config.db.execute(
            cls.tables.plain_pk.insert(),
            [
                {"id": 1, "data": "d1"},
                {"id": 2, "data": "d2"},
                {"id": 3, "data": "d3"},
            ]
        )

    def test_update(self):
        t = self.tables.plain_pk
        r = config.db.execute(
            t.update().where(t.c.id == 2),
            data="d2_new"
        )
        assert not r.is_insert
        assert not r.returns_rows

        eq_(
            config.db.execute(t.select().order_by(t.c.id)).fetchall(),
            [
                (1, "d1"),
                (2, "d2_new"),
                (3, "d3")
            ]
        )

    def test_delete(self):
        t = self.tables.plain_pk
        r = config.db.execute(
            t.delete().where(t.c.id == 2)
        )
        assert not r.is_insert
        assert not r.returns_rows
        eq_(
            config.db.execute(t.select().order_by(t.c.id)).fetchall(),
            [
                (1, "d1"),
                (3, "d3")
            ]
        )

__all__ = ('SimpleUpdateDeleteTest', )
mit
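For reference, this suite is meant to be consumed by third-party dialects rather than run directly: a dialect's own test module imports the whole suite and can subclass individual tests to adjust for backend quirks. A hedged sketch of that pattern; the override shown is a hypothetical example, not a recommendation:

# test_suite.py in a hypothetical third-party dialect
from sqlalchemy.testing.suite import *  # noqa: F401,F403  -- pulls in SimpleUpdateDeleteTest et al.

from sqlalchemy.testing.suite import SimpleUpdateDeleteTest as _SimpleUpdateDeleteTest


class SimpleUpdateDeleteTest(_SimpleUpdateDeleteTest):
    def test_delete(self):
        # Example override point for a backend that cannot run this test;
        # every other test in the suite runs unchanged.
        return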
ibrica/universe-server
play.py
1
1073
from multiprocessing import Process
import time

import gym
import universe
from universe.spaces.vnc_event import keycode

from envs import create_env


def start_game(model, env_name):
    """Regular Python process, not using torch."""
    p = Process(target=play_game, args=(model, env_name))
    p.start()  # Don't wait with join, respond to user request


def play_game(model, env_name):
    """Play the game with a saved model; if there's no model, play randomly."""
    env = create_env(env_name, client_id="play1", remotes=1)  # Local docker container
    max_game_length = 10000
    state = env.reset()
    reward_sum = 0
    start_time = time.time()
    for step in range(max_game_length):
        # No saved model for now: keep pressing 'up', 60 times a minute.
        state, reward, done, _ = env.step(['up' for i in range(60)])
        reward_sum += reward
        print("Time {}, game reward {}, game length {}".format(
            time.strftime("%Hh %Mm %Ss"),
            reward_sum,
            time.gmtime(time.time() - start_time)))
        if done:
            break
mit
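A minimal usage sketch for the module above; the environment id is this snippet's assumption (any VNC-backed universe environment is driven the same way):

# Launch a detached game process; start_game() returns immediately,
# so the caller (e.g. a web handler) can respond while the game runs.
start_game(model=None, env_name='flashgames.DuskDrive-v0')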
m038/superdesk-content-api
content_api/errors.py
6
1588
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

"""
A module that contains exception types for the Superdesk public API.
"""

from superdesk.errors import SuperdeskApiError


class PublicApiError(SuperdeskApiError):
    """Base class for all Superdesk public API errors."""

    _codes = {
        10000: "Unknown API error.",
    }
    """A mapping of error codes to error messages."""

    def __init__(self, error_code=10000, desc=None):
        message = self._codes.get(error_code, 'Unknown error')
        super().__init__(status_code=error_code, message=message, payload=desc)


class UnexpectedParameterError(PublicApiError):
    """Used when request contains an unexpected parameter."""
    PublicApiError._codes[10001] = "Unexpected parameter."

    def __init__(self, desc=None):
        super().__init__(10001, desc=desc)


class BadParameterValueError(PublicApiError):
    """Used when request contains a parameter with an invalid value."""
    PublicApiError._codes[10002] = "Bad parameter value."

    def __init__(self, desc=None):
        super().__init__(10002, desc=desc)


class FileNotFoundError(PublicApiError):
    """Used when trying to fetch a missing file."""
    PublicApiError._codes[10003] = "File not found."

    def __init__(self, desc=None):
        super().__init__(10003, desc=desc)
agpl-3.0
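Raising one of these errors from a hypothetical endpoint looks like this; the numeric code and message come from the _codes table, and desc travels as the error payload:

def fetch_item(item_id, params):
    # Both the function and its parameters are illustrative only.
    if 'unexpected' in params:
        raise UnexpectedParameterError(desc="'unexpected' is not allowed here")
    if not item_id.isdigit():
        raise BadParameterValueError(desc='item_id must be numeric')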
alzamer2/crunchy-xml-decoder
crunchy-xml-decoder/unidecode/x083.py
252
4643
data = ( 'Fu ', # 0x00 'Zhuo ', # 0x01 'Mao ', # 0x02 'Fan ', # 0x03 'Qie ', # 0x04 'Mao ', # 0x05 'Mao ', # 0x06 'Ba ', # 0x07 'Zi ', # 0x08 'Mo ', # 0x09 'Zi ', # 0x0a 'Di ', # 0x0b 'Chi ', # 0x0c 'Ji ', # 0x0d 'Jing ', # 0x0e 'Long ', # 0x0f '[?] ', # 0x10 'Niao ', # 0x11 '[?] ', # 0x12 'Xue ', # 0x13 'Ying ', # 0x14 'Qiong ', # 0x15 'Ge ', # 0x16 'Ming ', # 0x17 'Li ', # 0x18 'Rong ', # 0x19 'Yin ', # 0x1a 'Gen ', # 0x1b 'Qian ', # 0x1c 'Chai ', # 0x1d 'Chen ', # 0x1e 'Yu ', # 0x1f 'Xiu ', # 0x20 'Zi ', # 0x21 'Lie ', # 0x22 'Wu ', # 0x23 'Ji ', # 0x24 'Kui ', # 0x25 'Ce ', # 0x26 'Chong ', # 0x27 'Ci ', # 0x28 'Gou ', # 0x29 'Guang ', # 0x2a 'Mang ', # 0x2b 'Chi ', # 0x2c 'Jiao ', # 0x2d 'Jiao ', # 0x2e 'Fu ', # 0x2f 'Yu ', # 0x30 'Zhu ', # 0x31 'Zi ', # 0x32 'Jiang ', # 0x33 'Hui ', # 0x34 'Yin ', # 0x35 'Cha ', # 0x36 'Fa ', # 0x37 'Rong ', # 0x38 'Ru ', # 0x39 'Chong ', # 0x3a 'Mang ', # 0x3b 'Tong ', # 0x3c 'Zhong ', # 0x3d '[?] ', # 0x3e 'Zhu ', # 0x3f 'Xun ', # 0x40 'Huan ', # 0x41 'Kua ', # 0x42 'Quan ', # 0x43 'Gai ', # 0x44 'Da ', # 0x45 'Jing ', # 0x46 'Xing ', # 0x47 'Quan ', # 0x48 'Cao ', # 0x49 'Jing ', # 0x4a 'Er ', # 0x4b 'An ', # 0x4c 'Shou ', # 0x4d 'Chi ', # 0x4e 'Ren ', # 0x4f 'Jian ', # 0x50 'Ti ', # 0x51 'Huang ', # 0x52 'Ping ', # 0x53 'Li ', # 0x54 'Jin ', # 0x55 'Lao ', # 0x56 'Shu ', # 0x57 'Zhuang ', # 0x58 'Da ', # 0x59 'Jia ', # 0x5a 'Rao ', # 0x5b 'Bi ', # 0x5c 'Ze ', # 0x5d 'Qiao ', # 0x5e 'Hui ', # 0x5f 'Qi ', # 0x60 'Dang ', # 0x61 '[?] ', # 0x62 'Rong ', # 0x63 'Hun ', # 0x64 'Ying ', # 0x65 'Luo ', # 0x66 'Ying ', # 0x67 'Xun ', # 0x68 'Jin ', # 0x69 'Sun ', # 0x6a 'Yin ', # 0x6b 'Mai ', # 0x6c 'Hong ', # 0x6d 'Zhou ', # 0x6e 'Yao ', # 0x6f 'Du ', # 0x70 'Wei ', # 0x71 'Chu ', # 0x72 'Dou ', # 0x73 'Fu ', # 0x74 'Ren ', # 0x75 'Yin ', # 0x76 'He ', # 0x77 'Bi ', # 0x78 'Bu ', # 0x79 'Yun ', # 0x7a 'Di ', # 0x7b 'Tu ', # 0x7c 'Sui ', # 0x7d 'Sui ', # 0x7e 'Cheng ', # 0x7f 'Chen ', # 0x80 'Wu ', # 0x81 'Bie ', # 0x82 'Xi ', # 0x83 'Geng ', # 0x84 'Li ', # 0x85 'Fu ', # 0x86 'Zhu ', # 0x87 'Mo ', # 0x88 'Li ', # 0x89 'Zhuang ', # 0x8a 'Ji ', # 0x8b 'Duo ', # 0x8c 'Qiu ', # 0x8d 'Sha ', # 0x8e 'Suo ', # 0x8f 'Chen ', # 0x90 'Feng ', # 0x91 'Ju ', # 0x92 'Mei ', # 0x93 'Meng ', # 0x94 'Xing ', # 0x95 'Jing ', # 0x96 'Che ', # 0x97 'Xin ', # 0x98 'Jun ', # 0x99 'Yan ', # 0x9a 'Ting ', # 0x9b 'Diao ', # 0x9c 'Cuo ', # 0x9d 'Wan ', # 0x9e 'Han ', # 0x9f 'You ', # 0xa0 'Cuo ', # 0xa1 'Jia ', # 0xa2 'Wang ', # 0xa3 'You ', # 0xa4 'Niu ', # 0xa5 'Shao ', # 0xa6 'Xian ', # 0xa7 'Lang ', # 0xa8 'Fu ', # 0xa9 'E ', # 0xaa 'Mo ', # 0xab 'Wen ', # 0xac 'Jie ', # 0xad 'Nan ', # 0xae 'Mu ', # 0xaf 'Kan ', # 0xb0 'Lai ', # 0xb1 'Lian ', # 0xb2 'Shi ', # 0xb3 'Wo ', # 0xb4 'Usagi ', # 0xb5 'Lian ', # 0xb6 'Huo ', # 0xb7 'You ', # 0xb8 'Ying ', # 0xb9 'Ying ', # 0xba 'Nuc ', # 0xbb 'Chun ', # 0xbc 'Mang ', # 0xbd 'Mang ', # 0xbe 'Ci ', # 0xbf 'Wan ', # 0xc0 'Jing ', # 0xc1 'Di ', # 0xc2 'Qu ', # 0xc3 'Dong ', # 0xc4 'Jian ', # 0xc5 'Zou ', # 0xc6 'Gu ', # 0xc7 'La ', # 0xc8 'Lu ', # 0xc9 'Ju ', # 0xca 'Wei ', # 0xcb 'Jun ', # 0xcc 'Nie ', # 0xcd 'Kun ', # 0xce 'He ', # 0xcf 'Pu ', # 0xd0 'Zi ', # 0xd1 'Gao ', # 0xd2 'Guo ', # 0xd3 'Fu ', # 0xd4 'Lun ', # 0xd5 'Chang ', # 0xd6 'Chou ', # 0xd7 'Song ', # 0xd8 'Chui ', # 0xd9 'Zhan ', # 0xda 'Men ', # 0xdb 'Cai ', # 0xdc 'Ba ', # 0xdd 'Li ', # 0xde 'Tu ', # 0xdf 'Bo ', # 0xe0 'Han ', # 0xe1 'Bao ', # 0xe2 'Qin ', # 0xe3 'Juan ', # 0xe4 'Xi ', # 0xe5 'Qin ', # 0xe6 'Di ', # 0xe7 'Jie ', # 0xe8 'Pu ', # 0xe9 'Dang ', # 0xea 
'Jin ', # 0xeb 'Zhao ', # 0xec 'Tai ', # 0xed 'Geng ', # 0xee 'Hua ', # 0xef 'Gu ', # 0xf0 'Ling ', # 0xf1 'Fei ', # 0xf2 'Jin ', # 0xf3 'An ', # 0xf4 'Wang ', # 0xf5 'Beng ', # 0xf6 'Zhou ', # 0xf7 'Yan ', # 0xf8 'Ju ', # 0xf9 'Jian ', # 0xfa 'Lin ', # 0xfb 'Tan ', # 0xfc 'Shu ', # 0xfd 'Tian ', # 0xfe 'Dao ', # 0xff )
gpl-2.0
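These per-block transliteration tables are indexed by the low byte of the codepoint: a character U+83NN is rendered by looking up entry 0xNN of this module's data tuple. A small sketch of that lookup, independent of the package's own dispatch code and assuming the module is importable as unidecode.x083:

from unidecode import x083  # the table above covers U+8300..U+83FF

ch = u'\u8309'
print(x083.data[ord(ch) & 0xFF])  # 'Mo ' -- entry 0x09 of the tuple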
vinodkc/spark
python/pyspark/ml/tests/test_training_summary.py
15
25466
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from pyspark.ml.classification import BinaryLogisticRegressionSummary, \ BinaryRandomForestClassificationSummary, FMClassifier, \ FMClassificationSummary, LinearSVC, LinearSVCSummary, \ LogisticRegression, LogisticRegressionSummary, \ MultilayerPerceptronClassifier, MultilayerPerceptronClassificationSummary, \ RandomForestClassificationSummary, RandomForestClassifier from pyspark.ml.clustering import BisectingKMeans, GaussianMixture, KMeans from pyspark.ml.linalg import Vectors from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression from pyspark.sql import DataFrame from pyspark.testing.mlutils import SparkSessionTestCase class TrainingSummaryTest(SparkSessionTestCase): def test_linear_regression_summary(self): df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"]) lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight", fitIntercept=False) model = lr.fit(df) self.assertTrue(model.hasSummary) s = model.summary # test that api is callable and returns expected types self.assertEqual(s.totalIterations, 0) self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.predictionCol, "prediction") self.assertEqual(s.labelCol, "label") self.assertEqual(s.featuresCol, "features") objHist = s.objectiveHistory self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float)) self.assertAlmostEqual(s.explainedVariance, 0.25, 2) self.assertAlmostEqual(s.meanAbsoluteError, 0.0) self.assertAlmostEqual(s.meanSquaredError, 0.0) self.assertAlmostEqual(s.rootMeanSquaredError, 0.0) self.assertAlmostEqual(s.r2, 1.0, 2) self.assertAlmostEqual(s.r2adj, 1.0, 2) self.assertTrue(isinstance(s.residuals, DataFrame)) self.assertEqual(s.numInstances, 2) self.assertEqual(s.degreesOfFreedom, 1) devResiduals = s.devianceResiduals self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float)) coefStdErr = s.coefficientStandardErrors self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float)) tValues = s.tValues self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float)) pValues = s.pValues self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float)) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned # The child class LinearRegressionTrainingSummary runs full test sameSummary = model.evaluate(df) self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance) def test_glr_summary(self): from pyspark.ml.linalg import Vectors df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], 
[]))], ["label", "weight", "features"]) glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight", fitIntercept=False) model = glr.fit(df) self.assertTrue(model.hasSummary) s = model.summary # test that api is callable and returns expected types self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.predictionCol, "prediction") self.assertEqual(s.numInstances, 2) self.assertTrue(isinstance(s.residuals(), DataFrame)) self.assertTrue(isinstance(s.residuals("pearson"), DataFrame)) coefStdErr = s.coefficientStandardErrors self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float)) tValues = s.tValues self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float)) pValues = s.pValues self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float)) self.assertEqual(s.degreesOfFreedom, 1) self.assertEqual(s.residualDegreeOfFreedom, 1) self.assertEqual(s.residualDegreeOfFreedomNull, 2) self.assertEqual(s.rank, 1) self.assertTrue(isinstance(s.solver, str)) self.assertTrue(isinstance(s.aic, float)) self.assertTrue(isinstance(s.deviance, float)) self.assertTrue(isinstance(s.nullDeviance, float)) self.assertTrue(isinstance(s.dispersion, float)) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned # The child class GeneralizedLinearRegressionTrainingSummary runs full test sameSummary = model.evaluate(df) self.assertAlmostEqual(sameSummary.deviance, s.deviance) def test_binary_logistic_regression_summary(self): df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"]) lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False) model = lr.fit(df) self.assertTrue(model.hasSummary) s = model.summary # test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.probabilityCol, "probability") self.assertEqual(s.labelCol, "label") self.assertEqual(s.featuresCol, "features") self.assertEqual(s.predictionCol, "prediction") objHist = s.objectiveHistory self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float)) self.assertGreater(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertTrue(isinstance(s.roc, DataFrame)) self.assertAlmostEqual(s.areaUnderROC, 1.0, 2) self.assertTrue(isinstance(s.pr, DataFrame)) self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame)) self.assertTrue(isinstance(s.precisionByThreshold, DataFrame)) self.assertTrue(isinstance(s.recallByThreshold, DataFrame)) self.assertAlmostEqual(s.accuracy, 1.0, 2) self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2) self.assertAlmostEqual(s.weightedRecall, 1.0, 2) self.assertAlmostEqual(s.weightedPrecision, 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2) # test evaluation (with training dataset) produces a 
summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, BinaryLogisticRegressionSummary)) self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC) def test_multiclass_logistic_regression_summary(self): df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], [])), (2.0, 2.0, Vectors.dense(2.0)), (2.0, 2.0, Vectors.dense(1.9))], ["label", "weight", "features"]) lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False) model = lr.fit(df) self.assertTrue(model.hasSummary) s = model.summary # test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.probabilityCol, "probability") self.assertEqual(s.labelCol, "label") self.assertEqual(s.featuresCol, "features") self.assertEqual(s.predictionCol, "prediction") objHist = s.objectiveHistory self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float)) self.assertGreater(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertAlmostEqual(s.accuracy, 0.75, 2) self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2) self.assertAlmostEqual(s.weightedRecall, 0.75, 2) self.assertAlmostEqual(s.weightedPrecision, 0.583, 2) self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.65, 2) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, LogisticRegressionSummary)) self.assertFalse(isinstance(sameSummary, BinaryLogisticRegressionSummary)) self.assertAlmostEqual(sameSummary.accuracy, s.accuracy) def test_linear_svc_summary(self): df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0, 1.0, 1.0)), (0.0, 2.0, Vectors.dense(1.0, 2.0, 3.0))], ["label", "weight", "features"]) svc = LinearSVC(maxIter=5, weightCol="weight") model = svc.fit(df) self.assertTrue(model.hasSummary) s = model.summary() # test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.scoreCol, "rawPrediction") self.assertEqual(s.labelCol, "label") self.assertEqual(s.predictionCol, "prediction") objHist = s.objectiveHistory self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float)) self.assertGreater(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertTrue(isinstance(s.roc, DataFrame)) self.assertAlmostEqual(s.areaUnderROC, 1.0, 2) self.assertTrue(isinstance(s.pr, DataFrame)) 
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame)) self.assertTrue(isinstance(s.precisionByThreshold, DataFrame)) self.assertTrue(isinstance(s.recallByThreshold, DataFrame)) print(s.weightedTruePositiveRate) self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2) self.assertAlmostEqual(s.weightedRecall, 1.0, 2) self.assertAlmostEqual(s.weightedPrecision, 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, LinearSVCSummary)) self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC) def test_binary_randomforest_classification_summary(self): df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"]) rf = RandomForestClassifier(weightCol="weight") model = rf.fit(df) self.assertTrue(model.hasSummary) s = model.summary # test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.labelCol, "label") self.assertEqual(s.predictionCol, "prediction") self.assertEqual(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertTrue(isinstance(s.roc, DataFrame)) self.assertAlmostEqual(s.areaUnderROC, 1.0, 2) self.assertTrue(isinstance(s.pr, DataFrame)) self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame)) self.assertTrue(isinstance(s.precisionByThreshold, DataFrame)) self.assertTrue(isinstance(s.recallByThreshold, DataFrame)) self.assertAlmostEqual(s.accuracy, 1.0, 2) self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2) self.assertAlmostEqual(s.weightedRecall, 1.0, 2) self.assertAlmostEqual(s.weightedPrecision, 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, BinaryRandomForestClassificationSummary)) self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC) def test_multiclass_randomforest_classification_summary(self): df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], [])), (2.0, 2.0, Vectors.dense(2.0)), (2.0, 2.0, Vectors.dense(1.9))], ["label", "weight", "features"]) rf = RandomForestClassifier(weightCol="weight") model = rf.fit(df) self.assertTrue(model.hasSummary) s = model.summary # test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.labelCol, "label") self.assertEqual(s.predictionCol, "prediction") self.assertEqual(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, 
list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertAlmostEqual(s.accuracy, 1.0, 2) self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2) self.assertAlmostEqual(s.weightedRecall, 1.0, 2) self.assertAlmostEqual(s.weightedPrecision, 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, RandomForestClassificationSummary)) self.assertFalse(isinstance(sameSummary, BinaryRandomForestClassificationSummary)) self.assertAlmostEqual(sameSummary.accuracy, s.accuracy) def test_fm_classification_summary(self): df = self.spark.createDataFrame([(1.0, Vectors.dense(2.0)), (0.0, Vectors.dense(2.0)), (0.0, Vectors.dense(6.0)), (1.0, Vectors.dense(3.0)) ], ["label", "features"]) fm = FMClassifier(maxIter=5) model = fm.fit(df) self.assertTrue(model.hasSummary) s = model.summary() # test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.scoreCol, "probability") self.assertEqual(s.labelCol, "label") self.assertEqual(s.predictionCol, "prediction") objHist = s.objectiveHistory self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float)) self.assertGreater(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertTrue(isinstance(s.roc, DataFrame)) self.assertAlmostEqual(s.areaUnderROC, 0.625, 2) self.assertTrue(isinstance(s.pr, DataFrame)) self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame)) self.assertTrue(isinstance(s.precisionByThreshold, DataFrame)) self.assertTrue(isinstance(s.recallByThreshold, DataFrame)) self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2) self.assertAlmostEqual(s.weightedRecall, 0.75, 2) self.assertAlmostEqual(s.weightedPrecision, 0.8333333333333333, 2) self.assertAlmostEqual(s.weightedFMeasure(), 0.7333333333333334, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.7333333333333334, 2) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, FMClassificationSummary)) self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC) def test_mlp_classification_summary(self): df = self.spark.createDataFrame([(0.0, Vectors.dense([0.0, 0.0])), (1.0, Vectors.dense([0.0, 1.0])), (1.0, Vectors.dense([1.0, 0.0])), (0.0, Vectors.dense([1.0, 1.0])) ], ["label", "features"]) mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123) model = mlp.fit(df) self.assertTrue(model.hasSummary) s = model.summary() 
# test that api is callable and returns expected types self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.labelCol, "label") self.assertEqual(s.predictionCol, "prediction") self.assertGreater(s.totalIterations, 0) self.assertTrue(isinstance(s.labels, list)) self.assertTrue(isinstance(s.truePositiveRateByLabel, list)) self.assertTrue(isinstance(s.falsePositiveRateByLabel, list)) self.assertTrue(isinstance(s.precisionByLabel, list)) self.assertTrue(isinstance(s.recallByLabel, list)) self.assertTrue(isinstance(s.fMeasureByLabel(), list)) self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list)) self.assertAlmostEqual(s.accuracy, 1.0, 2) self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2) self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2) self.assertAlmostEqual(s.weightedRecall, 1.0, 2) self.assertAlmostEqual(s.weightedPrecision, 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2) self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2) # test evaluation (with training dataset) produces a summary with same values # one check is enough to verify a summary is returned, Scala version runs full test sameSummary = model.evaluate(df) self.assertTrue(isinstance(sameSummary, MultilayerPerceptronClassificationSummary)) self.assertAlmostEqual(sameSummary.accuracy, s.accuracy) def test_gaussian_mixture_summary(self): data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),), (Vectors.sparse(1, [], []),)] df = self.spark.createDataFrame(data, ["features"]) gmm = GaussianMixture(k=2) model = gmm.fit(df) self.assertTrue(model.hasSummary) s = model.summary self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.probabilityCol, "probability") self.assertTrue(isinstance(s.probability, DataFrame)) self.assertEqual(s.featuresCol, "features") self.assertEqual(s.predictionCol, "prediction") self.assertTrue(isinstance(s.cluster, DataFrame)) self.assertEqual(len(s.clusterSizes), 2) self.assertEqual(s.k, 2) self.assertEqual(s.numIter, 3) def test_bisecting_kmeans_summary(self): data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),), (Vectors.sparse(1, [], []),)] df = self.spark.createDataFrame(data, ["features"]) bkm = BisectingKMeans(k=2) model = bkm.fit(df) self.assertTrue(model.hasSummary) s = model.summary self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.featuresCol, "features") self.assertEqual(s.predictionCol, "prediction") self.assertTrue(isinstance(s.cluster, DataFrame)) self.assertEqual(len(s.clusterSizes), 2) self.assertEqual(s.k, 2) self.assertEqual(s.numIter, 20) def test_kmeans_summary(self): data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),), (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)] df = self.spark.createDataFrame(data, ["features"]) kmeans = KMeans(k=2, seed=1) model = kmeans.fit(df) self.assertTrue(model.hasSummary) s = model.summary self.assertTrue(isinstance(s.predictions, DataFrame)) self.assertEqual(s.featuresCol, "features") self.assertEqual(s.predictionCol, "prediction") self.assertTrue(isinstance(s.cluster, DataFrame)) self.assertEqual(len(s.clusterSizes), 2) self.assertEqual(s.k, 2) self.assertEqual(s.numIter, 1) if __name__ == "__main__": from pyspark.ml.tests.test_training_summary import * # noqa: F401 try: import xmlrunner # type: ignore[import] testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
chrishas35/django-travis-ci
tests/regressiontests/handlers/tests.py
34
1232
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.test import RequestFactory
from django.utils import unittest


class HandlerTests(unittest.TestCase):

    def test_lock_safety(self):
        """
        Tests for bug #11193 (errors inside middleware shouldn't leave
        the initLock locked).
        """
        # Mangle settings so the handler will fail
        old_middleware_classes = settings.MIDDLEWARE_CLASSES
        settings.MIDDLEWARE_CLASSES = 42
        # Try running the handler, it will fail in load_middleware
        handler = WSGIHandler()
        self.assertEqual(handler.initLock.locked(), False)
        try:
            handler(None, None)
        except:
            pass
        self.assertEqual(handler.initLock.locked(), False)
        # Reset settings
        settings.MIDDLEWARE_CLASSES = old_middleware_classes

    def test_bad_path_info(self):
        """Tests for bug #15672 ('request' referenced before assignment)"""
        environ = RequestFactory().get('/').environ
        environ['PATH_INFO'] = '\xed'
        handler = WSGIHandler()
        response = handler(environ, lambda *a, **k: None)
        self.assertEqual(response.status_code, 400)
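

# --- Editor's note: hedged sketch, not part of the original test file ------
# Both tests above drive WSGIHandler by hand with a synthetic environ and a
# throwaway start_response; this helper isolates that call pattern.
def _call_handler_directly():
    environ = RequestFactory().get('/').environ
    handler = WSGIHandler()
    # The no-op start_response mirrors the lambda used in test_bad_path_info.
    response = handler(environ, lambda *args, **kwargs: None)
    return response.status_code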
bsd-3-clause
conwin/node-gyp
gyp/test/mac/gyptest-app.py
85
1409
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that app bundles are built correctly.
"""

import TestGyp

import sys

if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  test.run_gyp('test.gyp', chdir='app-bundle')

  test.build('test.gyp', test.ALL, chdir='app-bundle')

  # Binary
  test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
                             chdir='app-bundle')

  # Info.plist
  info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
                                    chdir='app-bundle')
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'com.google.Test App Gyp')  # Variable expansion

  # Resources
  test.built_file_must_exist(
      'Test App Gyp.app/Contents/Resources/English.lproj/InfoPlist.strings',
      chdir='app-bundle')
  test.built_file_must_exist(
      'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
      chdir='app-bundle')

  # Packaging
  test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
                             chdir='app-bundle')
  test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
                             chdir='app-bundle')

  test.pass_test()
mit
sandeepdsouza93/TensorFlow-15712
tensorflow/examples/learn/hdf5_classification.py
17
2201
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Example of DNNClassifier for Iris plant dataset, h5 format."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn

import h5py  # pylint: disable=g-bad-import-order


def main(unused_argv):
  # Load dataset.
  iris = learn.datasets.load_dataset('iris')
  x_train, x_test, y_train, y_test = cross_validation.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  # Note that we are saving and loading iris data as h5 format as a simple
  # demonstration here.
  h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
  h5f.create_dataset('X_train', data=x_train)
  h5f.create_dataset('X_test', data=x_test)
  h5f.create_dataset('y_train', data=y_train)
  h5f.create_dataset('y_test', data=y_test)
  h5f.close()

  h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
  x_train = np.array(h5f['X_train'])
  x_test = np.array(h5f['X_test'])
  y_train = np.array(h5f['y_train'])
  y_test = np.array(h5f['y_test'])

  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = learn.infer_real_valued_columns_from_input(x_train)
  classifier = learn.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

  # Fit and predict.
  classifier.fit(x_train, y_train, steps=200)
  score = metrics.accuracy_score(y_test, classifier.predict(x_test))
  print('Accuracy: {0:f}'.format(score))


if __name__ == '__main__':
  tf.app.run()
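

# --- Editor's note: hedged sketch, not part of the original example --------
# The example above writes four datasets and reads them back by hand; the
# same round-trip can be written with h5py's context-manager API. The path
# defaults to the same temporary file assumed above.
def _h5_roundtrip(x_train, x_test, y_train, y_test, path='/tmp/test_hdf5.h5'):
  with h5py.File(path, 'w') as h5f:
    for name, arr in [('X_train', x_train), ('X_test', x_test),
                      ('y_train', y_train), ('y_test', y_test)]:
      h5f.create_dataset(name, data=arr)
  with h5py.File(path, 'r') as h5f:
    return tuple(np.array(h5f[k])
                 for k in ('X_train', 'X_test', 'y_train', 'y_test'))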
apache-2.0
droundy/deft
talks/colloquium/figs/plot-walls.py
1
3242
#!/usr/bin/python # We need the following two lines in order for matplotlib to work # without access to an X server. from __future__ import division import matplotlib matplotlib.use('Agg') import pylab, numpy, sys xmax = 2.5 xmin = -0.4 def plotit(dftdata, mcdata): dft_len = len(dftdata[:,0]) dft_dr = dftdata[2,0] - dftdata[1,0] mcdata = numpy.insert(mcdata,0,0,0) mcdata[0,0]=-10 mcoffset = 10/2 offset = -3/2 n0 = dftdata[:,6] nA = dftdata[:,8] nAmc = mcdata[:,11] n0mc = mcdata[:,10] pylab.figure(figsize=(6, 6)) pylab.subplots_adjust(hspace=0.001) n_plt = pylab.subplot(3,1,3) n_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,1]*4*numpy.pi/3,"b-",label='$n$ Monte Carlo') n_plt.plot(dftdata[:,0]/2+offset,dftdata[:,1]*4*numpy.pi/3,"b--",label='$n$ DFT') n_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5) n_plt.yaxis.set_major_locator(pylab.MaxNLocator(6,steps=[1,5,10],prune='upper')) pylab.ylim(ymin=0) pylab.xlim(xmin, xmax) pylab.xlabel("$z/\sigma$") pylab.ylabel("$n(\mathbf{r})$") n_plt.axvline(x=0, color='k', linestyle=':') n = len(mcdata[:,0]) #pylab.twinx() dftr = dftdata[:,0]/2+offset thiswork = dftdata[:,5] gross = dftdata[:,7] stop_here = int(dft_len - 1/dft_dr) print stop_here start_here = int(2.5/dft_dr) off = 1 me = 40 A_plt = pylab.subplot(3,1,1) A_plt.axvline(x=0, color='k', linestyle=':') A_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,2+2*off]/nAmc,"r-",label="$g_\sigma^A$ Monte Carlo") A_plt.plot(dftr[dftr>=0],thiswork[dftr>=0],"ro",markevery=me*.8,label="$g_\sigma^A$ this work") A_plt.plot(dftr[dftr>=0],gross[dftr>=0],"rx",markevery=me,label="Gross", markerfacecolor='none',markeredgecolor='red', markeredgewidth=1) A_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5) A_plt.yaxis.set_major_locator(pylab.MaxNLocator(integer=True,prune='upper')) pylab.ylim(ymin=0) pylab.ylabel("$g_\sigma^A$") pylab.xlim(xmin, xmax) n0mc[0]=1 mcdata[0,10]=1 S_plt = pylab.subplot(3,1,2) S_plt.axvline(x=0, color='k', linestyle=':') S_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,3+2*off]/n0mc,"g-",label="$g_\sigma^S$ Monte Carlo") S_plt.plot(dftdata[:,0]/2+offset,dftdata[:,4],"gx",markevery=me/2,label="Yu and Wu") S_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5) #pylab.ylim(ymax=12) S_plt.yaxis.set_major_locator(pylab.MaxNLocator(5,integer=True,prune='upper')) pylab.xlim(xmin, xmax) pylab.ylim(ymin=0) pylab.ylabel("$g_\sigma^S$") xticklabels = A_plt.get_xticklabels() + S_plt.get_xticklabels() pylab.setp(xticklabels, visible=False) mcdata10 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-196.dat') dftdata10 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.10.dat') mcdata40 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-817.dat') dftdata40 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.40.dat') plotit(dftdata10, mcdata10) pylab.savefig('figs/walls-10.pdf', transparent=True) plotit(dftdata40, mcdata40) pylab.savefig('figs/walls-40.pdf', transparent=True)
gpl-2.0
iddqd1/django-cms
cms/test_utils/project/urls.py
47
1646
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns

from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.test_utils.project.sampleapp.forms import LoginForm, LoginForm2, LoginForm3

admin.autodiscover()

urlpatterns = [
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    url(r'^media/cms/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': get_cms_setting('MEDIA_ROOT'), 'show_indexes': True}),
    url(r'^jsi18n/(?P<packages>\S+?)/$', 'django.views.i18n.javascript_catalog'),
]

urlpatterns += staticfiles_urlpatterns()

urlpatterns += i18n_patterns('',
    url(r'^sample/login_other/$', 'django.contrib.auth.views.login',
        kwargs={'authentication_form': LoginForm2}),
    url(r'^sample/login/$', 'django.contrib.auth.views.login',
        kwargs={'authentication_form': LoginForm}),
    url(r'^sample/login3/$', 'django.contrib.auth.views.login',
        kwargs={'authentication_form': LoginForm3}),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^example/$', 'cms.test_utils.project.placeholderapp.views.example_view'),
    url(r'^plain_view/$', 'cms.test_utils.project.sampleapp.views.plain_view'),
    url(r'^', include('cms.urls')),
)

if settings.DEBUG and is_installed('debug_toolbar'):
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
bsd-3-clause
vjraitila/alfred-issues
src/requests/packages/chardet/charsetprober.py
3127
1902
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from . import constants
import re


class CharSetProber:
    def __init__(self):
        pass

    def reset(self):
        self._mState = constants.eDetecting

    def get_charset_name(self):
        return None

    def feed(self, aBuf):
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        return 0.0

    def filter_high_bit_only(self, aBuf):
        aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
        return aBuf

    def filter_without_english_letters(self, aBuf):
        aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
        return aBuf

    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
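

# --- Editor's note: hedged sketch, not part of the original module ---------
# CharSetProber is the abstract base of the detector: subclasses implement
# feed() and get_confidence(), and a caller streams byte chunks in until the
# prober's state settles. `prober` below stands in for any concrete subclass.
def _drive_prober(prober, chunks):
    prober.reset()
    for chunk in chunks:  # chunks: an iterable of byte strings
        prober.feed(chunk)
        if prober.get_state() == constants.eFoundIt:
            break
    return prober.get_charset_name(), prober.get_confidence()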
mit
michaelaye/vispy
examples/basics/scene/volume.py
4
5162
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- # vispy: gallery 2 """ Example volume rendering Controls: * 1 - toggle camera between first person (fly), regular 3D (turntable) and arcball * 2 - toggle between volume rendering methods * 3 - toggle between stent-CT / brain-MRI image * 4 - toggle between colormaps * 0 - reset cameras * [] - decrease/increase isosurface threshold With fly camera: * WASD or arrow keys - move around * SPACE - brake * FC - move up-down * IJKL or mouse - look around """ from itertools import cycle import numpy as np from vispy import app, scene, io from vispy.color import get_colormaps, BaseColormap from vispy.visuals.transforms import STTransform # Read volume vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0'] vol2 = np.load(io.load_data_file('brain/mri.npz'))['data'] vol2 = np.flipud(np.rollaxis(vol2, 1)) # Prepare canvas canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True) canvas.measure_fps() # Set up a viewbox to display the image with interactive pan/zoom view = canvas.central_widget.add_view() # Set whether we are emulating a 3D texture emulate_texture = False # Create the volume visuals, only one is visible volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.225, emulate_texture=emulate_texture) volume1.transform = scene.STTransform(translate=(64, 64, 0)) volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.2, emulate_texture=emulate_texture) volume2.visible = False # Create three cameras (Fly, Turntable and Arcball) fov = 60. 
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly') cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov, name='Turntable') cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball') view.camera = cam2 # Select turntable at first # Create an XYZAxis visual axis = scene.visuals.XYZAxis(parent=view) s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1)) affine = s.as_matrix() axis.transform = affine # create colormaps that work well for translucent and additive volume rendering class TransFire(BaseColormap): glsl_map = """ vec4 translucent_fire(float t) { return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05)); } """ class TransGrays(BaseColormap): glsl_map = """ vec4 translucent_grays(float t) { return vec4(t, t, t, t*0.05); } """ # Setup colormap iterators opaque_cmaps = cycle(get_colormaps()) translucent_cmaps = cycle([TransFire(), TransGrays()]) opaque_cmap = next(opaque_cmaps) translucent_cmap = next(translucent_cmaps) # Implement axis connection with cam2 @canvas.events.mouse_move.connect def on_mouse_move(event): if event.button == 1 and event.is_dragging: axis.transform.reset() axis.transform.rotate(cam2.roll, (0, 0, 1)) axis.transform.rotate(cam2.elevation, (1, 0, 0)) axis.transform.rotate(cam2.azimuth, (0, 1, 0)) axis.transform.scale((50, 50, 0.001)) axis.transform.translate((50., 50.)) axis.update() # Implement key presses @canvas.events.key_press.connect def on_key_press(event): global opaque_cmap, translucent_cmap if event.text == '1': cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1} view.camera = cam_toggle.get(view.camera, cam2) print(view.camera.name + ' camera') if view.camera is cam2: axis.visible = True else: axis.visible = False elif event.text == '2': methods = ['mip', 'translucent', 'iso', 'additive'] method = methods[(methods.index(volume1.method) + 1) % 4] print("Volume render method: %s" % method) cmap = opaque_cmap if method in ['mip', 'iso'] else translucent_cmap volume1.method = method volume1.cmap = cmap volume2.method = method volume2.cmap = cmap elif event.text == '3': volume1.visible = not volume1.visible volume2.visible = not volume1.visible elif event.text == '4': if volume1.method in ['mip', 'iso']: cmap = opaque_cmap = next(opaque_cmaps) else: cmap = translucent_cmap = next(translucent_cmaps) volume1.cmap = cmap volume2.cmap = cmap elif event.text == '0': cam1.set_range() cam3.set_range() elif event.text != '' and event.text in '[]': s = -0.025 if event.text == '[' else 0.025 volume1.threshold += s volume2.threshold += s th = volume1.threshold if volume1.visible else volume2.threshold print("Isosurface threshold: %0.3f" % th) # for testing performance # @canvas.connect # def on_draw(ev): # canvas.update() if __name__ == '__main__': print(__doc__) app.run()
bsd-3-clause
ericzundel/pants
src/python/pants/backend/jvm/tasks/properties.py
16
3411
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import re from collections import OrderedDict import six class Properties(object): """A Python reader for java.util.Properties formatted data. Based on: http://download.oracle.com/javase/6/docs/api/java/util/Properties.html#load(java.io.Reader) Originally copied from: https://github.com/twitter/commons/blob/master/src/python/twitter/common/config/properties.py :API: public """ @staticmethod def load(data): """Loads properties from an open stream or the contents of a string. :API: public :param (string | open stream) data: An open stream or a string. :returns: A dict of parsed property data. :rtype: dict """ if hasattr(data, 'read') and callable(data.read): contents = data.read() elif isinstance(data, six.string_types): contents = data else: raise TypeError('Can only process data from a string or a readable object, given: %s' % data) return Properties._parse(contents.splitlines()) # An unescaped '=' or ':' forms an explicit separator _EXPLICIT_KV_SEP = re.compile(r'(?<!\\)[=:]') @staticmethod def _parse(lines): def coalesce_lines(): line_iter = iter(lines) try: buffer = '' while True: line = next(line_iter) if line.strip().endswith('\\'): # Continuation. buffer += line.strip()[:-1] else: if buffer: # Continuation join, preserve left hand ws (could be a kv separator) buffer += line.rstrip() else: # Plain old line buffer = line.strip() try: yield buffer finally: buffer = '' except StopIteration: pass def normalize(atom): return re.sub(r'\\([:=\s])', r'\1', atom.strip()) def parse_line(line): if line and not (line.startswith('#') or line.startswith('!')): match = Properties._EXPLICIT_KV_SEP.search(line) if match: return normalize(line[:match.start()]), normalize(line[match.end():]) else: space_sep = line.find(' ') if space_sep == -1: return normalize(line), '' else: return normalize(line[:space_sep]), normalize(line[space_sep:]) props = OrderedDict() for line in coalesce_lines(): kv_pair = parse_line(line) if kv_pair: key, value = kv_pair props[key] = value return props @staticmethod def dump(props, output): """Dumps a dict of properties to the specified open stream or file path. :API: public """ def escape(token): return re.sub(r'([=:\s])', r'\\\1', token) def write(out): for k, v in props.items(): out.write('%s=%s\n' % (escape(str(k)), escape(str(v)))) if hasattr(output, 'write') and callable(output.write): write(output) elif isinstance(output, six.string_types): with open(output, 'w+a') as out: write(out) else: raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)
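

# --- Editor's note: hedged sketch, not part of the original file -----------
# Properties.load() accepts a string or any readable object, and dump() is
# its inverse. The data below is an invented example exercising the three
# separators the parser recognizes ('=', ':', whitespace) plus a trailing-
# backslash line continuation.
def _properties_roundtrip_example():
  text = (
      'name=pants\n'
      'jvm.options: -Xmx1g\n'
      'motto easy to use\n'
      'long\\\n'
      'key=joined\n'
  )
  props = Properties.load(text)
  # -> {'name': 'pants', 'jvm.options': '-Xmx1g',
  #     'motto': 'easy to use', 'longkey': 'joined'}
  out = six.StringIO()
  Properties.dump(props, out)
  return props, out.getvalue()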
apache-2.0
AprilBrother/esptool
esptool.py
1
28432
#!/usr/bin/env python # # ESP8266 ROM Bootloader Utility # https://github.com/themadinventor/esptool # # Copyright (C) 2014 Fredrik Ahlberg # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 Franklin # Street, Fifth Floor, Boston, MA 02110-1301 USA. import sys import struct import serial import time import argparse import os import subprocess import tempfile class ESPROM: # These are the currently known commands supported by the ROM ESP_FLASH_BEGIN = 0x02 ESP_FLASH_DATA = 0x03 ESP_FLASH_END = 0x04 ESP_MEM_BEGIN = 0x05 ESP_MEM_END = 0x06 ESP_MEM_DATA = 0x07 ESP_SYNC = 0x08 ESP_WRITE_REG = 0x09 ESP_READ_REG = 0x0a # Maximum block sized for RAM and Flash writes, respectively. ESP_RAM_BLOCK = 0x1800 ESP_FLASH_BLOCK = 0x100 # Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want. ESP_ROM_BAUD = 115200 # First byte of the application image ESP_IMAGE_MAGIC = 0xe9 # Initial state for the checksum routine ESP_CHECKSUM_MAGIC = 0xef # OTP ROM addresses ESP_OTP_MAC0 = 0x3ff00050 ESP_OTP_MAC1 = 0x3ff00054 # Sflash stub: an assembly routine to read from spi flash and send to host SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \ "\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \ "\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \ "\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00" def __init__(self, port=0, baud=ESP_ROM_BAUD): self._port = serial.Serial(port) # setting baud rate in a separate step is a workaround for # CH341 driver on some Linux versions (this opens at 9600 then # sets), shouldn't matter for other platforms/drivers. See # https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446 self._port.baudrate = baud """ Read bytes from the serial port while performing SLIP unescaping """ def read(self, length=1): b = '' while len(b) < length: c = self._port.read(1) if c == '\xdb': c = self._port.read(1) if c == '\xdc': b = b + '\xc0' elif c == '\xdd': b = b + '\xdb' else: raise FatalError('Invalid SLIP escape') else: b = b + c return b """ Write bytes to the serial port while performing SLIP escaping """ def write(self, packet): buf = '\xc0' \ + (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \ + '\xc0' self._port.write(buf) """ Calculate checksum of a blob, as it is defined by the ROM """ @staticmethod def checksum(data, state=ESP_CHECKSUM_MAGIC): for b in data: state ^= ord(b) return state """ Send a request and read the response """ def command(self, op=None, data=None, chk=0): if op: pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data self.write(pkt) # tries to get a response until that response has the # same operation as the request or a retries limit has # exceeded. This is needed for some esp8266s that # reply with more sync responses than expected. 
retries = 100 while retries > 0: (op_ret, val, body) = self.receive_response() if op is None or op_ret == op: return val, body # valid response received retries = retries - 1 raise FatalError("Response doesn't match request") """ Receive a response to a command """ def receive_response(self): # Read header of response and parse if self._port.read(1) != '\xc0': raise FatalError('Invalid head of packet') hdr = self.read(8) (resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr) if resp != 0x01: raise FatalError('Invalid response 0x%02x" to command' % resp) # The variable-length body body = self.read(len_ret) # Terminating byte if self._port.read(1) != chr(0xc0): raise FatalError('Invalid end of packet') return op_ret, val, body """ Perform a connection test """ def sync(self): self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55') for i in xrange(7): self.command() """ Try connecting repeatedly until successful, or giving up """ def connect(self): print 'Connecting...' for _ in xrange(4): # worst-case latency timer should be 255ms (probably <20ms) self._port.timeout = 0.3 for _ in xrange(4): try: self._port.flushInput() self._port.flushOutput() self.sync() self._port.timeout = 5 return except: time.sleep(0.05) raise FatalError('Failed to connect to ESP8266') """ Read memory address in target """ def read_reg(self, addr): res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr)) if res[1] != "\0\0": raise FatalError('Failed to read target memory') return res[0] """ Write to memory address in target """ def write_reg(self, addr, value, mask, delay_us=0): if self.command(ESPROM.ESP_WRITE_REG, struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0": raise FatalError('Failed to write target memory') """ Start downloading an application image to RAM """ def mem_begin(self, size, blocks, blocksize, offset): if self.command(ESPROM.ESP_MEM_BEGIN, struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0": raise FatalError('Failed to enter RAM download mode') """ Send a block of an image to RAM """ def mem_block(self, data, seq): if self.command(ESPROM.ESP_MEM_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, ESPROM.checksum(data))[1] != "\0\0": raise FatalError('Failed to write to target RAM') """ Leave download mode and run the application """ def mem_finish(self, entrypoint=0): if self.command(ESPROM.ESP_MEM_END, struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0": raise FatalError('Failed to leave RAM download mode') """ Start downloading to Flash (performs an erase) """ def flash_begin(self, size, offset): old_tmo = self._port.timeout num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK sectors_per_block = 16 sector_size = 4096 num_sectors = (size + sector_size - 1) / sector_size start_sector = offset / sector_size head_sectors = sectors_per_block - (start_sector % sectors_per_block) if num_sectors < head_sectors: head_sectors = num_sectors if num_sectors < 2 * head_sectors: erase_size = (num_sectors + 1) / 2 * sector_size else: erase_size = (num_sectors - head_sectors) * sector_size self._port.timeout = 10 result = self.command(ESPROM.ESP_FLASH_BEGIN, struct.pack('<IIII', erase_size, num_blocks, ESPROM.ESP_FLASH_BLOCK, offset))[1] if result != "\0\0": raise FatalError.WithResult('Failed to enter Flash download mode (result "%s")', result) self._port.timeout = old_tmo """ Write block to flash """ def flash_block(self, data, seq): result = self.command(ESPROM.ESP_FLASH_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + 
data, ESPROM.checksum(data))[1] if result != "\0\0": raise FatalError.WithResult('Failed to write to target Flash after seq %d (got result %%s)' % seq, result) """ Leave flash mode and run/reboot """ def flash_finish(self, reboot=False): pkt = struct.pack('<I', int(not reboot)) if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0": raise FatalError('Failed to leave Flash mode') """ Run application code in flash """ def run(self, reboot=False): # Fake flash begin immediately followed by flash end self.flash_begin(0, 0) self.flash_finish(reboot) """ Read MAC from OTP ROM """ def read_mac(self): mac0 = self.read_reg(self.ESP_OTP_MAC0) mac1 = self.read_reg(self.ESP_OTP_MAC1) if ((mac1 >> 16) & 0xff) == 0: oui = (0x18, 0xfe, 0x34) elif ((mac1 >> 16) & 0xff) == 1: oui = (0xac, 0xd0, 0x74) else: raise FatalError("Unknown OUI") return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff) """ Read SPI flash manufacturer and device id """ def flash_id(self): self.flash_begin(0, 0) self.write_reg(0x60000240, 0x0, 0xffffffff) self.write_reg(0x60000200, 0x10000000, 0xffffffff) flash_id = self.read_reg(0x60000240) self.flash_finish(False) return flash_id """ Read SPI flash """ def flash_read(self, offset, size, count=1): # Create a custom stub stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB # Trick ROM to initialize SFlash self.flash_begin(0, 0) # Download stub self.mem_begin(len(stub), 1, len(stub), 0x40100000) self.mem_block(stub, 0) self.mem_finish(0x4010001c) # Fetch the data data = '' for _ in xrange(count): if self._port.read(1) != '\xc0': raise FatalError('Invalid head of packet (sflash read)') data += self.read(size) if self._port.read(1) != chr(0xc0): raise FatalError('Invalid end of packet (sflash read)') return data """ Abuse the loader protocol to force flash to be left in write mode """ def flash_unlock_dio(self): # Enable flash write mode self.flash_begin(0, 0) # Reset the chip rather than call flash_finish(), which would have # write protected the chip again (why oh why does it do that?!) self.mem_begin(0,0,0,0x40100000) self.mem_finish(0x40000080) """ Perform a chip erase of SPI flash """ def flash_erase(self): # Trick ROM to initialize SFlash self.flash_begin(0, 0) # This is hacky: we don't have a custom stub, instead we trick # the bootloader to jump to the SPIEraseChip() routine and then halt/crash # when it tries to boot an unconfigured system. self.mem_begin(0,0,0,0x40100000) self.mem_finish(0x40004984) # Yup - there's no good way to detect if we succeeded. # It it on the other hand unlikely to fail. class ESPFirmwareImage: def __init__(self, filename=None): self.segments = [] self.entrypoint = 0 self.flash_mode = 0 self.flash_size_freq = 0 if filename is not None: f = file(filename, 'rb') (magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', f.read(8)) # some sanity check if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16: raise FatalError('Invalid firmware image') for i in xrange(segments): (offset, size) = struct.unpack('<II', f.read(8)) if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536: raise FatalError('Suspicious segment 0x%x, length %d' % (offset, size)) segment_data = f.read(size) if len(segment_data) < size: raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data))) self.segments.append((offset, size, segment_data)) # Skip the padding. The checksum is stored in the last byte so that the # file is a multiple of 16 bytes. 
align = 15 - (f.tell() % 16) f.seek(align, 1) self.checksum = ord(f.read(1)) def add_segment(self, addr, data): # Data should be aligned on word boundary l = len(data) if l % 4: data += b"\x00" * (4 - l % 4) if l > 0: self.segments.append((addr, len(data), data)) def save(self, filename): f = file(filename, 'wb') f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments), self.flash_mode, self.flash_size_freq, self.entrypoint)) checksum = ESPROM.ESP_CHECKSUM_MAGIC for (offset, size, data) in self.segments: f.write(struct.pack('<II', offset, size)) f.write(data) checksum = ESPROM.checksum(data, checksum) align = 15 - (f.tell() % 16) f.seek(align, 1) f.write(struct.pack('B', checksum)) class ELFFile: def __init__(self, name): self.name = name self.symbols = None def _fetch_symbols(self): if self.symbols is not None: return self.symbols = {} try: tool_nm = "xtensa-lx106-elf-nm" if os.getenv('XTENSA_CORE') == 'lx106': tool_nm = "xt-nm" proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE) except OSError: print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_nm sys.exit(1) for l in proc.stdout: fields = l.strip().split() try: if fields[0] == "U": print "Warning: ELF binary has undefined symbol %s" % fields[1] continue self.symbols[fields[2]] = int(fields[0], 16) except ValueError: raise FatalError("Failed to strip symbol output from nm: %s" % fields) def get_symbol_addr(self, sym): self._fetch_symbols() return self.symbols[sym] def get_entry_point(self): tool_readelf = "xtensa-lx106-elf-readelf" if os.getenv('XTENSA_CORE') == 'lx106': tool_readelf = "xt-readelf" try: proc = subprocess.Popen([tool_readelf, "-h", self.name], stdout=subprocess.PIPE) except OSError: print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_readelf sys.exit(1) for l in proc.stdout: fields = l.strip().split() if fields[0] == "Entry": return int(fields[3], 0) def load_section(self, section): tool_objcopy = "xtensa-lx106-elf-objcopy" if os.getenv('XTENSA_CORE') == 'lx106': tool_objcopy = "xt-objcopy" tmpsection = tempfile.mktemp(suffix=".section") try: subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection]) with open(tmpsection, "rb") as f: data = f.read() finally: os.remove(tmpsection) return data def arg_auto_int(x): return int(x, 0) def div_roundup(a, b): """ Return a/b rounded up to nearest integer, equivalent result to int(math.ceil(float(int(a)) / float(int(b))), only without possible floating point accuracy errors. """ return (int(a) + int(b) - 1) / int(b) class FatalError(RuntimeError): """ Wrapper class for runtime errors that aren't caused by internal bugs, but by ESP8266 responses or input content. """ def __init__(self, message): RuntimeError.__init__(self, message) @staticmethod def WithResult(message, result): """ Return a fatal error object that includes the hex values of 'result' as a string formatted argument. 
""" return FatalError(message % ", ".join(hex(ord(x)) for x in result)) def main(): parser = argparse.ArgumentParser(description='ESP8266 ROM Bootloader Utility', prog='esptool') parser.add_argument( '--port', '-p', help='Serial port device', default='/dev/ttyUSB0') parser.add_argument( '--baud', '-b', help='Serial port baud rate', type=arg_auto_int, default=ESPROM.ESP_ROM_BAUD) subparsers = parser.add_subparsers( dest='operation', help='Run esptool {command} -h for additional help') parser_load_ram = subparsers.add_parser( 'load_ram', help='Download an image to RAM and execute') parser_load_ram.add_argument('filename', help='Firmware image') parser_dump_mem = subparsers.add_parser( 'dump_mem', help='Dump arbitrary memory to disk') parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int) parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int) parser_dump_mem.add_argument('filename', help='Name of binary dump') parser_read_mem = subparsers.add_parser( 'read_mem', help='Read arbitrary memory location') parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int) parser_write_mem = subparsers.add_parser( 'write_mem', help='Read-modify-write to arbitrary memory location') parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int) parser_write_mem.add_argument('value', help='Value', type=arg_auto_int) parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int) parser_write_flash = subparsers.add_parser( 'write_flash', help='Write a binary blob to flash') parser_write_flash.add_argument('addr_filename', nargs='+', help='Address and binary file to write there, separated by space') parser_write_flash.add_argument('--flash_freq', '-ff', help='SPI Flash frequency', choices=['40m', '26m', '20m', '80m'], default='40m') parser_write_flash.add_argument('--flash_mode', '-fm', help='SPI Flash mode', choices=['qio', 'qout', 'dio', 'dout'], default='qio') parser_write_flash.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit', choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m') subparsers.add_parser( 'run', help='Run application code in flash') parser_image_info = subparsers.add_parser( 'image_info', help='Dump headers from an application image') parser_image_info.add_argument('filename', help='Image file to parse') parser_make_image = subparsers.add_parser( 'make_image', help='Create an application image from binary files') parser_make_image.add_argument('output', help='Output image file') parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file') parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int) parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0) parser_elf2image = subparsers.add_parser( 'elf2image', help='Create an application image from ELF file') parser_elf2image.add_argument('input', help='Input ELF file') parser_elf2image.add_argument('--output', '-o', help='Output filename prefix', type=str) parser_elf2image.add_argument('--flash_freq', '-ff', help='SPI Flash frequency', choices=['40m', '26m', '20m', '80m'], default='40m') parser_elf2image.add_argument('--flash_mode', '-fm', help='SPI Flash mode', choices=['qio', 'qout', 'dio', 'dout'], default='qio') parser_elf2image.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit', choices=['4m', '2m', '8m', '16m', 
'32m', '16m-c1', '32m-c1', '32m-c2'], default='4m') subparsers.add_parser( 'read_mac', help='Read MAC address from OTP ROM') subparsers.add_parser( 'flash_id', help='Read SPI flash manufacturer and device ID') parser_read_flash = subparsers.add_parser( 'read_flash', help='Read SPI flash content') parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int) parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int) parser_read_flash.add_argument('filename', help='Name of binary dump') subparsers.add_parser( 'erase_flash', help='Perform Chip Erase on SPI flash') args = parser.parse_args() # Create the ESPROM connection object, if needed esp = None if args.operation not in ('image_info','make_image','elf2image'): esp = ESPROM(args.port, args.baud) esp.connect() # Do the actual work. Should probably be split into separate functions. if args.operation == 'load_ram': image = ESPFirmwareImage(args.filename) print 'RAM boot...' for (offset, size, data) in image.segments: print 'Downloading %d bytes at %08x...' % (size, offset), sys.stdout.flush() esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset) seq = 0 while len(data) > 0: esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq) data = data[esp.ESP_RAM_BLOCK:] seq += 1 print 'done!' print 'All segments done, executing at %08x' % image.entrypoint esp.mem_finish(image.entrypoint) elif args.operation == 'read_mem': print '0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address)) elif args.operation == 'write_mem': esp.write_reg(args.address, args.value, args.mask, 0) print 'Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address) elif args.operation == 'dump_mem': f = file(args.filename, 'wb') for i in xrange(args.size / 4): d = esp.read_reg(args.address + (i * 4)) f.write(struct.pack('<I', d)) if f.tell() % 1024 == 0: print '\r%d bytes read... (%d %%)' % (f.tell(), f.tell() * 100 / args.size), sys.stdout.flush() print 'Done!' elif args.operation == 'write_flash': assert len(args.addr_filename) % 2 == 0 flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode] flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size] flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq] flash_info = struct.pack('BB', flash_mode, flash_size_freq) while args.addr_filename: address = int(args.addr_filename[0], 0) filename = args.addr_filename[1] args.addr_filename = args.addr_filename[2:] image = file(filename, 'rb').read() print 'Erasing flash...' blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK) esp.flash_begin(blocks * esp.ESP_FLASH_BLOCK, address) seq = 0 written = 0 t = time.time() while len(image) > 0: print '\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.ESP_FLASH_BLOCK, 100 * (seq + 1) / blocks), sys.stdout.flush() block = image[0:esp.ESP_FLASH_BLOCK] # Fix sflash config data if address == 0 and seq == 0 and block[0] == '\xe9': block = block[0:2] + flash_info + block[4:] # Pad the last block block = block + '\xff' * (esp.ESP_FLASH_BLOCK - len(block)) esp.flash_block(block, seq) image = image[esp.ESP_FLASH_BLOCK:] seq += 1 written += len(block) t = time.time() - t print '\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000) print '\nLeaving...' 
if args.flash_mode == 'dio': esp.flash_unlock_dio() else: esp.flash_begin(0, 0) esp.flash_finish(False) elif args.operation == 'run': esp.run() elif args.operation == 'image_info': image = ESPFirmwareImage(args.filename) print ('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set' print '%d segments' % len(image.segments) print checksum = ESPROM.ESP_CHECKSUM_MAGIC for (idx, (offset, size, data)) in enumerate(image.segments): print 'Segment %d: %5d bytes at %08x' % (idx + 1, size, offset) checksum = ESPROM.checksum(data, checksum) print print 'Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!') elif args.operation == 'make_image': image = ESPFirmwareImage() if len(args.segfile) == 0: raise FatalError('No segments specified') if len(args.segfile) != len(args.segaddr): raise FatalError('Number of specified files does not match number of specified addresses') for (seg, addr) in zip(args.segfile, args.segaddr): data = file(seg, 'rb').read() image.add_segment(addr, data) image.entrypoint = args.entrypoint image.save(args.output) elif args.operation == 'elf2image': if args.output is None: args.output = args.input + '-' e = ELFFile(args.input) image = ESPFirmwareImage() image.entrypoint = e.get_entry_point() for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")): data = e.load_section(section) image.add_segment(e.get_symbol_addr(start), data) image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode] image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size] image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq] image.save(args.output + "0x00000.bin") data = e.load_section(".irom0.text") off = e.get_symbol_addr("_irom0_text_start") - 0x40200000 assert off >= 0 f = open(args.output + "0x%05x.bin" % off, "wb") f.write(data) f.close() elif args.operation == 'read_mac': mac = esp.read_mac() print 'MAC: %s' % ':'.join(map(lambda x: '%02x' % x, mac)) elif args.operation == 'flash_id': flash_id = esp.flash_id() print 'Manufacturer: %02x' % (flash_id & 0xff) print 'Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff) elif args.operation == 'read_flash': print 'Please wait...' file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size]) elif args.operation == 'erase_flash': esp.flash_erase() if __name__ == '__main__': try: main() except FatalError as e: print '\nA fatal error occurred: %s' % e sys.exit(2)
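

# --- Editor's note: hedged sketch, not part of esptool.py ------------------
# ESPROM.read()/write() above implement SLIP framing: 0xc0 delimits packets,
# and 0xdb 0xdc / 0xdb 0xdd escape literal 0xc0 / 0xdb bytes. The helpers
# below restate that wire format (and the XOR checksum) as pure functions so
# the framing can be checked without a serial port.
def _slip_escape(payload):
    # Escape 0xdb first, then 0xc0, exactly as ESPROM.write() does
    return '\xc0' + payload.replace('\xdb', '\xdb\xdd').replace('\xc0', '\xdb\xdc') + '\xc0'

def _esp_checksum(data, state=0xef):
    # XOR over all payload bytes, seeded with ESP_CHECKSUM_MAGIC (0xef)
    for b in data:
        state ^= ord(b)
    return state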
gpl-2.0
silly-wacky-3-town-toon/SOURCE-COD
toontown/coghq/SellbotMegaFactoryLavaRoomFoyer_Action01.py
7
8680
from toontown.coghq.SpecImports import * GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '', 'parentEntId': 0, 'cogLevel': 0, 'farPlaneDistance': 1500, 'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a', 'wantDoors': 1}, 1001: {'type': 'editMgr', 'name': 'EditMgr', 'parentEntId': 0, 'insertEntity': None, 'removeEntity': None, 'requestNewEntity': None, 'requestSave': None}, 0: {'type': 'zone', 'name': 'UberZone', 'comment': '', 'parentEntId': 0, 'scale': 1, 'description': '', 'visibility': []}, 10000: {'type': 'attribModifier', 'name': '<unnamed>', 'comment': '', 'parentEntId': 10004, 'attribName': 'modelPath', 'recursive': 1, 'typeName': 'model', 'value': ''}, 10001: {'type': 'attribModifier', 'name': '<unnamed>', 'comment': '', 'parentEntId': 10004, 'attribName': 'scale', 'recursive': 1, 'typeName': 'model', 'value': 'Vec3(.955,1,1)'}, 10019: {'type': 'attribModifier', 'name': '<unnamed>', 'comment': '', 'parentEntId': 10015, 'attribName': 'modelPath', 'recursive': 1, 'typeName': 'model', 'value': ''}, 10006: {'type': 'gear', 'name': '<unnamed>', 'comment': '', 'parentEntId': 10003, 'pos': Point3(0.0, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'degreesPerSec': -4.0, 'gearScale': 14.193780914463838, 'modelType': 'mint', 'orientation': 'horizontal', 'phaseShift': 0}, 10007: {'type': 'gear', 'name': 'copy of <unnamed>', 'comment': '', 'parentEntId': 10003, 'pos': Point3(0.0, 0.0, 4.28999996185), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'degreesPerSec': 4.0, 'gearScale': 14.193780914463838, 'modelType': 'mint', 'orientation': 'horizontal', 'phaseShift': 0}, 10009: {'type': 'gear', 'name': 'copy of <unnamed> (2)', 'comment': '', 'parentEntId': 10003, 'pos': Point3(0.0, 0.0, 8.57999992371), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'degreesPerSec': -4.0, 'gearScale': 14.193780914463838, 'modelType': 'mint', 'orientation': 'horizontal', 'phaseShift': 0.055}, 10014: {'type': 'gear', 'name': 'copy of <unnamed> (3)', 'comment': '', 'parentEntId': 10003, 'pos': Point3(0.0, 0.0, 12.8699998856), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'degreesPerSec': 4.0, 'gearScale': 14.193780914463838, 'modelType': 'mint', 'orientation': 'horizontal', 'phaseShift': 0.06}, 10018: {'type': 'healBarrel', 'name': '<unnamed>', 'comment': '', 'parentEntId': 10017, 'pos': Point3(-2.03643107414, 2.34967470169, 5.46433734894), 'hpr': Vec3(34.1522636414, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'rewardPerGrab': 5, 'rewardPerGrabMax': 0}, 10002: {'type': 'model', 'name': '<unnamed>', 'comment': '', 'parentEntId': 10003, 'pos': Point3(0.0, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(6.5, 6.5, 6.5), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/RoundShadow.bam'}, 10005: {'type': 'model', 'name': 'doorwayCrate', 'comment': '', 'parentEntId': 0, 'pos': Point3(27.0090961456, 0.850000023842, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10008: {'type': 'model', 'name': 'shaft', 'comment': '', 'parentEntId': 10003, 'pos': Point3(0.0, 0.0, 7.25891637802), 'hpr': Vec3(0.0, 0.0, 180.0), 'scale': Vec3(5.35842609406, 5.35842609406, 5.35842609406), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModel', 'modelPath': 'phase_10/models/cashbotHQ/MintGearPost.bam'}, 10010: {'type': 'model', 'name': 
'middle', 'comment': '', 'parentEntId': 10004, 'pos': Point3(0.0, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(0.954999983311, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10011: {'type': 'model', 'name': 'copy of middle', 'comment': '', 'parentEntId': 10004, 'pos': Point3(-5.72357320786, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(0.954999983311, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10012: {'type': 'model', 'name': 'copy of middle', 'comment': '', 'parentEntId': 10004, 'pos': Point3(5.71999979019, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(0.954999983311, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10013: {'type': 'model', 'name': 'copy of middle', 'comment': '', 'parentEntId': 10004, 'pos': Point3(11.4399995804, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(0.954999983311, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10015: {'type': 'model', 'name': 'crateStack', 'comment': '', 'parentEntId': 0, 'pos': Point3(-18.0376968384, 20.2023410797, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10016: {'type': 'model', 'name': 'upper', 'comment': '', 'parentEntId': 10015, 'pos': Point3(0.0, 0.0, 5.42841148376), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10017: {'type': 'model', 'name': 'copy of upper', 'comment': '', 'parentEntId': 10016, 'pos': Point3(0.0, 0.0, 5.43412637711), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'}, 10021: {'type': 'model', 'name': 'crateStack', 'comment': '', 'parentEntId': 10020, 'pos': Point3(21.064825058, 20.1899757385, 9.87216758728), 'hpr': Vec3(270.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0), 'collisionsOnly': 0, 'flattenType': 'light', 'loadType': 'loadModelCopy', 'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'}, 10003: {'type': 'nodepath', 'name': 'gears', 'comment': '', 'parentEntId': 0, 'pos': Point3(-3.18650078773, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': Vec3(1.0, 1.0, 1.0)}, 10004: {'type': 'nodepath', 'name': 'wall', 'comment': '', 'parentEntId': 0, 'pos': Point3(19.5468139648, 6.37875938416, 0.0), 'hpr': Point3(270.0, 0.0, 0.0), 'scale': Vec3(1.95812249184, 1.5, 1.79999995232)}, 10020: {'type': 'nodepath', 'name': 'props', 'comment': '', 'parentEntId': 0, 'pos': Point3(0.0, 0.0, 0.0), 'hpr': Vec3(0.0, 0.0, 0.0), 'scale': 1}} Scenario0 = {} levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [Scenario0]}
apache-2.0
webmasterraj/FogOrNot
flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py
2769
1967
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#   Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber


class MBCSGroupProber(CharSetGroupProber):
    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [
            UTF8Prober(),
            SJISProber(),
            EUCJPProber(),
            GB2312Prober(),
            EUCKRProber(),
            CP949Prober(),
            Big5Prober(),
            EUCTWProber()
        ]
        self.reset()
gpl-2.0
borosnborea/SwordGO_app
example/kivymap/.buildozer/android/app/_applibs/requests/packages/chardet/constants.py
3008
1335
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

_debug = 0

eDetecting = 0
eFoundIt = 1
eNotMe = 2

eStart = 0
eError = 1
eItsMe = 2

SHORTCUT_THRESHOLD = 0.95
gpl-3.0
phil-lopreiato/the-blue-alliance
tests/consts_tests/fcm/test_platform_priority.py
3
1613
import unittest2

from consts.fcm.platform_priority import PlatformPriority
from consts.fcm.platform_type import PlatformType


class TestPlatformPriority(unittest2.TestCase):

    def test_validate_invalid(self):
        with self.assertRaises(ValueError):
            PlatformPriority.validate(2)

    def test_validate(self):
        PlatformPriority.validate(PlatformPriority.NORMAL)
        PlatformPriority.validate(PlatformPriority.HIGH)

    def test_platform_priority_invalid_platform(self):
        with self.assertRaises(ValueError):
            PlatformPriority.platform_priority(-1, PlatformPriority.HIGH)

    def test_platform_priority_invalid_priority(self):
        with self.assertRaises(ValueError):
            PlatformPriority.platform_priority(PlatformType.ANDROID, -1)

    def test_platform_priority_android(self):
        self.assertEqual(PlatformPriority.platform_priority(PlatformType.ANDROID, PlatformPriority.HIGH), 'high')
        self.assertEqual(PlatformPriority.platform_priority(PlatformType.ANDROID, PlatformPriority.NORMAL), 'normal')

    def test_platform_priority_apns(self):
        self.assertEqual(PlatformPriority.platform_priority(PlatformType.APNS, PlatformPriority.HIGH), '10')
        self.assertEqual(PlatformPriority.platform_priority(PlatformType.APNS, PlatformPriority.NORMAL), '5')

    def test_platform_priority_webpush(self):
        self.assertEqual(PlatformPriority.platform_priority(PlatformType.WEBPUSH, PlatformPriority.HIGH), 'high')
        self.assertEqual(PlatformPriority.platform_priority(PlatformType.WEBPUSH, PlatformPriority.NORMAL), 'normal')
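

# --- Editor's note: hedged sketch, not part of the original test file ------
# The real PlatformPriority (consts/fcm/platform_priority.py) is not shown
# here. The class below is a hypothetical reconstruction that would satisfy
# the assertions above; the numeric values of HIGH/NORMAL are assumptions.
class _PlatformPrioritySketch(object):
    HIGH = 0
    NORMAL = 1

    @classmethod
    def validate(cls, priority):
        if priority not in (cls.HIGH, cls.NORMAL):
            raise ValueError('Unsupported priority: {}'.format(priority))

    @classmethod
    def platform_priority(cls, platform, priority):
        cls.validate(priority)
        mapping = {
            PlatformType.ANDROID: {cls.HIGH: 'high', cls.NORMAL: 'normal'},
            PlatformType.APNS: {cls.HIGH: '10', cls.NORMAL: '5'},
            PlatformType.WEBPUSH: {cls.HIGH: 'high', cls.NORMAL: 'normal'},
        }
        if platform not in mapping:
            raise ValueError('Unsupported platform: {}'.format(platform))
        return mapping[platform][priority]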
mit
homhei/glance
glance/glance_pull/lib/GlanceBattery.py
2
3375
#!/usr/bin/env python
#encode=utf-8
#vim: tabstop=4 shiftwidth=4 softtabstop=4

#Created on 2013-8-17
#Copyright 2013 nuoqingyun xuqifeng

import sys

# Some libs depend on the OS
is_BSD = sys.platform.find('bsd') != -1
is_Linux = sys.platform.startswith('linux')
is_Mac = sys.platform.startswith('darwin')
is_Windows = sys.platform.startswith('win')

try:
    # psutil is the main library used to grab stats
    import psutil
except ImportError:
    print('PsUtil module not found. Glances cannot start.')
    sys.exit(1)

psutil_version = tuple([int(num) for num in psutil.__version__.split('.')])
# this is not a mistake: psutil 0.5.1 is detected as 0.5.0
if psutil_version < (0, 5, 0):
    print('PsUtil version %s detected.' % psutil.__version__)
    print('PsUtil 0.5.1 or higher is needed. Glances cannot start.')
    sys.exit(1)

try:
    # psutil.virtual_memory() only available from psutil >= 0.6
    psutil.virtual_memory()
except Exception:
    psutil_mem_vm = False
else:
    psutil_mem_vm = True

try:
    # psutil.net_io_counters() only available from psutil >= 1.0.0
    psutil.net_io_counters()
except Exception:
    psutil_net_io_counters = False
else:
    psutil_net_io_counters = True

if not is_Mac:
    psutil_get_io_counter_tag = True
else:
    # get_io_counters() not available on OS X
    psutil_get_io_counter_tag = False

# batinfo library (optional; Linux-only)
if is_Linux:
    try:
        import batinfo
    except ImportError:
        batinfo_lib_tag = False
    else:
        batinfo_lib_tag = True
else:
    batinfo_lib_tag = False

from oslo.config import cfg

glance_opts = [
    cfg.StrOpt('',
               default='',
               help='')
]

CONF = cfg.CONF
CONF.register_opts(glance_opts)

# Default tag
sensors_tag = False
hddtemp_tag = False
network_tag = True
diskio_tag = True
fs_tag = True
process_tag = True


class glanceGrabBat:
    """
    Get battery stats using the batinfo library
    """

    def __init__(self):
        """
        Init battery stats
        """
        if batinfo_lib_tag:
            try:
                self.bat = batinfo.batteries()
                self.bat.stat[0]["capacity"]
                self.initok = True
                self.__update__()
            except:
                self.initok = False
        else:
            self.initok = False

    def __update__(self):
        """
        Update the stats
        """
        if self.initok:
            try:
                self.bat.update()
            except:
                self.bat_list = []
            else:
                self.bat_list = self.bat.stat
        else:
            self.bat_list = []

    def get(self):
        # Update the stats
        self.__update__()
        return self.bat_list

    def getcapacitypercent(self):
        if not self.initok or self.bat_list == []:
            return []
        # Init bsum (sum of capacities) and bcpt (number of batteries),
        # then loop over batteries (yes, a computer can have more than one)
        bsum = 0
        for bcpt in range(len(self.get())):
            bsum = bsum + int(self.bat_list[bcpt].capacity)
            bcpt = bcpt + 1
        # Return the global percent
        return int(bsum / bcpt)
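

# --- Editor's note: hedged sketch, not part of the original module ---------
# Typical use of glanceGrabBat: instantiate once, then poll. Whether any
# batteries are reported depends on batinfo importing cleanly and the host
# exposing battery entries, so both results below may legitimately be empty.
if __name__ == '__main__':
    grabber = glanceGrabBat()
    stats = grabber.get()  # list of per-battery stat objects (may be [])
    print('batteries found: %d' % len(stats))
    if stats:
        print('average capacity: %d%%' % grabber.getcapacitypercent())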
apache-2.0
uclouvain/osis
base/migrations/0333_auto_20180820_1343.py
2
1081
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-08-20 13:43
from __future__ import unicode_literals

import django.db.models.deletion
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('base', '0332_auto_20180816_1540'),
    ]

    operations = [
        migrations.CreateModel(
            name='Prerequisite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prerequisite', models.CharField(blank=True, max_length=240, null=True)),
                ('education_group_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.EducationGroupYear')),
                ('learning_unit_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.LearningUnitYear')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='prerequisite',
            unique_together=set([('learning_unit_year', 'education_group_year')]),
        ),
    ]
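

# --- Editor's note: hedged sketch, not part of the migration ---------------
# For readers, this is the model definition the operations above imply; the
# actual class lives elsewhere in the osis code base and may differ. It is
# left commented out because live model code does not belong in a migration.
#
# class Prerequisite(models.Model):
#     prerequisite = models.CharField(blank=True, max_length=240, null=True)
#     education_group_year = models.ForeignKey(
#         'base.EducationGroupYear', on_delete=models.CASCADE)
#     learning_unit_year = models.ForeignKey(
#         'base.LearningUnitYear', on_delete=models.CASCADE)
#
#     class Meta:
#         unique_together = ('learning_unit_year', 'education_group_year')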
agpl-3.0
AnhellO/DAS_Sistemas
Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/core/validators.py
7
18596
import ipaddress
import os
import re
from urllib.parse import urlsplit, urlunsplit

from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.translation import gettext_lazy as _, ngettext_lazy

# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})


def _lazy_re_compile(regex, flags=0):
    """Lazily compile a regex with flags."""
    def _compile():
        # Compile the regex if it was not passed pre-compiled.
        if isinstance(regex, str):
            return re.compile(regex, flags)
        else:
            assert not flags, "flags must be empty if regex is passed pre-compiled"
            return regex
    return SimpleLazyObject(_compile)


@deconstructible
class RegexValidator:
    regex = ''
    message = _('Enter a valid value.')
    code = 'invalid'
    inverse_match = False
    flags = 0

    def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
        if regex is not None:
            self.regex = regex
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        if inverse_match is not None:
            self.inverse_match = inverse_match
        if flags is not None:
            self.flags = flags
        if self.flags and not isinstance(self.regex, str):
            raise TypeError("If the flags are set, regex must be a regular expression string.")

        self.regex = _lazy_re_compile(self.regex, self.flags)

    def __call__(self, value):
        """
        Validate that the input contains (or does *not* contain, if
        inverse_match is True) a match for the regular expression.
        """
        regex_matches = bool(self.regex.search(str(value)))
        invalid_input = regex_matches if self.inverse_match else not regex_matches
        if invalid_input:
            raise ValidationError(self.message, code=self.code)

    def __eq__(self, other):
        return (
            isinstance(other, RegexValidator) and
            self.regex.pattern == other.regex.pattern and
            self.regex.flags == other.regex.flags and
            (self.message == other.message) and
            (self.code == other.code) and
            (self.inverse_match == other.inverse_match)
        )


@deconstructible
class URLValidator(RegexValidator):
    ul = '\u00a1-\uffff'  # unicode letters range (must not be a raw string)

    # IP patterns
    ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
    ipv6_re = r'\[[0-9a-f:\.]+\]'  # (simple regex, validated later)

    # Host patterns
    hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
    # Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
    domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
    tld_re = (
        r'\.'                                # dot
        r'(?!-)'                             # can't start with a dash
        r'(?:[a-z' + ul + '-]{2,63}'         # domain label
        r'|xn--[a-z0-9]{1,59})'              # or punycode label
        r'(?<!-)'                            # can't end with a dash
        r'\.?'                               # may have a trailing dot
    )
    host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'

    regex = _lazy_re_compile(
        r'^(?:[a-z0-9\.\-\+]*)://'  # scheme is validated separately
        r'(?:\S+(?::\S*)?@)?'  # user:pass authentication
        r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
        r'(?::\d{2,5})?'  # port
        r'(?:[/?#][^\s]*)?'  # resource path
        r'\Z', re.IGNORECASE)
    message = _('Enter a valid URL.')
    schemes = ['http', 'https', 'ftp', 'ftps']

    def __init__(self, schemes=None, **kwargs):
        super().__init__(**kwargs)
        if schemes is not None:
            self.schemes = schemes

    def __call__(self, value):
        # Check first if the scheme is valid
        scheme = value.split('://')[0].lower()
        if scheme not in self.schemes:
            raise ValidationError(self.message, code=self.code)

        # Then check full URL
        try:
            super().__call__(value)
        except ValidationError as e:
            # Trivial case failed. Try for possible IDN domain
            if value:
                try:
                    scheme, netloc, path, query, fragment = urlsplit(value)
                except ValueError:  # for example, "Invalid IPv6 URL"
                    raise ValidationError(self.message, code=self.code)
                try:
                    netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
                except UnicodeError:  # invalid domain part
                    raise e
                url = urlunsplit((scheme, netloc, path, query, fragment))
                super().__call__(url)
            else:
                raise
        else:
            # Now verify IPv6 in the netloc part
            host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
            if host_match:
                potential_ip = host_match.groups()[0]
                try:
                    validate_ipv6_address(potential_ip)
                except ValidationError:
                    raise ValidationError(self.message, code=self.code)

        # The maximum length of a full host name is 253 characters per RFC 1034
        # section 3.1. It's defined to be 255 bytes or less, but this includes
        # one byte for the length of the name and one byte for the trailing dot
        # that's used to indicate absolute names in DNS.
        if len(urlsplit(value).netloc) > 253:
            raise ValidationError(self.message, code=self.code)


integer_validator = RegexValidator(
    _lazy_re_compile(r'^-?\d+\Z'),
    message=_('Enter a valid integer.'),
    code='invalid',
)


def validate_integer(value):
    return integer_validator(value)


@deconstructible
class EmailValidator:
    message = _('Enter a valid email address.')
    code = 'invalid'
    user_regex = _lazy_re_compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z"  # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)',  # quoted-string
        re.IGNORECASE)
    domain_regex = _lazy_re_compile(
        # max length for domain name labels is 63 characters per RFC 1034
        r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
        re.IGNORECASE)
    literal_regex = _lazy_re_compile(
        # literal form, ipv4 or ipv6 address (SMTP 4.1.3)
        r'\[([A-f0-9:\.]+)\]\Z',
        re.IGNORECASE)
    domain_whitelist = ['localhost']

    def __init__(self, message=None, code=None, whitelist=None):
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        if whitelist is not None:
            self.domain_whitelist = whitelist

    def __call__(self, value):
        if not value or '@' not in value:
            raise ValidationError(self.message, code=self.code)

        user_part, domain_part = value.rsplit('@', 1)

        if not self.user_regex.match(user_part):
            raise ValidationError(self.message, code=self.code)

        if (domain_part not in self.domain_whitelist and
                not self.validate_domain_part(domain_part)):
            # Try for possible IDN domain-part
            try:
                domain_part = domain_part.encode('idna').decode('ascii')
            except UnicodeError:
                pass
            else:
                if self.validate_domain_part(domain_part):
                    return
            raise ValidationError(self.message, code=self.code)

    def validate_domain_part(self, domain_part):
        if self.domain_regex.match(domain_part):
            return True

        literal_match = self.literal_regex.match(domain_part)
        if literal_match:
            ip_address = literal_match.group(1)
            try:
                validate_ipv46_address(ip_address)
                return True
            except ValidationError:
                pass
        return False

    def __eq__(self, other):
        return (
            isinstance(other, EmailValidator) and
            (self.domain_whitelist == other.domain_whitelist) and
            (self.message == other.message) and
            (self.code == other.code)
        )


validate_email = EmailValidator()

slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
    slug_re,
    # Translators: "letters" means latin letters: a-z and A-Z.
    _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
    'invalid'
)

slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z')
validate_unicode_slug = RegexValidator(
    slug_unicode_re,
    _("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."),
    'invalid'
)


def validate_ipv4_address(value):
    try:
        ipaddress.IPv4Address(value)
    except ValueError:
        raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid')


def validate_ipv6_address(value):
    if not is_valid_ipv6_address(value):
        raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')


def validate_ipv46_address(value):
    try:
        validate_ipv4_address(value)
    except ValidationError:
        try:
            validate_ipv6_address(value)
        except ValidationError:
            raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')


ip_address_validator_map = {
    'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
    'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
    'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}


def ip_address_validators(protocol, unpack_ipv4):
    """
    Depending on the given parameters, return the appropriate validators for
    the GenericIPAddressField.
    """
    if protocol != 'both' and unpack_ipv4:
        raise ValueError(
            "You can only use `unpack_ipv4` if `protocol` is set to 'both'")
    try:
        return ip_address_validator_map[protocol.lower()]
    except KeyError:
        raise ValueError("The protocol '%s' is unknown. Supported: %s"
                         % (protocol, list(ip_address_validator_map)))


def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False):
    regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % {
        'neg': '(-)?' if allow_negative else '',
        'sep': re.escape(sep),
    })
    return RegexValidator(regexp, message=message, code=code)


validate_comma_separated_integer_list = int_list_validator(
    message=_('Enter only digits separated by commas.'),
)


@deconstructible
class BaseValidator:
    message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
    code = 'limit_value'

    def __init__(self, limit_value, message=None):
        self.limit_value = limit_value
        if message:
            self.message = message

    def __call__(self, value):
        cleaned = self.clean(value)
        params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
        if self.compare(cleaned, self.limit_value):
            raise ValidationError(self.message, code=self.code, params=params)

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            self.limit_value == other.limit_value and
            self.message == other.message and
            self.code == other.code
        )

    def compare(self, a, b):
        return a is not b

    def clean(self, x):
        return x


@deconstructible
class MaxValueValidator(BaseValidator):
    message = _('Ensure this value is less than or equal to %(limit_value)s.')
    code = 'max_value'

    def compare(self, a, b):
        return a > b


@deconstructible
class MinValueValidator(BaseValidator):
    message = _('Ensure this value is greater than or equal to %(limit_value)s.')
    code = 'min_value'

    def compare(self, a, b):
        return a < b


@deconstructible
class MinLengthValidator(BaseValidator):
    message = ngettext_lazy(
        'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
        'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
        'limit_value')
    code = 'min_length'

    def compare(self, a, b):
        return a < b

    def clean(self, x):
        return len(x)


@deconstructible
class MaxLengthValidator(BaseValidator):
    message = ngettext_lazy(
        'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
        'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
        'limit_value')
    code = 'max_length'

    def compare(self, a, b):
        return a > b

    def clean(self, x):
        return len(x)


@deconstructible
class DecimalValidator:
    """
    Validate that the input does not exceed the maximum number of digits
    expected, otherwise raise ValidationError.
    """
    messages = {
        'max_digits': ngettext_lazy(
            'Ensure that there are no more than %(max)s digit in total.',
            'Ensure that there are no more than %(max)s digits in total.',
            'max'
        ),
        'max_decimal_places': ngettext_lazy(
            'Ensure that there are no more than %(max)s decimal place.',
            'Ensure that there are no more than %(max)s decimal places.',
            'max'
        ),
        'max_whole_digits': ngettext_lazy(
            'Ensure that there are no more than %(max)s digit before the decimal point.',
            'Ensure that there are no more than %(max)s digits before the decimal point.',
            'max'
        ),
    }

    def __init__(self, max_digits, decimal_places):
        self.max_digits = max_digits
        self.decimal_places = decimal_places

    def __call__(self, value):
        digit_tuple, exponent = value.as_tuple()[1:]
        if exponent >= 0:
            # A positive exponent adds that many trailing zeros.
            digits = len(digit_tuple) + exponent
            decimals = 0
        else:
            # If the absolute value of the negative exponent is larger than the
            # number of digits, then it's the same as the number of digits,
            # because it'll consume all of the digits in digit_tuple and then
            # add abs(exponent) - len(digit_tuple) leading zeros after the
            # decimal point.
            if abs(exponent) > len(digit_tuple):
                digits = decimals = abs(exponent)
            else:
                digits = len(digit_tuple)
                decimals = abs(exponent)
        whole_digits = digits - decimals

        if self.max_digits is not None and digits > self.max_digits:
            raise ValidationError(
                self.messages['max_digits'],
                code='max_digits',
                params={'max': self.max_digits},
            )
        if self.decimal_places is not None and decimals > self.decimal_places:
            raise ValidationError(
                self.messages['max_decimal_places'],
                code='max_decimal_places',
                params={'max': self.decimal_places},
            )
        if (self.max_digits is not None and self.decimal_places is not None and
                whole_digits > (self.max_digits - self.decimal_places)):
            raise ValidationError(
                self.messages['max_whole_digits'],
                code='max_whole_digits',
                params={'max': (self.max_digits - self.decimal_places)},
            )

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            self.max_digits == other.max_digits and
            self.decimal_places == other.decimal_places
        )


@deconstructible
class FileExtensionValidator:
    message = _(
        "File extension '%(extension)s' is not allowed. "
        "Allowed extensions are: '%(allowed_extensions)s'."
    )
    code = 'invalid_extension'

    def __init__(self, allowed_extensions=None, message=None, code=None):
        if allowed_extensions is not None:
            allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions]
        self.allowed_extensions = allowed_extensions
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code

    def __call__(self, value):
        extension = os.path.splitext(value.name)[1][1:].lower()
        if self.allowed_extensions is not None and extension not in self.allowed_extensions:
            raise ValidationError(
                self.message,
                code=self.code,
                params={
                    'extension': extension,
                    'allowed_extensions': ', '.join(self.allowed_extensions)
                }
            )

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            self.allowed_extensions == other.allowed_extensions and
            self.message == other.message and
            self.code == other.code
        )


def get_available_image_extensions():
    try:
        from PIL import Image
    except ImportError:
        return []
    else:
        Image.init()
        return [ext.lower()[1:] for ext in Image.EXTENSION]


validate_image_file_extension = FileExtensionValidator(
    allowed_extensions=get_available_image_extensions(),
)


@deconstructible
class ProhibitNullCharactersValidator:
    """Validate that the string doesn't contain the null character."""
    message = _('Null characters are not allowed.')
    code = 'null_characters_not_allowed'

    def __init__(self, message=None, code=None):
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code

    def __call__(self, value):
        if '\x00' in str(value):
            raise ValidationError(self.message, code=self.code)

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            self.message == other.message and
            self.code == other.code
        )
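# --- Usage sketch (standard Django validator API, for illustration) ---
# Validators are callables that raise ValidationError on bad input and
# return None otherwise.
#
#     from django.core.exceptions import ValidationError
#     from django.core.validators import (
#         EmailValidator, MaxValueValidator, validate_ipv46_address,
#     )
#
#     EmailValidator()('user@example.com')      # passes silently
#     validate_ipv46_address('::1')             # passes silently
#     try:
#         MaxValueValidator(10)(11)
#     except ValidationError as e:
#         print(e.code)                         # 'max_value'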
mit
cgar/servo
tests/wpt/css-tests/css-text-3_dev/xhtml1print/reference/support/generate-segment-break-transformation-rules-tests.py
324
4862
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests segment-break-transformation-rules-001 ~ 049
which cover all possible combinations of characters at two sides of
segment breaks. More specifically, there are seven types of characters
involved in these rules:

1. East Asian Full-width (F)
2. East Asian Half-width (H)
3. East Asian Wide (W) except Hangul
4. East Asian Narrow (Na)
5. East Asian Ambiguous (A)
6. Not East Asian (Neutral)
7. Hangul

So there are 49 different combinations. It outputs a list of all tests it
generated in the format of Mozilla reftest.list to stdout.
"""

from __future__ import unicode_literals

TEST_FILE = 'segment-break-transformation-rules-{:03}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reftest Test: Segment Break Transformation Rules</title>
<link rel="author" title="Chun-Min (Jeremy) Chen" href="mailto:[email protected]">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-3/#line-break-transform">
<meta name="assert" content="'segment-break-transformation-rules: with {prev}/{next} in front/back of the segment break.">
<link rel="stylesheet" type="text/css" href="support/ahem.css" />
<link rel="match" href="segment-break-transformation-rules-{index:03}-ref.html">
<style> p {{ font-family: ahem; }} </style>
<div>Pass if there is {expect} white space between the two strings below.
<p>{prevchar}&#x000a;{nextchar}</p>
</div>
'''

REF_FILE = 'segment-break-transformation-rules-{:03}-ref.html'
REF_TEMPLATE_REMOVE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reftest Reference: Segment Break Transformation Rules</title>
<link rel="author" title="Chun-Min (Jeremy) Chen" href="mailto:[email protected]">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="stylesheet" type="text/css" href="support/ahem.css" />
<style> p {{ font-family: ahem; }} </style>
<div>Pass if there is NO white space between the two strings below.
<p>{0}{1}</p>
</div>
'''

REF_TEMPLATE_KEEP = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reftest Reference: Segment Break Transformation Rules</title>
<link rel="author" title="Chun-Min (Jeremy) Chen" href="mailto:[email protected]">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="stylesheet" type="text/css" href="support/ahem.css" />
<style> p {{ font-family: ahem; }} </style>
<div>Pass if there is ONE white space between the two strings below.
<p>{0}{2}{1}</p>
</div>
'''

CHAR_SET = [
    ('East Asian Full-width (F)', 'FULLWIDTH'),
    ('East Asian Half-width (H)', 'テスト'),
    ('East Asian Wide (W) except Hangul', '測試'),
    ('East Asian Narrow (Na)', 'narrow'),
    ('East Asian Ambiguous (A)', '■'),
    ('Not East Asian (Neutral)', 'آزمون'),
    ('Hangul', '테스트'),
]

def write_file(filename, content):
    with open(filename, 'wb') as f:
        f.write(content.encode('UTF-8'))

print("# START tests from {}".format(__file__))
global idx
idx = 0
for i, (prevtype, prevchars) in enumerate(CHAR_SET):
    for j, (nextype, nextchars) in enumerate(CHAR_SET):
        idx += 1
        reffilename = REF_FILE.format(idx)
        testfilename = TEST_FILE.format(idx)
        # According to CSS Text 3 - 4.1.2. Segment Break Transformation Rules,
        # if the East Asian Width property of both the character before and
        # after the segment break is F, W, or H (not A), and neither side is
        # Hangul, then the segment break is removed. Otherwise, the segment
        # break is converted to a space (U+0020).
        if i < 3 and j < 3:
            write_file(reffilename,
                       REF_TEMPLATE_REMOVE.format(prevchars, nextchars))
            write_file(testfilename,
                       TEST_TEMPLATE.format(index=idx,
                                            prev=prevtype,
                                            next=nextype,
                                            prevchar=prevchars,
                                            nextchar=nextchars,
                                            expect='NO'))
        else:
            write_file(reffilename,
                       REF_TEMPLATE_KEEP.format(prevchars, nextchars, '&#x0020;'))
            write_file(testfilename,
                       TEST_TEMPLATE.format(index=idx,
                                            prev=prevtype,
                                            next=nextype,
                                            prevchar=prevchars,
                                            nextchar=nextchars,
                                            expect='ONE'))
        print("== {} {}".format(testfilename, reffilename))
print("# END tests from {}".format(__file__))
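# --- Output sketch (derived from the templates above, for illustration) ---
# For idx == 1 (Full-width on both sides, an all-"remove" combination) the
# loop writes the test/reference pair and prints a reftest manifest line
# of the form:
#
#     == segment-break-transformation-rules-001.html segment-break-transformation-rules-001-ref.html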
mpl-2.0