repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringclasses, 981 values) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
syci/ingadhoc-odoo-addons | product_price_currency/product.py | 2 | 2492 | # -*- coding: utf-8 -*-
from openerp import fields, models, api
import openerp.addons.decimal_precision as dp
class product_template(models.Model):
_inherit = 'product.template'
@api.model
def get_currency_id(self):
price_type_obj = self.env['product.price.type']
price_type_ids = price_type_obj.search([('field', '=', 'list_price')])
if not price_type_ids.currency_id:
return self.env.user.company_id.currency_id
return price_type_ids.currency_id
sale_price_currency_id = fields.Many2one(
'res.currency', 'Sale Price Currency',
required=True, default=get_currency_id,
help="Currency used for the Currency List Price."
)
cia_currency_list_price = fields.Float(
'Company Currency Sale Price',
digits=dp.get_precision('Product Price'),
compute='get_cia_currency_list_price',
help="Base price on company currency at actual exchange rate",
)
@api.multi
@api.depends('list_price', 'sale_price_currency_id')
def get_cia_currency_list_price(self):
company_currency = self.env.user.company_id.currency_id
for product in self:
if product.sale_price_currency_id != company_currency:
cia_currency_list_price = product.sale_price_currency_id.compute(
product.list_price, company_currency)
else:
cia_currency_list_price = product.list_price
product.cia_currency_list_price = cia_currency_list_price
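# Illustrative note (not part of the original addon): for a product whose
# sale_price_currency_id is, say, USD while the company currency is EUR,
# `usd.compute(product.list_price, eur)` above converts the list price at the
# current exchange rate, and that converted value is what ends up in
# cia_currency_list_price; the currency names are made-up examples.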
def _price_get(self, cr, uid, products, ptype='list_price', context=None):
if not context:
context = {}
res = super(product_template, self)._price_get(
cr, uid, products, ptype=ptype, context=context)
if ptype == 'list_price':
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(
cr, uid, [('field', '=', ptype)])[0]
price_type_currency_id = pricetype_obj.browse(
cr, uid, price_type_id).currency_id.id
for product in products:
if product.sale_price_currency_id.id != price_type_currency_id:
res[product.id] = self.pool.get('res.currency').compute(
cr, uid, product.sale_price_currency_id.id,
price_type_currency_id, res[product.id],
context=context)
return res
| agpl-3.0 |
lmprice/ansible | lib/ansible/plugins/lookup/k8s.py | 2 | 11120 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: k8s
version_added: "2.5"
short_description: Query the K8s API
description:
- Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
namespace, or all matching objects for all namespaces, as well as information about the cluster.
- Provides access to the full range of K8s APIs.
- Enables authentication via config file, certificates, password or token.
options:
cluster_info:
description:
- Use to specify the type of cluster information you are attempting to retrieve. Will take priority
over all the other options.
api_version:
description:
- Use to specify the API version. If I(resource_definition) is provided, the I(apiVersion) from the
I(resource_definition) will override this option.
default: v1
kind:
description:
- Use to specify an object model. If I(resource_definition) is provided, the I(kind) from the
I(resource_definition) will override this option.
required: true
resource_name:
description:
- Fetch a specific object by name. If I(resource_definition) is provided, the I(metadata.name) value
from the I(resource_definition) will override this option.
namespace:
description:
- Limit the objects returned to a specific namespace. If I(resource_definition) is provided, the
I(metadata.namespace) value from the I(resource_definition) will override this option.
label_selector:
description:
- Additional labels to include in the query. Ignored when I(resource_name) is provided.
field_selector:
description:
- Specific fields on which to query. Ignored when I(resource_name) is provided.
resource_definition:
description:
- "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name),
and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
src:
description:
- "Provide a path to a file containing a valid YAML definition of an object dated. Mutually
exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
will be overwritten by corresponding values found in the configuration read in from the I(src) file."
- Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
I(resource_definition). See Examples below.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG environment
variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
variable.
cert_file:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment
variable.
key_file:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
variable.
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
environment variable.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
environment variable.
type: bool
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
notes:
- "The OpenShift Python client wraps the K8s Python client, providing full access to
all of the APIs and models available on both platforms. For API version details and
additional information visit https://github.com/openshift/openshift-restclient-python"
"""
EXAMPLES = """
- name: Fetch a list of namespaces
set_fact:
projects: "{{ lookup('k8s', api_version='v1', kind='Namespace') }}"
- name: Fetch all deployments
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch all deployments in a namespace
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch a specific deployment by name
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}"
- name: Fetch with label selector
set_fact:
service: "{{ lookup('k8s', kind='Service', label_selector='app=galaxy') }}"
# Use parameters from a YAML config
- name: Load config from the Ansible controller filesystem
set_fact:
config: "{{ lookup('file', 'service.yml') | from_yaml }}"
- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
set_fact:
service: "{{ lookup('k8s', resource_definition=config) }}"
- name: Use a config from the local filesystem
set_fact:
service: "{{ lookup('k8s', src='service.yml') }}"
"""
RETURN = """
_list:
description:
- One or more object definitions returned from the API.
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: The REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
from ansible.plugins.lookup import LookupBase
import os
from ansible.module_utils.six import iteritems
from ansible.module_utils.k8s.common import K8sAnsibleMixin
try:
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import NotFoundError
HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
HAS_K8S_MODULE_HELPER = False
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
class KubernetesLookup(K8sAnsibleMixin):
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
"Requires the OpenShift Python client. Try `pip install openshift`"
)
if not HAS_YAML:
raise Exception(
"Requires PyYAML. Try `pip install PyYAML`"
)
self.kind = None
self.name = None
self.namespace = None
self.api_version = None
self.label_selector = None
self.field_selector = None
self.include_uninitialized = None
self.resource_definition = None
self.helper = None
self.connection = {}
def run(self, terms, variables=None, **kwargs):
self.params = kwargs
self.client = self.get_api_client()
cluster_info = kwargs.get('cluster_info')
if cluster_info == 'version':
return [self.client.version]
if cluster_info == 'api_groups':
return [self.client.resources.api_groups]
self.kind = kwargs.get('kind')
self.name = kwargs.get('resource_name')
self.namespace = kwargs.get('namespace')
self.api_version = kwargs.get('api_version', 'v1')
self.label_selector = kwargs.get('label_selector')
self.field_selector = kwargs.get('field_selector')
self.include_uninitialized = kwargs.get('include_uninitialized', False)
resource_definition = kwargs.get('resource_definition')
src = kwargs.get('src')
if src:
resource_definition = self.load_resource_definitions(src)[0]
if resource_definition:
self.kind = resource_definition.get('kind', self.kind)
self.api_version = resource_definition.get('apiVersion', self.api_version)
self.name = resource_definition.get('metadata', {}).get('name', self.name)
self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)
if not self.kind:
raise Exception(
"Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
"using the 'resource_definition' parameter."
)
resource = self.client.resources.get(kind=self.kind, api_version=self.api_version)
try:
k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
except NotFoundError:
return []
if self.name:
return [k8s_obj.to_dict()]
return k8s_obj.to_dict().get('items')
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
return KubernetesLookup().run(terms, variables=variables, **kwargs)
| gpl-3.0 |
lnliuxing/Impala | shell/ext-py/sasl-0.1.1/sasl/saslwrapper.py | 76 | 4375 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.36
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _saslwrapper
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class PySwigIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PySwigIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PySwigIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _saslwrapper.delete_PySwigIterator
__del__ = lambda self : None;
def value(*args): return _saslwrapper.PySwigIterator_value(*args)
def incr(*args): return _saslwrapper.PySwigIterator_incr(*args)
def decr(*args): return _saslwrapper.PySwigIterator_decr(*args)
def distance(*args): return _saslwrapper.PySwigIterator_distance(*args)
def equal(*args): return _saslwrapper.PySwigIterator_equal(*args)
def copy(*args): return _saslwrapper.PySwigIterator_copy(*args)
def next(*args): return _saslwrapper.PySwigIterator_next(*args)
def previous(*args): return _saslwrapper.PySwigIterator_previous(*args)
def advance(*args): return _saslwrapper.PySwigIterator_advance(*args)
def __eq__(*args): return _saslwrapper.PySwigIterator___eq__(*args)
def __ne__(*args): return _saslwrapper.PySwigIterator___ne__(*args)
def __iadd__(*args): return _saslwrapper.PySwigIterator___iadd__(*args)
def __isub__(*args): return _saslwrapper.PySwigIterator___isub__(*args)
def __add__(*args): return _saslwrapper.PySwigIterator___add__(*args)
def __sub__(*args): return _saslwrapper.PySwigIterator___sub__(*args)
def __iter__(self): return self
PySwigIterator_swigregister = _saslwrapper.PySwigIterator_swigregister
PySwigIterator_swigregister(PySwigIterator)
class Client(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Client, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Client, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _saslwrapper.new_Client(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _saslwrapper.delete_Client
__del__ = lambda self : None;
def setAttr(*args): return _saslwrapper.Client_setAttr(*args)
def init(*args): return _saslwrapper.Client_init(*args)
def start(*args): return _saslwrapper.Client_start(*args)
def step(*args): return _saslwrapper.Client_step(*args)
def encode(*args): return _saslwrapper.Client_encode(*args)
def decode(*args): return _saslwrapper.Client_decode(*args)
def getUserId(*args): return _saslwrapper.Client_getUserId(*args)
def getError(*args): return _saslwrapper.Client_getError(*args)
Client_swigregister = _saslwrapper.Client_swigregister
Client_swigregister(Client)
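# Illustrative sketch (not part of the generated wrapper): code built on this
# low-level Client (for instance SASL-authenticated Thrift transports) typically
# sets connection attributes, initializes, and then drives the challenge/response
# loop; the attribute values below are made-up examples.
#
#   client = Client()
#   client.setAttr("host", "example.com")
#   client.setAttr("service", "impala")
#   client.init()
#   # start() selects a mechanism and step() handles each server challenge;
#   # their exact return values are defined by the underlying C++ saslwrapper.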
| apache-2.0 |
jalilag/apspir | objdictgen/gnosis/xml/pickle/ext/_mutators.py | 3 | 7670 | from _mutate import XMLP_Mutator, XMLP_Mutated
import _mutate
import sys, string
from types import *
from gnosis.util.introspect import isInstanceLike, attr_update, \
data2attr, attr2data, getCoreData, setCoreData, isinstance_any
from gnosis.xml.pickle.util import _klass, _module, obj_from_name
from gnosis.util.XtoY import aton
import gnosis.pyconfig
class _EmptyClass: pass
class mutate_builtin_wrapper(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,None,'builtin_wrapper')
def mutate(self,obj):
wrap = _EmptyClass()
wrap.__toplevel__ = obj
return XMLP_Mutated(wrap)
def unmutate(self,mobj):
return mobj.obj.__toplevel__
_mutate.add_mutator(mutate_builtin_wrapper())
# We pickle array.array() as type "array" and Numeric.array as
# type "Numpy_array" (this is really what earlier xml_pickles did,
# except you had to use EITHER array.array() or Numeric.array() -
# you couldn't mix them (in fact, you couldn't pickle array.array()
# types if Numeric was installed).
import array
#-- array.array --
class mutate_array(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,array.ArrayType,'array',0)
def mutate(self,obj):
list = []
for item in obj:
list.append(item)
return XMLP_Mutated(list)
def unmutate(self,mobj):
obj = mobj.obj
as_int = 1
for item in obj:
if type(item) == type(1.0):
as_int = 0
if as_int:
return array.array('b',obj)
else:
return array.array('d',obj) # double precision
_mutate.add_mutator(mutate_array())
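# Illustrative note (not part of the original module): unmutate() above rebuilds
# an all-integer list as array.array('b', ...) (signed char) and any list that
# contains a float as array.array('d', ...), so the original typecode is not
# preserved; e.g. a pickled array.array('i', [1, 2, 3]) would come back with
# typecode 'b'.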
#-- Numeric.array --
try:
import Numeric
HAS_NUMERIC = 1
except:
HAS_NUMERIC = 0
class mutate_numpy(XMLP_Mutator):
def __init__(self):
# note, Numeric.ArrayType != array.ArrayType, which is good :-)
XMLP_Mutator.__init__(self,Numeric.ArrayType,'NumPy_array',0)
def mutate(self,obj):
list = []
for item in obj:
list.append(item)
return XMLP_Mutated(list)
def unmutate(self,mobj):
return Numeric.array(mobj.obj)
if HAS_NUMERIC:
_mutate.add_mutator(mutate_numpy())
#-- SREs --
# save the RE pattern in the element body
import re
SRE_Pattern_type = type(re.compile(''))
class mutate_sre(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,SRE_Pattern_type,'SRE',paranoia=0,
in_body=1)
def mutate(self,obj):
return XMLP_Mutated(obj.pattern)
def unmutate(self,mobj):
return re.compile(mobj.obj)
_mutate.add_mutator(mutate_sre())
#-- rawpickles --
# save the pickle in the element body
try: import cPickle as pickle
except: import pickle
class mutate_rawpickle(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,None,'rawpickle',0)
def mutate(self,obj): return XMLP_Mutated(pickle.dumps(obj))
def unmutate(self,mobj): return pickle.loads(str(mobj.obj))
_mutate.add_mutator(mutate_rawpickle())
#-- mx.DateTime --
# see test_mutators.py for an alternate way to pickle these
try:
import mx.DateTime
mxDateTime_type = type(mx.DateTime.localtime())
except:
mxDateTime_type = None
class mutate_mxdatetime(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,mxDateTime_type,'mxDateTime',
paranoia=0,in_body=1)
def mutate(self,obj):
# (I avoided using strftime(), for portability reasons.)
# Pickle seconds as a float to save full precision.
s = "YMD = %d/%d/%d, HMS = %d:%d:%.17g" % \
(obj.year,obj.month,obj.day,\
obj.hour,obj.minute,obj.second)
return XMLP_Mutated(s)
def unmutate(self,mobj):
obj = mobj.obj
# is this forgiving enough? :-)
fmt = 'YMD\s*=\s*([0-9]+)\s*/\s*([0-9]+)\s*/\s*([0-9]+)\s*,\s*'
fmt += 'HMS\s*=\s*([0-9]+)\s*:\s*([0-9]+)\s*:\s*([0-9\.]+)'
m = re.match(fmt,obj)
# this started giving a deprecation warning about passing a
# float where an int was expected
#return apply(mx.DateTime.DateTime,map(float,m.groups()))
args = map(int,m.groups()[:5]) + [float(m.group(6))]
return apply(mx.DateTime.DateTime,args)
if mxDateTime_type is not None:
_mutate.add_mutator(mutate_mxdatetime())
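# Illustrative note (not part of the original module): with this mutator a local
# time of 2024-01-15 13:05:02.25 would be stored in the element body as
# "YMD = 2024/1/15, HMS = 13:5:2.25", and unmutate() parses that string back
# with the regular expression above; the date itself is a made-up example.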
#-- mutator + support functions for handling objects subclassed
#-- from builtin types (Python >= 2.2)
def newdata_to_olddata(o):
"""Given o, an object subclassed from a builtin type with no attributes,
return a tuple containing the raw data and a string containing
a tag to save in the extra= field"""
return (getCoreData(o),"%s %s"%(_module(o),_klass(o)))
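# Illustrative example (not part of the original module): for a hypothetical
# subclass `class MyList(list): pass` with no attributes,
# newdata_to_olddata(MyList([1, 2])) would return roughly
# ([1, 2], "__main__ MyList"), i.e. the core data plus the "module klass" tag
# that olddata_to_newdata() below uses to rebuild an instance of the right class.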
def olddata_to_newdata(data,extra,paranoia):
"""Given raw data, the extra= tag, and paranoia setting,
recreate the object that was passed to newdata_to_olddata."""
(module,klass) = extra.split()
o = obj_from_name(klass,module,paranoia)
#if isinstance(o,ComplexType) and \
# type(data) in [StringType,UnicodeType]:
# # yuck ... have to strip () from complex data before
# # passing to __init__ (ran into this also in one of the
# # parsers ... maybe the () shouldn't be in the XML at all?)
# if data[0] == '(' and data[-1] == ')':
# data = data[1:-1]
if isinstance_any(o,(IntType,FloatType,ComplexType,LongType)) and \
type(data) in [StringType,UnicodeType]:
data = aton(data)
o = setCoreData(o,data)
return o
# my semantic preferences, of the moment :-)
newinst_to_oldinst = data2attr
oldinst_to_newinst = attr2data
def hasPickleFuncs(obj):
"Does obj define the special pickling functions?"
return (hasattr(obj,'__getstate__') or \
hasattr(obj,'__setstate__') or \
hasattr(obj,'__getinitargs__'))
class mutate_bltin_instances(XMLP_Mutator):
def __init__(self):
XMLP_Mutator.__init__(self,None,'__compound__',0)
def mutate(self,obj):
if isinstance(obj,UnicodeType):
# unicode strings are required to be placed in the body
# (by our encoding scheme)
self.in_body = 1
else:
# XXX really should check getInBody(), but we'd have
# to do isinstance() for each type ... maybe do later
self.in_body = 0
if isInstanceLike(obj) or hasPickleFuncs(obj):
# obj has data items (list,dict,tuple) *AND* attributes.
# mutate to an oldstyle object, turning the data items into
# a special attribute (eg. __items__, __entries__).
#
# also, if obj defines the special pickling functions, we treat
# it as an instance so we don't have to duplicate all the
# protocol logic here.
return XMLP_Mutated(newinst_to_oldinst(obj))
else:
# obj has only data items (list,dict,tuple,etc.)
# convert to the raw datatype and remember the
# module.class of obj for unpickling.
(o,t) = newdata_to_olddata(obj)
return XMLP_Mutated(o,t)
def unmutate(self,mobj):
obj = mobj.obj
if not mobj.extra:
# converting obj with __coredata__ + attrs
return oldinst_to_newinst(obj)
else:
# converting obj with __coredata__ but no attrs
return olddata_to_newdata(obj,mobj.extra,self.paranoia)
# add mutator for instances of builtin classes (int, dict, object, etc.)
if gnosis.pyconfig.Have_ObjectClass():
_mutate.add_mutator(mutate_bltin_instances())
| lgpl-2.1 |
xq262144/hue | desktop/core/ext-py/Django-1.6.10/tests/validators/tests.py | 38 | 9616 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import types
from datetime import datetime, timedelta
from django.core.exceptions import ValidationError
from django.core.validators import *
from django.test.utils import str_prefix
from django.utils.unittest import TestCase
NOW = datetime.now()
TEST_DATA = (
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, None),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected].उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, '[email protected]', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '1,2,3,', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10*'x', None),
(MaxLengthValidator(10), 15*'x', ValidationError),
(MinLengthValidator(10), 15*'x', None),
(MinLengthValidator(10), 10*'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(), 'http://www.djangoproject.com/', None),
(URLValidator(), 'http://localhost/', None),
(URLValidator(), 'http://example.com/', None),
(URLValidator(), 'http://www.example.com/', None),
(URLValidator(), 'http://www.example.com:8000/test', None),
(URLValidator(), 'http://valid-with-hyphens.com/', None),
(URLValidator(), 'http://subdomain.example.com/', None),
(URLValidator(), 'http://200.8.9.10/', None),
(URLValidator(), 'http://200.8.9.10:8000/test', None),
(URLValidator(), 'http://valid-----hyphens.com/', None),
(URLValidator(), 'http://example.com?something=value', None),
(URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
(URLValidator(), 'foo', ValidationError),
(URLValidator(), 'http://', ValidationError),
(URLValidator(), 'http://example', ValidationError),
(URLValidator(), 'http://example.', ValidationError),
(URLValidator(), 'http://.com', ValidationError),
(URLValidator(), 'http://invalid-.com', ValidationError),
(URLValidator(), 'http://-invalid.com', ValidationError),
(URLValidator(), 'http://invalid.com-', ValidationError),
(URLValidator(), 'http://inv-.alid-.com', ValidationError),
(URLValidator(), 'http://inv-.-alid.com', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
)
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(TestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
| apache-2.0 |
ychen820/microblog | y/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/updater/local_state.py | 2 | 26812 | # Copyright 2013 Google Inc. All Rights Reserved.
"""Manages the state of what is installed in the cloud SDK.
This tracks the installed modules along with the files they created. It also
provides functionality like extracting tar files into the installation and
tracking when we check for updates.
"""
import errno
import json
import logging
import os
import shutil
import sys
import time
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.updater import snapshots
from googlecloudsdk.core.util import console_io
from googlecloudsdk.core.util import files as file_utils
class Error(exceptions.Error):
"""Base exception for the local_state module."""
pass
class InvalidSDKRootError(Error):
"""Error for when the root of the Cloud SDK is invalid or cannot be found."""
def __init__(self):
super(InvalidSDKRootError, self).__init__(
'The update action could not be performed because the installation root'
' of the Cloud SDK could not be located. Please re-install the Cloud '
'SDK and try again.')
class InvalidDownloadError(Error):
"""Exception for when the SDK that was download was invalid."""
def __init__(self):
super(InvalidDownloadError, self).__init__(
'The Cloud SDK download was invalid.')
class PermissionsError(Error):
"""Error for when a file operation cannot complete due to permissions."""
def __init__(self, message, path):
"""Initialize a PermissionsError.
Args:
message: str, The message from the underlying error.
path: str, The absolute path to a file or directory that needs to be
operated on, but can't because of insufficient permissions.
"""
super(PermissionsError, self).__init__(
'{message}: [{path}]\n\nEnsure you have the permissions to access the '
'file and that the file is not in use.'
.format(message=message, path=path))
def _RaisesPermissionsError(func):
"""Use this decorator for functions that deal with files.
If an exception indicating file permissions is raised, this decorator will
raise a PermissionsError instead, so that the caller only has to watch for
one type of exception.
Args:
func: The function to decorate.
Returns:
A decorator.
"""
def _TryFunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except (OSError, IOError) as e:
if e.errno == errno.EACCES:
new_exc = PermissionsError(
message=e.strerror, path=os.path.abspath(e.filename))
# Maintain original stack trace.
raise new_exc, None, sys.exc_info()[2]
raise
except shutil.Error as e:
args = e.args[0][0]
# unfortunately shutil.Error *only* has formatted strings to inspect.
# Looking for this substring is looking for errno.EACCES, which has
# a numeric value of 13.
if args[2].startswith('[Errno 13]'):
new_exc = PermissionsError(
message=args[2], path=os.path.abspath(args[0]))
# Maintain original stack trace.
raise new_exc, None, sys.exc_info()[2]
raise
return _TryFunc
class InstallationState(object):
"""The main class for checking / updating local installation state."""
STATE_DIR_NAME = config.Paths.CLOUDSDK_STATE_DIR
BACKUP_DIR_NAME = '.backup'
TRASH_DIR_NAME = '.trash'
STAGING_ROOT_SUFFIX = '.staging'
COMPONENT_SNAPSHOT_FILE_SUFFIX = '.snapshot.json'
@staticmethod
def ForCurrent():
"""Gets the installation state for the SDK that this code is running in.
Returns:
InstallationState, The state for this area.
Raises:
InvalidSDKRootError: If this code is not running under a valid SDK.
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise InvalidSDKRootError()
return InstallationState(os.path.realpath(sdk_root))
@staticmethod
def VersionForInstalledComponent(component_id):
"""Gets the version string for the given installed component.
This function is to be used to get component versions for metrics reporting.
If it fails in any way or if the component_id is unknown, it will return
None. This prevents errors from surfacing when the version is needed
strictly for reporting purposes.
Args:
component_id: str, The component id of the component you want the version
for.
Returns:
str, The installed version of the component, or None if it is not
installed or if an error occurs.
"""
try:
state = InstallationState.ForCurrent()
# pylint: disable=protected-access, This is the same class.
return InstallationManifest(
state._state_directory, component_id).VersionString()
# pylint: disable=bare-except, We never want to fail because of metrics.
except:
logging.debug('Failed to get installed version for component [%s]: [%s]',
component_id, sys.exc_info())
return None
@_RaisesPermissionsError
def __init__(self, sdk_root):
"""Initializes the installation state for the given sdk install.
Args:
sdk_root: str, The file path of the root of the SDK installation.
Raises:
ValueError: If the given SDK root does not exist.
"""
if not os.path.isdir(sdk_root):
raise ValueError('The given Cloud SDK root does not exist: [{0}]'
.format(sdk_root))
self.__sdk_root = sdk_root
self._state_directory = os.path.join(sdk_root,
InstallationState.STATE_DIR_NAME)
self.__backup_directory = os.path.join(self._state_directory,
InstallationState.BACKUP_DIR_NAME)
self.__trash_directory = os.path.join(self._state_directory,
InstallationState.TRASH_DIR_NAME)
self.__sdk_staging_root = (os.path.normpath(self.__sdk_root) +
InstallationState.STAGING_ROOT_SUFFIX)
for d in [self._state_directory]:
if not os.path.isdir(d):
file_utils.MakeDir(d)
@property
def sdk_root(self):
"""Gets the root of the SDK that this state corresponds to.
Returns:
str, the path to the root directory.
"""
return self.__sdk_root
def _FilesForSuffix(self, suffix):
"""Returns the files in the state directory that have the given suffix.
Args:
suffix: str, The file suffix to match on.
Returns:
list of str, The file names that match.
"""
files = os.listdir(self._state_directory)
matching = [f for f in files
if os.path.isfile(os.path.join(self._state_directory, f))
and f.endswith(suffix)]
return matching
@_RaisesPermissionsError
def InstalledComponents(self):
"""Gets all the components that are currently installed.
Returns:
A dictionary of component id string to InstallationManifest.
"""
snapshot_files = self._FilesForSuffix(
InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
manifests = {}
for f in snapshot_files:
component_id = f[:-len(InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)]
manifests[component_id] = InstallationManifest(self._state_directory,
component_id)
return manifests
@_RaisesPermissionsError
def Snapshot(self):
"""Generates a ComponentSnapshot from the currently installed components."""
return snapshots.ComponentSnapshot.FromInstallState(self)
def LastUpdateCheck(self):
"""Gets a LastUpdateCheck object to check update status."""
return LastUpdateCheck(self)
def DiffCurrentState(self, latest_snapshot, platform_filter=None):
"""Generates a ComponentSnapshotDiff from current state and the given state.
Args:
latest_snapshot: snapshots.ComponentSnapshot, The current state of the
world to diff against.
platform_filter: platforms.Platform, A platform that components must
match in order to be considered for any operations.
Returns:
A ComponentSnapshotDiff.
"""
return self.Snapshot().CreateDiff(latest_snapshot,
platform_filter=platform_filter)
@_RaisesPermissionsError
def CloneToStaging(self, progress_callback=None):
"""Clones this state to the temporary staging area.
This is used for making temporary copies of the entire Cloud SDK
installation when doing updates. The entire installation is cloned, but
doing so removes any backups and trash from this state before doing the
copy.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
Returns:
An InstallationState object for the cloned install.
"""
(rm_staging_cb, rm_backup_cb, rm_trash_cb, copy_cb) = (
console_io.ProgressBar.SplitProgressBar(progress_callback,
[1, 1, 1, 7]))
self._ClearStaging(progress_callback=rm_staging_cb)
self.ClearBackup(progress_callback=rm_backup_cb)
self.ClearTrash(progress_callback=rm_trash_cb)
class Counter(object):
def __init__(self, progress_callback, total):
self.count = 0
self.progress_callback = progress_callback
self.total = float(total)
# This function must match the signature that shutil expects for the
# ignore function.
def Tick(self, *unused_args):
self.count += 1
self.progress_callback(self.count / self.total)
return []
if progress_callback:
# This takes a little time, so only do it if we are going to report
# progress.
dirs = set()
for _, manifest in self.InstalledComponents().iteritems():
dirs.update(manifest.InstalledDirectories())
# There is always the root directory itself and the .install directory.
# In general, there could be more directories in the SDK (if people just put stuff in there)
# but this is fine for an estimate. The progress bar will at worst stay
# at 100% for slightly longer.
total_dirs = len(dirs) + 2
ticker = Counter(copy_cb, total_dirs).Tick if total_dirs else None
else:
ticker = None
shutil.copytree(self.__sdk_root, self.__sdk_staging_root, symlinks=True,
ignore=ticker)
return InstallationState(self.__sdk_staging_root)
@_RaisesPermissionsError
def CreateStagingFromDownload(self, url, progress_callback=None):
"""Creates a new staging area from a fresh download of the Cloud SDK.
Args:
url: str, The url to download the new SDK from.
progress_callback: f(float), A function to call with the fraction of
completeness.
Returns:
An InstallationState object for the new install.
Raises:
installers.URLFetchError: If the new SDK could not be downloaded.
InvalidDownloadError: If the new SDK was malformed.
"""
self._ClearStaging()
with file_utils.TemporaryDirectory() as t:
download_dir = os.path.join(t, '.download')
extract_dir = os.path.join(t, '.extract')
installers.ComponentInstaller.DownloadAndExtractTar(
url, download_dir, extract_dir, progress_callback=progress_callback)
files = os.listdir(extract_dir)
if len(files) != 1:
raise InvalidDownloadError()
sdk_root = os.path.join(extract_dir, files[0])
file_utils.MoveDir(sdk_root, self.__sdk_staging_root)
staging_sdk = InstallationState(self.__sdk_staging_root)
self.CopyMachinePropertiesTo(staging_sdk)
return staging_sdk
@_RaisesPermissionsError
def ReplaceWith(self, other_install_state):
"""Replaces this installation with the given other installation.
This moves the current installation to the backup directory of the other
installation. Then, it moves the entire second installation to replace
this one on the file system. The result is that the other installation
completely replaces the current one, but the current one is snapshotted and
stored as a backup under the new one (and can be restored later).
Args:
other_install_state: InstallationState, The other state with which to
replace this one.
"""
self.ClearBackup()
self.ClearTrash()
other_install_state.ClearBackup()
# pylint: disable=protected-access, This is an instance of InstallationState
file_utils.MoveDir(self.__sdk_root, other_install_state.__backup_directory)
file_utils.MoveDir(other_install_state.__sdk_root, self.__sdk_root)
@_RaisesPermissionsError
def RestoreBackup(self):
"""Restore the backup from this install state if it exists.
If this installation has a backup stored in it (created by an update that
used ReplaceWith(), above), it replaces this installation with the backup,
using a temporary staging area. This installation is moved to the trash
directory under the installation that exists after this is done. The trash
directory can be removed at any point in the future. We just don't want to
delete code that is running since some platforms have a problem with that.
Returns:
bool, True if there was a backup to restore, False otherwise.
"""
if not self.HasBackup():
return False
self._ClearStaging()
file_utils.MoveDir(self.__backup_directory, self.__sdk_staging_root)
staging_state = InstallationState(self.__sdk_staging_root)
staging_state.ClearTrash()
# pylint: disable=protected-access, This is an instance of InstallationState
file_utils.MoveDir(self.__sdk_root, staging_state.__trash_directory)
file_utils.MoveDir(staging_state.__sdk_root, self.__sdk_root)
return True
def HasBackup(self):
"""Determines if this install has a valid backup that can be restored.
Returns:
bool, True if there is a backup, False otherwise.
"""
return os.path.isdir(self.__backup_directory)
def BackupDirectory(self):
"""Gets the backup directory of this installation if it exists.
Returns:
str, The path to the backup directory or None if it does not exist.
"""
if self.HasBackup():
return self.__backup_directory
return None
@_RaisesPermissionsError
def _ClearStaging(self, progress_callback=None):
"""Deletes the current staging directory if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.exists(self.__sdk_staging_root):
file_utils.RmTree(self.__sdk_staging_root)
if progress_callback:
progress_callback(1)
@_RaisesPermissionsError
def ClearBackup(self, progress_callback=None):
"""Deletes the current backup if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.isdir(self.__backup_directory):
file_utils.RmTree(self.__backup_directory)
if progress_callback:
progress_callback(1)
@_RaisesPermissionsError
def ClearTrash(self, progress_callback=None):
"""Deletes the current trash directory if it exists.
Args:
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
if os.path.isdir(self.__trash_directory):
file_utils.RmTree(self.__trash_directory)
if progress_callback:
progress_callback(1)
def _GetInstaller(self, snapshot):
"""Gets a component installer based on the given snapshot.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
component to install.
Returns:
The installers.ComponentInstaller.
"""
return installers.ComponentInstaller(self.__sdk_root,
self._state_directory,
snapshot)
@_RaisesPermissionsError
def Install(self, snapshot, component_id, progress_callback=None):
"""Installs the given component based on the given snapshot.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that describes the
component to install.
component_id: str, The component to install from the given snapshot.
progress_callback: f(float), A function to call with the fraction of
completeness.
Raises:
installers.URLFetchError: If the component associated with the provided
component ID has a URL that is not fetched correctly.
"""
files = self._GetInstaller(snapshot).Install(
component_id, progress_callback=progress_callback)
manifest = InstallationManifest(self._state_directory, component_id)
manifest.MarkInstalled(snapshot, files)
@_RaisesPermissionsError
def Uninstall(self, component_id, progress_callback=None):
"""Uninstalls the given component.
Deletes all the files for this component and marks it as no longer being
installed.
Args:
component_id: str, The id of the component to uninstall.
progress_callback: f(float), A function to call with the fraction of
completeness.
"""
manifest = InstallationManifest(self._state_directory, component_id)
paths = manifest.InstalledPaths()
total_paths = float(len(paths))
root = self.__sdk_root
dirs_to_remove = set()
for num, p in enumerate(paths, start=1):
path = os.path.join(root, p)
if os.path.isfile(path) or os.path.islink(path):
os.remove(path)
# Clean up the pyc files that correspond to any py files being removed.
if p.endswith('.py'):
pyc_path = path + 'c'
if os.path.isfile(pyc_path):
os.remove(pyc_path)
dir_path = os.path.dirname(path)
if dir_path:
dirs_to_remove.add(os.path.normpath(dir_path))
elif os.path.isdir(path):
dirs_to_remove.add(os.path.normpath(path))
if progress_callback:
progress_callback(num / total_paths)
# Remove dirs from the bottom up. Subdirs will always have a longer path
# than their parent.
for d in sorted(dirs_to_remove, key=len, reverse=True):
if os.path.isdir(d) and not os.path.islink(d) and not os.listdir(d):
os.rmdir(d)
manifest.MarkUninstalled()
def CopyMachinePropertiesTo(self, other_state):
"""Copy this state's properties file to another state.
This is primarily intended to be used to maintain the machine properties
file during a schema-change-induced reinstall.
Args:
other_state: InstallationState, The installation state of the fresh
Cloud SDK that needs the properties file mirrored in.
"""
my_properties = os.path.join(
self.sdk_root, config.Paths.CLOUDSDK_PROPERTIES_NAME)
other_properties = os.path.join(
other_state.sdk_root, config.Paths.CLOUDSDK_PROPERTIES_NAME)
if not os.path.exists(my_properties):
return
shutil.copyfile(my_properties, other_properties)
class InstallationManifest(object):
"""Class to encapsulate the data stored in installation manifest files."""
MANIFEST_SUFFIX = '.manifest'
def __init__(self, state_dir, component_id):
"""Creates a new InstallationManifest.
Args:
state_dir: str, The directory path where install state is stored.
component_id: str, The component id that you want to get the manifest for.
"""
self.state_dir = state_dir
self.id = component_id
self.snapshot_file = os.path.join(
self.state_dir,
component_id + InstallationState.COMPONENT_SNAPSHOT_FILE_SUFFIX)
self.manifest_file = os.path.join(
self.state_dir,
component_id + InstallationManifest.MANIFEST_SUFFIX)
def MarkInstalled(self, snapshot, files):
"""Marks this component as installed with the given snapshot and files.
This saves the ComponentSnapshot and writes the installed files to a
manifest so they can be removed later.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot that was the source
of the install.
files: list of str, The files that were created by the installation.
"""
with open(self.manifest_file, 'w') as fp:
for f in files:
fp.write(f + '\n')
snapshot.WriteToFile(self.snapshot_file)
def MarkUninstalled(self):
"""Marks this component as no longer being installed.
This does not actually uninstall the component, but rather just removes the
snapshot and manifest.
"""
for f in [self.manifest_file, self.snapshot_file]:
if os.path.isfile(f):
os.remove(f)
def ComponentSnapshot(self):
"""Loads the local ComponentSnapshot for this component.
Returns:
The snapshots.ComponentSnapshot for this component.
"""
return snapshots.ComponentSnapshot.FromFile(self.snapshot_file)
def ComponentDefinition(self):
"""Loads the ComponentSnapshot and get the schemas.Component this component.
Returns:
The schemas.Component for this component.
"""
return self.ComponentSnapshot().ComponentFromId(self.id)
def VersionString(self):
"""Gets the version string of this component as it was installed.
Returns:
str, The installed version of this component.
"""
return self.ComponentDefinition().version.version_string
def InstalledPaths(self):
"""Gets the list of files and dirs created by installing this component.
Returns:
list of str, The files and directories installed by this component.
"""
with open(self.manifest_file) as f:
files = [line.rstrip() for line in f]
return files
def InstalledDirectories(self):
"""Gets the set of directories created by installing this component.
Returns:
set(str), The directories installed by this component.
"""
with open(self.manifest_file) as f:
dirs = set()
for line in f:
fixed = line.rstrip()
if fixed.endswith('/'):
dirs.add(fixed)
return dirs
class LastUpdateCheck(object):
"""A class to encapsulate information on when we last checked for updates."""
LAST_UPDATE_CHECK_FILE = 'last_update_check.json'
DATE = 'date'
LAST_NAG_DATE = 'last_nag_date'
REVISION = 'revision'
UPDATES_AVAILABLE = 'updates_available'
def __init__(self, install_state):
self.__install_state = install_state
# pylint: disable=protected-access, These classes work together
self.__last_update_check_file = os.path.join(
install_state._state_directory, LastUpdateCheck.LAST_UPDATE_CHECK_FILE)
self._LoadData()
def _LoadData(self):
"""Deserializes data from the json file."""
self.__dirty = False
if not os.path.isfile(self.__last_update_check_file):
data = {}
else:
with open(self.__last_update_check_file) as fp:
data = json.loads(fp.read())
self.__last_update_check_date = data.get(LastUpdateCheck.DATE, 0)
self.__last_nag_date = data.get(LastUpdateCheck.LAST_NAG_DATE, 0)
self.__last_update_check_revision = data.get(LastUpdateCheck.REVISION, 0)
self.__updates_available = data.get(LastUpdateCheck.UPDATES_AVAILABLE,
False)
def _SaveData(self):
"""Serializes data to the json file."""
if not self.__dirty:
return
data = {LastUpdateCheck.DATE: self.__last_update_check_date,
LastUpdateCheck.LAST_NAG_DATE: self.__last_nag_date,
LastUpdateCheck.REVISION: self.__last_update_check_revision,
LastUpdateCheck.UPDATES_AVAILABLE: self.__updates_available}
with open(self.__last_update_check_file, 'w') as fp:
fp.write(json.dumps(data))
self.__dirty = False
def __enter__(self):
return self
def __exit__(self, *args):
self._SaveData()
def UpdatesAvailable(self):
"""Returns whether we already know about updates that are available.
Returns:
bool, True if we know about updates, False otherwise.
"""
return self.__updates_available
def LastUpdateCheckRevision(self):
"""Gets the revision of the snapshot from the last update check.
Returns:
int, The revision of the last checked snapshot.
"""
return self.__last_update_check_revision
def LastUpdateCheckDate(self):
"""Gets the time of the last update check as seconds since the epoch.
Returns:
int, The time of the last update check.
"""
return self.__last_update_check_date
def LastNagDate(self):
"""Gets the time when the last nag was printed as seconds since the epoch.
Returns:
int, The time of the last nag.
"""
return self.__last_nag_date
def SecondsSinceLastUpdateCheck(self):
"""Gets the number of seconds since we last did an update check.
Returns:
int, The amount of time in seconds.
"""
return time.time() - self.__last_update_check_date
def SecondsSinceLastNag(self):
"""Gets the number of seconds since we last printed that there were updates.
Returns:
int, The amount of time in seconds.
"""
return time.time() - self.__last_nag_date
@_RaisesPermissionsError
def SetFromSnapshot(self, snapshot, force=False):
"""Sets that we just did an update check and found the given snapshot.
If the given snapshot is different than the last one we saw, this will also
diff the new snapshot with the current install state to refresh whether
there are components available for update.
You must call Save() to persist these changes.
Args:
snapshot: snapshots.ComponentSnapshot, The snapshot pulled from the
server.
force: bool, True to force a recalculation of whether there are available
updates, even if the snapshot revision has not changed.
Returns:
bool, True if there are now components to update, False otherwise.
"""
if force or self.__last_update_check_revision != snapshot.revision:
diff = self.__install_state.DiffCurrentState(snapshot)
self.__updates_available = bool(diff.AvailableUpdates())
self.__last_update_check_revision = snapshot.revision
self.__last_update_check_date = time.time()
self.__dirty = True
return self.__updates_available
def SetFromIncompatibleSchema(self):
"""Sets that we just did an update check and found a new schema version.
You must call Save() to persist these changes.
"""
self.__updates_available = True
self.__last_update_check_revision = 0 # Doesn't matter
self.__last_update_check_date = time.time()
self.__dirty = True
def SetNagged(self):
"""Sets that we printed the update nag."""
self.__last_nag_date = time.time()
self.__dirty = True
def Save(self):
"""Saves the changes we made to this object."""
self._SaveData()
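# Illustrative sketch (not part of the original module): because LastUpdateCheck
# implements __enter__/__exit__, callers can let changes persist automatically:
#
#   with InstallationState.ForCurrent().LastUpdateCheck() as check:
#       if check.UpdatesAvailable() and check.SecondsSinceLastNag() > 86400:
#           check.SetNagged()
#
# the one-day threshold here is a made-up value for illustration.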
| bsd-3-clause |
supriyantomaftuh/django | tests/template_tests/filter_tests/test_truncatechars_html.py | 390 | 1229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import truncatechars_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_truncate_zero(self):
self.assertEqual(truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 0), '...')
def test_truncate(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 6),
'<p>one...</p>',
)
def test_truncate2(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 11),
'<p>one <a href="#">two ...</a></p>',
)
def test_truncate3(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 100),
'<p>one <a href="#">two - three <br>four</a> five</p>',
)
def test_truncate_unicode(self):
self.assertEqual(truncatechars_html('<b>\xc5ngstr\xf6m</b> was here', 5), '<b>\xc5n...</b>')
def test_truncate_something(self):
self.assertEqual(truncatechars_html('a<b>b</b>c', 3), 'a<b>b</b>c')
| bsd-3-clause |
psykidellic/appengine-flask-skeleton | lib/pyasn1_modules/rfc2560.py | 127 | 7821 | #
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <[email protected]>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response works only for a single certificate
# * only some values are parsed out of the response
# * the request does't set a nonce nor signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import tag, namedtype, namedval, univ, constraint, useful
from pyasn1_modules import rfc2459
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString): pass
# end of PKIX Certificate Extensions module
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
componentType = univ.ObjectIdentifier()
class ArchiveCutoff(useful.GeneralizedTime): pass
class UnknownInfo(univ.Null): pass
class RevokedInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('issuerNameHash', univ.OctetString()),
namedtype.NamedType('issuerKeyHash', univ.OctetString()),
namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
)
class CertStatus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('good', univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('revoked', RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('unknown', UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SingleResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certID', CertID()),
namedtype.NamedType('certStatus', CertStatus()),
namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyHash(univ.OctetString): pass
class ResponderID(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('byName', rfc2459.Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('byKey', KeyHash().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('responderID', ResponderID()),
namedtype.NamedType('producedAt', useful.GeneralizedTime()),
namedtype.NamedType('responses', univ.SequenceOf(SingleResponse())),
namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class BasicOCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsResponseData', ResponseData()),
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ResponseBytes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseType', univ.ObjectIdentifier()),
namedtype.NamedType('response', univ.OctetString())
)
class OCSPResponseStatus(univ.Enumerated):
namedValues = namedval.NamedValues(
('successful', 0),
('malformedRequest', 1),
('internalError', 2),
('tryLater', 3),
('undefinedStatus', 4), # should never occur
('sigRequired', 5),
('unauthorized', 6)
)
class OCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseStatus', OCSPResponseStatus()),
namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Request(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('reqCert', CertID()),
namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class TBSRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestorName', GeneralName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('requestList', univ.SequenceOf(Request())),
namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class OCSPRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsRequest', TBSRequest()),
namedtype.OptionalNamedType('optionalSignature', Signature().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
| apache-2.0 |
playm2mboy/edx-platform | lms/djangoapps/shoppingcart/migrations/0027_add_invoice_history.py | 102 | 22387 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'InvoiceHistory'
db.create_table('shoppingcart_invoicehistory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('invoice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shoppingcart.Invoice'])),
('snapshot', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('shoppingcart', ['InvoiceHistory'])
def backwards(self, orm):
# Deleting model 'InvoiceHistory'
db.delete_table('shoppingcart_invoicehistory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregcodeitem': {
'Meta': {'object_name': 'CourseRegCodeItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.courseregcodeitemannotation': {
'Meta': {'object_name': 'CourseRegCodeItemAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'invoice_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCodeInvoiceItem']", 'null': 'True'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.courseregistrationcodeinvoiceitem': {
'Meta': {'object_name': 'CourseRegistrationCodeInvoiceItem', '_ormbases': ['shoppingcart.InvoiceItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'invoiceitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.InvoiceItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donationconfiguration': {
'Meta': {'object_name': 'DonationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.invoicehistory': {
'Meta': {'object_name': 'InvoiceHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'snapshot': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'shoppingcart.invoiceitem': {
'Meta': {'object_name': 'InvoiceItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'})
},
'shoppingcart.invoicetransaction': {
'Meta': {'object_name': 'InvoiceTransaction'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_modified_by_user'", 'to': "orm['auth.User']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '32'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '32'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
IronLanguages/ironpython3 | Src/StdLib/Lib/csv.py | 90 | 16185 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
from io import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join([repr(x) for x in wrong_fields]))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
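    # Illustrative usage sketch (hypothetical file name, not part of the
    # original module): sniff a sample of a file, then reuse the dialect.
    #
    #     with open('example.csv', newline='') as csvfile:
    #         sample = csvfile.read(1024)
    #         dialect = Sniffer().sniff(sample)
    #         csvfile.seek(0)
    #         for row in reader(csvfile, dialect):
    #             print(row)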
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
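        # For instance (hypothetical data), with the rows 'name,age',
        # 'alice,31' and 'bob,25' the second column is all-integer except in
        # the first row, so that column votes for the first row being a header.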
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
for thisType in [int, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
| apache-2.0 |
heisewangluo/micropython | tools/make-frozen.py | 34 | 1275 | #!/usr/bin/env python
#
# Create frozen modules structure for MicroPython.
#
# Usage:
#
# Have a directory with modules to be frozen (only modules, not packages
# supported so far):
#
# frozen/foo.py
# frozen/bar.py
#
# Run script, passing path to the directory above:
#
# ./make-frozen.py frozen > frozen.c
#
# Include frozen.c in your build, having defined MICROPY_MODULE_FROZEN in
# config.
#
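# For a single frozen module foo.py containing print('hi'), the generated C
# looks roughly like this (illustrative output; sizes depend on the file):
#
# #include <stdint.h>
# const uint16_t mp_frozen_sizes[] = {
# 12,
# 0};
# const char mp_frozen_content[] = {
# "foo\0"
# "print('hi')\n"
# };
#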
from __future__ import print_function
import sys
import os
def module_name(f):
return f[:-len(".py")]
modules = []
root = sys.argv[1].rstrip("/")
root_len = len(root)
for dirpath, dirnames, filenames in os.walk(root):
for f in filenames:
fullpath = dirpath + "/" + f
st = os.stat(fullpath)
modules.append((fullpath[root_len + 1:], st))
print("#include <stdint.h>")
print("const uint16_t mp_frozen_sizes[] = {")
for f, st in modules:
print("%d," % st.st_size)
print("0};")
print("const char mp_frozen_content[] = {")
for f, st in modules:
m = module_name(f)
print('"%s\\0"' % m)
data = open(sys.argv[1] + "/" + f, "rb").read()
# Python2 vs Python3 tricks
data = repr(data)
if data[0] == "b":
data = data[1:]
data = data[1:-1]
data = data.replace('"', '\\"')
print('"%s"' % data)
print("};")
| mit |
harshaneelhg/scikit-learn | sklearn/naive_bayes.py | 128 | 28358 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
        X : array-like, shape (n_samples, number of Gaussians)
            New data points to incorporate into the running statistics.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
        This method has some performance overhead, hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
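    # Note: the joint log likelihood below is log P(y) + sum_i x_i * log P(x_i|y),
    # an affine function of X, which is why coef_ and intercept_ can mirror
    # feature_log_prob_ and class_log_prior_.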
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
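        # The class count receives alpha pseudo-counts for each of the two
        # Bernoulli outcomes (feature present / absent), hence "alpha * 2".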
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
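        # neg_prob is log P(x_i = 0 | y), the log probability of a feature
        # being absent in a given class.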
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
3dfxmadscientist/odoo-infrastructure | addons/infrastructure/hostname.py | 1 | 1468 | # -*- coding: utf-8 -*-
##############################################################################
#
# Infrastructure
# Copyright (C) 2014 Ingenieria ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class hostname(osv.osv):
""""""
_name = 'infrastructure.hostname'
_description = 'hostname'
_columns = {
'name': fields.char(string='name', required=True),
'server_id': fields.many2one('infrastructure.server', string='Server', ondelete='cascade', required=True),
}
_defaults = {
}
_constraints = [
]
hostname()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andresmrm/brython-experiment | static/brython/Lib/unittest/test/_test_warnings.py | 858 | 2304 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and checks that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
# unittest warnings will be printed at most once per type (max one message
# for the fail* methods, and one for the assert* methods)
def test_assert(self):
self.assertEquals(2+2, 4)
self.assertEquals(2*2, 4)
self.assertEquals(2**2, 4)
def test_fail(self):
self.failUnless(1)
self.failUnless(True)
def test_other_unittest(self):
self.assertAlmostEqual(2+2, 4)
self.assertNotAlmostEqual(4+4, 2)
# these warnings are normally silenced, but they are printed in unittest
def test_deprecation(self):
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
def test_import(self):
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
# user warnings should always be printed
def test_warning(self):
warnings.warn('uw')
warnings.warn('uw')
warnings.warn('uw')
# these warnings come from the same place; they will be printed
# only once by default or three times if the 'always' filter is used
def test_function(self):
warnfun()
warnfun()
warnfun()
if __name__ == '__main__':
with warnings.catch_warnings(record=True) as ws:
# if an arg is provided pass it to unittest.main as 'warnings'
if len(sys.argv) == 2:
unittest.main(exit=False, warnings=sys.argv.pop())
else:
unittest.main(exit=False)
# print all the warning messages collected
for w in ws:
print(w.message)
| agpl-3.0 |
neurotechuoft/MindType | Code/V1/src/deprecated/pyqtgraph/tests/test_srttransform3d.py | 51 | 1339 | import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_almost_equal
testPoints = np.array([
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[-1, -1, 0],
[0, -1, -1]])
def testMatrix():
"""
SRTTransform3D => Transform3D => SRTTransform3D
"""
tr = pg.SRTTransform3D()
tr.setRotate(45, (0, 0, 1))
tr.setScale(0.2, 0.4, 1)
tr.setTranslate(10, 20, 40)
assert tr.getRotation() == (45, QtGui.QVector3D(0, 0, 1))
assert tr.getScale() == QtGui.QVector3D(0.2, 0.4, 1)
assert tr.getTranslation() == QtGui.QVector3D(10, 20, 40)
tr2 = pg.Transform3D(tr)
assert np.all(tr.matrix() == tr2.matrix())
# This is the most important test:
# The transition from Transform3D to SRTTransform3D is a tricky one.
tr3 = pg.SRTTransform3D(tr2)
assert_array_almost_equal(tr.matrix(), tr3.matrix())
assert_almost_equal(tr3.getRotation()[0], tr.getRotation()[0])
assert_array_almost_equal(tr3.getRotation()[1], tr.getRotation()[1])
assert_array_almost_equal(tr3.getScale(), tr.getScale())
assert_array_almost_equal(tr3.getTranslation(), tr.getTranslation())
| agpl-3.0 |
devendermishrajio/nova_test_latest | nova/tests/unit/scheduler/filters/test_json_filters.py | 63 | 11677 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova.scheduler.filters import json_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestJsonFilter(test.NoDBTestCase):
def setUp(self):
super(TestJsonFilter, self).setUp()
self.filt_cls = json_filter.JsonFilter()
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
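        # i.e. only hosts with at least 1024 MB of free RAM and 200 GB
        # (200 * 1024 MB) of free disk pass this filter.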
def test_json_filter_passes(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes_with_no_query(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_memory(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_disk(self):
filter_properties = {'instance_type': {'memory_mb': 1024,
'root_gb': 200,
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_service_disabled(self):
json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024],
['not', '$service.disabled']])
filter_properties = {'instance_type': {'memory_mb': 1024,
'local_gb': 200},
'scheduler_hints': {'query': json_query}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
# Test json filter more thoroughly.
raw = ['and',
'$capabilities.enabled',
['=', '$capabilities.opt1', 'match'],
['or',
['and',
['<', '$free_ram_mb', 30],
['<', '$free_disk_mb', 300]],
['and',
['>', '$free_ram_mb', 30],
['>', '$free_disk_mb', 300]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
# Passes
capabilities = {'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
'service': service})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
# Passes
capabilities = {'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
'service': service})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
host = fakes.FakeHostState('host1', 'node1', {})
# (operator, arguments, expected_result)
ops_to_test = [
['=', [1, 1], True],
['=', [1, 2], False],
['<', [1, 2], True],
['<', [1, 1], False],
['<', [2, 1], False],
['>', [2, 1], True],
['>', [2, 2], False],
['>', [2, 3], False],
['<=', [1, 2], True],
['<=', [1, 1], True],
['<=', [2, 1], False],
['>=', [2, 1], True],
['>=', [2, 2], True],
['>=', [2, 3], False],
['in', [1, 1], True],
['in', [1, 1, 2, 3], True],
['in', [4, 1, 2, 3], False],
['not', [True], False],
['not', [False], True],
['or', [True, False], True],
['or', [False, False], False],
['and', [True, True], True],
['and', [False, False], False],
['and', [True, False], False],
# Nested ((True or False) and (2 > 1)) == Passes
['and', [['or', True, False], ['>', 2, 1]], True]]
for (op, args, expected) in ops_to_test:
raw = [op] + args
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertEqual(expected,
self.filt_cls.host_passes(host, filter_properties))
# This results in [False, True, False, True] and if any are True
# then it passes...
raw = ['not', True, False, True, False]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
# This results in [False, False, False] and if any are True
# then it passes...which this doesn't
raw = ['not', True, True, True]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_operator_raises(self):
raw = ['!=', 1, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
host = fakes.FakeHostState('host1', 'node1',
{})
self.assertRaises(KeyError,
self.filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
host = fakes.FakeHostState('host1', 'node1',
{})
raw = []
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
raw = {}
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_invalid_num_arguments_fails(self):
host = fakes.FakeHostState('host1', 'node1',
{})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
raw = ['>', 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_variable_ignored(self):
host = fakes.FakeHostState('host1', 'node1',
{})
raw = ['=', '$........', 1, 1]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
raw = ['=', '$foo', 2, 2]
filter_properties = {
'scheduler_hints': {
'query': jsonutils.dumps(raw),
},
}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
| apache-2.0 |
shashank971/edx-platform | openedx/core/djangoapps/credit/routers.py | 138 | 1338 | """ DRF routers. """
from rest_framework import routers
class SimpleRouter(routers.SimpleRouter):
""" Simple DRF router. """
# Note (CCB): This is a retrofit of a DRF 2.4 feature onto DRF 2.3. This is, sadly, simpler than
# updating edx-ora2 to work with DRF 2.4. See https://github.com/tomchristie/django-rest-framework/pull/1333
# for details on this specific DRF 2.4 feature.
def get_lookup_regex(self, viewset, lookup_prefix=''):
"""
Given a viewset, return the portion of URL regex that is used
to match against a single instance.
        Note that lookup_prefix is not used directly inside REST framework
itself, but is required in order to nicely support nested router
implementations, such as drf-nested-routers.
https://github.com/alanjds/drf-nested-routers
"""
base_regex = '(?P<{lookup_prefix}{lookup_field}>{lookup_value})'
lookup_field = getattr(viewset, 'lookup_field', 'pk')
try:
lookup_value = viewset.lookup_value_regex
except AttributeError:
# Don't consume `.json` style suffixes
lookup_value = '[^/.]+'
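        # Note: with the defaults (lookup_field 'pk', lookup_value '[^/.]+')
        # the formatted pattern is '(?P<pk>[^/.]+)'.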
return base_regex.format(
lookup_prefix=lookup_prefix,
lookup_field=lookup_field,
lookup_value=lookup_value
)
| agpl-3.0 |
bwsblake/lettercounter | django-norel-env/lib/python2.7/site-packages/django/contrib/messages/tests/urls.py | 197 | 1959 | from django.conf.urls import patterns
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext, Template
from django.template.response import TemplateResponse
from django.views.decorators.cache import never_cache
TEMPLATE = """{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}
"""
@never_cache
def add(request, message_type):
# don't default to False here, because we want to test that it defaults
# to False if unspecified
fail_silently = request.POST.get('fail_silently', None)
for msg in request.POST.getlist('messages'):
if fail_silently is not None:
getattr(messages, message_type)(request, msg,
fail_silently=fail_silently)
else:
getattr(messages, message_type)(request, msg)
show_url = reverse('django.contrib.messages.tests.urls.show')
return HttpResponseRedirect(show_url)
@never_cache
def add_template_response(request, message_type):
for msg in request.POST.getlist('messages'):
getattr(messages, message_type)(request, msg)
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
return HttpResponseRedirect(show_url)
@never_cache
def show(request):
t = Template(TEMPLATE)
return HttpResponse(t.render(RequestContext(request)))
@never_cache
def show_template_response(request):
return TemplateResponse(request, Template(TEMPLATE))
urlpatterns = patterns('',
('^add/(debug|info|success|warning|error)/$', add),
('^show/$', show),
('^template_response/add/(debug|info|success|warning|error)/$', add_template_response),
('^template_response/show/$', show_template_response),
)
| mit |
baiyunping333/BurpSuite-Plugins | Sqlmap/thirdparty/chardet/sbcsgroupprober.py | 235 | 3127 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from charsetgroupprober import CharSetGroupProber
from sbcharsetprober import SingleByteCharSetProber
from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from langgreekmodel import Latin7GreekModel, Win1253GreekModel
from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from langthaimodel import TIS620ThaiModel
from langhebrewmodel import Win1255HebrewModel
from hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber)
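        # The HebrewProber decides between logical and visual Hebrew text;
        # it needs references to both Win1255 probers, which are wired up below.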
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
self.reset()
| gpl-2.0 |
alianmohammad/pd-gem5-latest | tests/quick/se/70.tgen/test.py | 74 | 2122 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
maxtick = 100000000000
| bsd-3-clause |
brian-l/django-1.4.10 | tests/regressiontests/admin_filters/tests.py | 6 | 35308 | from __future__ import absolute_import
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_unicode
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__in' # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__isnull' # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
title = 'department'
parameter_name = 'department'
def lookups(self, request, model_admin):
return sorted(set([
(employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code)
for employee in model_admin.queryset(request).all()
]))
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', '[email protected]')
self.bob = User.objects.create_user('bob', '[email protected]')
self.lisa = User.objects.create_user('lisa', '[email protected]')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
# Departments
self.dev = Department.objects.create(code='DEV', description='Development')
self.design = Department.objects.create(code='DSN', description='Design')
# Employees
self.john = Employee.objects.create(name='John Blue', department=self.dev)
self.jack = Employee.objects.create(name='Jack Red', department=self.design)
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_unicode(filterspec.title), u'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_unicode(filterspec.title), u'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_unicode(filterspec.title), u'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_unicode(filterspec.title), u'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], u'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], u'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], u'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], u'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEquals(force_unicode(filterspec.title), u'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
self.assertRaisesRegexp(ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
self.assertRaisesRegexp(ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
        A SimpleListFilter lookups method can return None, but doing so
        disables the filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], u'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
        Ensure that list_filter works with two-character-long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], u'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title), u'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], u'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
"""
        Ensure the correct choice is marked as selected when using
        non-string values for lookups in SimpleListFilters.
Refs #19318
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department': self.john.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.pk)
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], u'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], u'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/traitlets/tests/_warnings.py | 17 | 4019 | # From scikit-image: https://github.com/scikit-image/scikit-image/blob/c2f8c4ab123ebe5f7b827bc495625a32bb225c10/skimage/_shared/_warnings.py
# Licensed under modified BSD license
__all__ = ['all_warnings', 'expected_warnings']
from contextlib import contextmanager
import sys
import warnings
import inspect
import re
@contextmanager
def all_warnings():
"""
Context for use in testing to ensure that all warnings are raised.
Examples
--------
>>> import warnings
>>> def foo():
... warnings.warn(RuntimeWarning("bar"))
We raise the warning once, while the warning filter is set to "once".
Hereafter, the warning is invisible, even with custom filters:
>>> with warnings.catch_warnings():
... warnings.simplefilter('once')
... foo()
We can now run ``foo()`` without a warning being raised:
>>> from numpy.testing import assert_warns
>>> foo()
To catch the warning, we call in the help of ``all_warnings``:
>>> with all_warnings():
... assert_warns(RuntimeWarning, foo)
"""
# Whenever a warning is triggered, Python adds a __warningregistry__
    # member to the *calling* module. The exercise here is to find
# and eradicate all those breadcrumbs that were left lying around.
#
# We proceed by first searching all parent calling frames and explicitly
# clearing their warning registries (necessary for the doctests above to
# pass). Then, we search for all submodules of skimage and clear theirs
# as well (necessary for the skimage test suite to pass).
frame = inspect.currentframe()
if frame:
for f in inspect.getouterframes(frame):
f[0].f_locals['__warningregistry__'] = {}
del frame
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
try:
mod.__warningregistry__.clear()
except AttributeError:
pass
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
@contextmanager
def expected_warnings(matching):
"""Context for use in testing to catch known warnings matching regexes
Parameters
----------
matching : list of strings or compiled regexes
Regexes for the desired warning to catch
Examples
--------
>>> from skimage import data, img_as_ubyte, img_as_float
>>> with expected_warnings(['precision loss']):
... d = img_as_ubyte(img_as_float(data.coins()))
Notes
-----
Uses `all_warnings` to ensure all warnings are raised.
Upon exiting, it checks the recorded warnings for the desired matching
pattern(s).
Raises a ValueError if any match was not found or an unexpected
warning was raised.
Allows for three types of behaviors: "and", "or", and "optional" matches.
    This is done to accommodate different build environments or loop conditions
that may produce different warnings. The behaviors can be combined.
If you pass multiple patterns, you get an orderless "and", where all of the
warnings must be raised.
If you use the "|" operator in a pattern, you can catch one of several warnings.
Finally, you can use "|\A\Z" in a pattern to signify it as optional.
"""
with all_warnings() as w:
# enter context
yield w
# exited user context, check the recorded warnings
    remaining = [m for m in matching if '\A\Z' not in m.split('|')]
for warn in w:
found = False
for match in matching:
if re.search(match, str(warn.message)) is not None:
found = True
if match in remaining:
remaining.remove(match)
if not found:
raise ValueError('Unexpected warning: %s' % str(warn.message))
if len(remaining) > 0:
msg = 'No warning raised matching:\n%s' % '\n'.join(remaining)
raise ValueError(msg)
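# Illustrative sketch (added for clarity; not part of the original module). The
# "or" and "optional" behaviours described in the docstring above could be used
# roughly as follows -- the function name and warning messages are hypothetical:
#
#     with expected_warnings(['precision loss|sign loss']):
#         convert_image()   # passes if either warning is raised
#
#     with expected_warnings(['precision loss', 'non-contiguous|\A\Z']):
#         convert_image()   # 'precision loss' required, 'non-contiguous' optional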
| apache-2.0 |
locke105/mclib | examples/wsgi.py | 1 | 1781 |
import cgi
import json
from wsgiref import simple_server
import falcon
from mclib import mc_info
class MCInfo(object):
def on_get(self, req, resp):
host = req.get_param('host', required=True)
port = req.get_param_as_int('port', min=1024,
                                    max=65535)
try:
if port is not None:
info = mc_info.get_info(host=host,
port=port)
else:
info = mc_info.get_info(host=host)
except Exception:
raise Exception('Couldn\'t retrieve info.')
if '.json' in req.uri:
resp.body = self.get_json(info)
return
preferred = req.client_prefers(['application/json', 'text/html'])
if 'html' in preferred:
resp.content_type = 'text/html'
resp.body = self.get_html(info)
else:
resp.body = self.get_json(info)
def get_html(self, info):
html = """<body>
<style>
table,th,td
{
border:1px solid black;
border-collapse:collapse
}
th,td
{
padding: 5px
}
</style>
<table>
"""
for k,v in info.iteritems():
items = {'key': cgi.escape(k)}
if isinstance(v, basestring):
items['val'] = cgi.escape(v)
else:
items['val'] = v
html = html + '<tr><td>%(key)s</td><td>%(val)s</td></tr>' % items
html = html + '</table></body>'
return html
def get_json(self, info):
return json.dumps(info)
app = falcon.API()
mcinfo = MCInfo()
app.add_route('/mcinfo', mcinfo)
app.add_route('/mcinfo.json', mcinfo)
if __name__ == '__main__':
httpd = simple_server.make_server('0.0.0.0', 3000, app)
httpd.serve_forever()
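# Illustrative usage (added for clarity; not part of the original module): with
# the server above running, the resource could be queried over HTTP, e.g.
#     GET http://localhost:3000/mcinfo?host=mc.example.com&port=25565
#     GET http://localhost:3000/mcinfo.json?host=mc.example.com
# The host and port values are placeholders. JSON or HTML is returned depending
# on the client's Accept header, as handled by MCInfo.on_get via client_prefers.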
| apache-2.0 |
Meertecha/LearnPythonTheGame | pyGameEngine.py | 1 | 3565 | ### Imports
import pickle, os, platform, random
### Functions
def main():
curPlayer = loadPlayer( 'Tory' )
curGame = loadGame( 'Python_Tutorial' )
startGame(curPlayer, curGame)
def banner():
'''
if platform.system() == "Windows":
clearCmd = "cls"
elif platform.system() == "Linux":
clearCmd = "clear"
else:
print ("Unknown operating system detected. Some operations may not perform correctly!\n")
os.system(clearCmd)
'''
version = 0.1
banner = (" **Welcome to the Python Learning Environment\n\
**Written by Tory Clasen - Version: " + str(version) + " \n\
**For help at any time please type '?' or 'help' \n\
**To exit the program type 'exit' or 'quit' \n\n")
print banner
def startGame(curPlayer, curGame):
try:
curScore = curPlayer['score'][curGame['gameName']]
except:
curScore = 0
while True:
#banner()
print '----------------------------------------\n' + curGame['gameName'] + ' has been loaded'
print curGame['banner'] + '\n----------------------------------------'
try:
pickle.dump( curPlayer, open( ( str(curPlayer['Name']) + ".plep"), "wb" ) )
except:
print "Error! Unable to save player profile at current location!"
print 'Your current score is: ' + str(curScore) + ' out of a total possible score of: ' + str(len(curGame['gameData']))
print "Question " + str(curScore) + ": \n" + str(curGame['gameData'][curScore]["Q"]) + "\n"
temp = curGame['gameData'][curScore]["D"]
data = eval(str(curGame['gameData'][curScore]["D"]))
print "Data " + str(curScore) + ": \n" + data
print '----------------------------------------\n'
try:
myAnswer = eval(str(getInput('What command do you want to submit? ')))
if myAnswer == (eval(str(curGame['gameData'][curScore]["A"]))):
print "Correct!"
curScore = curScore + 1
else:
print "Incorrect!"
except:
print 'The answer you submitted crashed the program, so it was probably wrong'
#break
def getInput(prompt):
theInput = raw_input( str(prompt) + "\n" )
if theInput == '?' or theInput.lower() == 'help':
print "HELP! HELP!"
elif theInput.lower() == 'exit' or theInput.lower() == 'quit':
raise SystemExit
else:
return theInput
def loadPlayer(playerName = ''):
#banner()
curPlayer = {}
if playerName == '':
playerName = getInput("I would like to load your profile. \nWhat is your name? ")
try:
# Attempt to load the player file.
curPlayer = pickle.load( open( ( str(playerName) + ".plep"), "rb" ) )
print "Player profile found... loading player data..."
except:
# Ask the player if they want to try to create a new profile file.
createNew = getInput( "Player profile not found for '" + str(playerName) + "'\nWould you like to create a new one? [Y/N]").lower()
curPlayer = {'Name':playerName}
if createNew == "y":
try:
pickle.dump( curPlayer, open( ( str(playerName) + ".plep"), "wb" ) )
print "Player profile successfully created!"
except:
print "Error! Unable to create player profile at current location!"
else:
print "Progress will not be saved for you..."
return curPlayer
def loadGame(gameName = ''):
banner()
curGame = {}
while True:
if gameName == '':
gameName = getInput("What game would you like to load? ")
try:
# Attempt to load the player file.
curGame = pickle.load( open( ( str(gameName) + ".pleg"), "rb" ) )
print "Game module found... loading game data..."
gameName = ''
break
except:
gameName = ''
print "Game module not found... please try again..."
return curGame
main()
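# Illustrative sketch (added for clarity; not part of the original script): the
# .pleg file unpickled by loadGame() is assumed to look roughly like
#     {'gameName': 'Python_Tutorial',
#      'banner': 'Welcome to the tutorial',
#      'gameData': [{'Q': 'question text', 'D': "'some data'", 'A': "'answer'"}, ...]}
# where the 'D' and 'A' entries hold strings that startGame() passes to eval().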
| mit |
openbig/odoo-contract | partner_billing/wizard/sale_make_invoice_advance.py | 1 | 1615 | # -*- encoding: utf-8 -*-
##############################################################################
#
# partner_billing
# (C) 2015 Mikołaj Dziurzyński, Grzegorz Grzelak, Thorsten Vocks (big-consulting GmbH)
# All Rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp import fields, models
import logging
_logger = logging.getLogger(__name__)
class sale_advance_payment_inv(osv.osv_memory):
_inherit = "sale.advance.payment.inv"
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
res = super(sale_advance_payment_inv,self)._prepare_advance_invoice_vals(cr, uid, ids, context=context)
sale_order_obj = self.pool.get('sale.order')
for pair in res:
for sale in sale_order_obj.browse(cr, uid, [pair[0]]):
pair[1]['associated_partner'] = sale.associated_partner and sale.associated_partner.id or False
return res
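    # Note added for clarity (not in the original source): the parent method
    # returns a list of (sale_order_id, invoice_values) pairs; the override
    # above copies each sale order's associated_partner into the corresponding
    # advance-invoice values before the invoice is created.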
| agpl-3.0 |
ingo-m/pyprf | pyprf/analysis/pyprf_main.py | 2 | 14174 | # -*- coding: utf-8 -*-
"""Find best fitting model time courses for population receptive fields.
Use `import pRF_config as cfg` for static pRF analysis.
Use `import pRF_config_motion as cfg` for pRF analysis with motion stimuli.
"""
# Part of py_pRF_mapping library
# Copyright (C) 2016 Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import time
import numpy as np
import nibabel as nb
import h5py
from pyprf.analysis.load_config import load_config
from pyprf.analysis.utilities import cls_set_config
from pyprf.analysis.model_creation_main import model_creation
from pyprf.analysis.preprocessing_main import pre_pro_models
from pyprf.analysis.preprocessing_main import pre_pro_func
from pyprf.analysis.preprocessing_hdf5 import pre_pro_models_hdf5
from pyprf.analysis.preprocessing_hdf5 import pre_pro_func_hdf5
from pyprf.analysis.find_prf import find_prf
def pyprf(strCsvCnfg, lgcTest=False): #noqa
"""
Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
        Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Convert preprocessing parameters (for temporal and spatial smoothing)
# from SI units (i.e. [s] and [mm]) into units of data array (volumes and
# voxels):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
cfg.varSdSmthSpt = np.divide(cfg.varSdSmthSpt, cfg.varVoxRes)
# For the GPU version, we need to set down the parallelisation to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1
# *************************************************************************
# *************************************************************************
# *** Create or load pRF time course models
# In case of a multi-run experiment, the data may not fit into memory.
# (Both pRF model time courses and the fMRI data may be large in this
# case.) Therefore, we switch to hdf5 mode, where model time courses and
    # fMRI data are held in hdf5 files (on disk). The location of the hdf5 file
# for model time courses is specified by 'strPathMdl' (in the config file).
# The hdf5 file with fMRI data are stored at the same location as the input
# nii files.
# Array with pRF time course models, shape:
# aryPrfTc[x-position, y-position, SD, condition, volume].
# If in hdf5 mode, `aryPrfTc` is `None`.
aryPrfTc = model_creation(dicCnfg, lgcHdf5=cfg.lgcHdf5)
# *************************************************************************
# *************************************************************************
# *** Preprocessing
if cfg.lgcHdf5:
print('---Hdf5 mode.')
# Preprocessing of functional data:
vecLgcMsk, hdrMsk, aryAff, vecLgcVar, tplNiiShp, strPthHdf5Func = \
pre_pro_func_hdf5(cfg.strPathNiiMask,
cfg.lstPathNiiFunc,
lgcLinTrnd=cfg.lgcLinTrnd,
varSdSmthTmp=cfg.varSdSmthTmp,
varSdSmthSpt=cfg.varSdSmthSpt)
# Preprocessing of pRF model time courses:
strPrfTc, aryLgcMdlVar = \
pre_pro_models_hdf5(cfg.strPathMdl,
varSdSmthTmp=cfg.varSdSmthTmp,
strVersion=cfg.strVersion,
varPar=cfg.varPar)
# Dummy pRF time courses (for compatibility with regular mode):
aryPrfTc = None
# ---Makeshift solution for small data after masking---
# TODO: IMPLEMENT FULL HDF5 MODE FOR READING OF FUNCTIONAL DATA.
# Read hdf5 file (masked timecourses of current run):
fleHdfFunc = h5py.File(strPthHdf5Func, 'r')
# Access dataset in current hdf5 file:
dtsFunc = fleHdfFunc['func']
aryFunc = dtsFunc[:, :]
aryFunc = np.copy(aryFunc)
fleHdfFunc.close()
else:
# Preprocessing of pRF model time courses:
aryPrfTc = pre_pro_models(aryPrfTc,
varSdSmthTmp=cfg.varSdSmthTmp,
varPar=cfg.varPar)
# Preprocessing of functional data:
vecLgcMsk, hdrMsk, aryAff, vecLgcVar, aryFunc, tplNiiShp = \
pre_pro_func(cfg.strPathNiiMask,
cfg.lstPathNiiFunc,
lgcLinTrnd=cfg.lgcLinTrnd,
varSdSmthTmp=cfg.varSdSmthTmp,
varSdSmthSpt=cfg.varSdSmthSpt,
varPar=cfg.varPar)
# Dummy variables (for compatibility with hdf5 mode):
strPrfTc = None
aryLgcMdlVar = None
# *************************************************************************
# *************************************************************************
# *** Find pRF models for voxel time courses.
lstPrfRes = find_prf(dicCnfg, aryFunc, aryPrfTc=aryPrfTc,
aryLgcMdlVar=aryLgcMdlVar, strPrfTc=strPrfTc)
# *************************************************************************
# *************************************************************************
# *** Merge results from parallel processes
print('---------Prepare pRF finding results for export')
# Create list for vectors with fitting results, in order to put the results
# into the correct order:
lstResXpos = [None] * cfg.varPar
lstResYpos = [None] * cfg.varPar
lstResSd = [None] * cfg.varPar
lstResR2 = [None] * cfg.varPar
lstResPe = [None] * cfg.varPar
# Put output into correct order:
for idxRes in range(cfg.varPar):
# Index of results (first item in output list):
varTmpIdx = lstPrfRes[idxRes][0]
# Put fitting results into list, in correct order:
lstResXpos[varTmpIdx] = lstPrfRes[idxRes][1]
lstResYpos[varTmpIdx] = lstPrfRes[idxRes][2]
lstResSd[varTmpIdx] = lstPrfRes[idxRes][3]
lstResR2[varTmpIdx] = lstPrfRes[idxRes][4]
lstResPe[varTmpIdx] = lstPrfRes[idxRes][5]
# Concatenate output vectors (into the same order as the voxels that were
# included in the fitting):
aryBstXpos = np.concatenate(lstResXpos, axis=0).astype(np.float32)
aryBstYpos = np.concatenate(lstResYpos, axis=0).astype(np.float32)
aryBstSd = np.concatenate(lstResSd, axis=0).astype(np.float32)
aryBstR2 = np.concatenate(lstResR2, axis=0).astype(np.float32)
# aryBstXpos = np.zeros(0, dtype=np.float32)
# aryBstYpos = np.zeros(0, dtype=np.float32)
# aryBstSd = np.zeros(0, dtype=np.float32)
# aryBstR2 = np.zeros(0, dtype=np.float32)
# for idxRes in range(0, cfg.varPar):
# aryBstXpos = np.append(aryBstXpos, lstResXpos[idxRes])
# aryBstYpos = np.append(aryBstYpos, lstResYpos[idxRes])
# aryBstSd = np.append(aryBstSd, lstResSd[idxRes])
# aryBstR2 = np.append(aryBstR2, lstResR2[idxRes])
# Concatenate PEs, shape: aryBstPe[varNumVox, varNumCon].
aryBstPe = np.concatenate(lstResPe, axis=0).astype(np.float32)
varNumCon = aryBstPe.shape[1]
# Delete unneeded large objects:
del(lstPrfRes)
del(lstResXpos)
del(lstResYpos)
del(lstResSd)
del(lstResR2)
del(lstResPe)
# *************************************************************************
# *************************************************************************
# *** Reshape spatial parameters
    # Put results from pRF finding into array (they originally needed to be
# saved in a list due to parallelisation). Voxels were selected for pRF
# model finding in two stages: First, a mask was applied. Second, voxels
# with low variance were removed. Voxels are put back into the original
# format accordingly.
# Number of voxels that were included in the mask:
varNumVoxMsk = np.sum(vecLgcMsk)
# Array for pRF finding results, of the form aryPrfRes[voxel-count, 0:3],
# where the 2nd dimension contains the parameters of the best-fitting pRF
# model for the voxel, in the order (0) pRF-x-pos, (1) pRF-y-pos, (2)
# pRF-SD, (3) pRF-R2. At this step, only the voxels included in the mask
# are represented.
aryPrfRes01 = np.zeros((varNumVoxMsk, 6), dtype=np.float32)
    # Place voxels based on low-variance exclusion:
aryPrfRes01[vecLgcVar, 0] = aryBstXpos
aryPrfRes01[vecLgcVar, 1] = aryBstYpos
aryPrfRes01[vecLgcVar, 2] = aryBstSd
aryPrfRes01[vecLgcVar, 3] = aryBstR2
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, 6), dtype=np.float32)
aryPrfRes02[vecLgcMsk, 0] = aryPrfRes01[:, 0]
aryPrfRes02[vecLgcMsk, 1] = aryPrfRes01[:, 1]
aryPrfRes02[vecLgcMsk, 2] = aryPrfRes01[:, 2]
aryPrfRes02[vecLgcMsk, 3] = aryPrfRes01[:, 3]
# Reshape pRF finding results into original image dimensions:
aryPrfRes = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
6])
del(aryPrfRes01)
del(aryPrfRes02)
# *************************************************************************
# *************************************************************************
# *** Reshape parameter estimates (betas)
# Bring PEs into original data shape. First, account for binary (brain)
# mask:
aryPrfRes01 = np.zeros((varNumVoxMsk, varNumCon), dtype=np.float32)
    # Place voxels based on low-variance exclusion:
aryPrfRes01[vecLgcVar, :] = aryBstPe
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, varNumCon), dtype=np.float32)
aryPrfRes02[vecLgcMsk, :] = aryPrfRes01
# Reshape pRF finding results into original image dimensions:
aryBstPe = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
varNumCon])
# New shape: aryBstPe[x, y, z, varNumCon]
del(aryPrfRes01)
del(aryPrfRes02)
# *************************************************************************
# *************************************************************************
# *** Export results
# The nii header of the mask will be used for creation of result nii files.
# Set dtype to float32 to avoid precision loss (in case mask is int).
hdrMsk.set_data_dtype(np.float32)
# Calculate polar angle map:
aryPrfRes[:, :, :, 4] = np.arctan2(aryPrfRes[:, :, :, 1],
aryPrfRes[:, :, :, 0])
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryPrfRes[:, :, :, 5] = np.sqrt(np.add(np.power(aryPrfRes[:, :, :, 0],
2.0),
np.power(aryPrfRes[:, :, :, 1],
2.0)))
# List with name suffices of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity']
print('---------Exporting results')
# Save spatial pRF parameters to nii:
for idxOut in range(6):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes[:, :, :, idxOut],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = (cfg.strPathOut + lstNiiNames[idxOut] + '.nii.gz')
nb.save(niiOut, strTmp)
# Save PEs to nii (not implemented for gpu mode):
if cfg.strVersion != 'gpu':
for idxCon in range(varNumCon):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryBstPe[:, :, :, idxCon],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = (cfg.strPathOut
+ '_PE_'
+ str(idxCon + 1).zfill(2)
+ '.nii.gz')
nb.save(niiOut, strTmp)
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
print('---Done.')
# *************************************************************************
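# Illustrative invocation sketch (added for clarity; not part of the original
# module) -- the config path below is hypothetical:
#     from pyprf.analysis.pyprf_main import pyprf
#     pyprf('/path/to/pRF_config.csv')
# The csv config supplies all analysis parameters (see load_config above) and
# results are written as nii files under the configured output path.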
| gpl-3.0 |
lipixun/pytest | rabbitmq/deadchannel/going2dead.py | 1 | 2112 | #!/usr/bin/env python
# encoding=utf8
# The dead channel application
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from uuid import uuid4
from time import time, sleep
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message
class Client(object):
"""The RPC Client
"""
def __init__(self, host, port, vhost, user, password):
"""Create a new Server
"""
self._conn = RabbitConnection(host = host, port = port, vhost = vhost, user = user, password = password)
self._channel = self._conn.channel()
result = self._channel.queue.declare(arguments = { 'x-dead-letter-exchange': 'amq.topic', 'x-dead-letter-routing-key': 'test.dead_channel' })
self._deadQueue = result[0]
# Send a message
self._channel.basic.publish(Message('OMG! I\'m dead!'), '', self._deadQueue)
def dead(self):
"""Normal dead
"""
self._channel.close()
if __name__ == '__main__':
from argparse import ArgumentParser
def getArguments():
"""Get arguments
"""
parser = ArgumentParser(description = 'RabbitMQ dead channel client')
parser.add_argument('--host', dest = 'host', required = True, help = 'The host')
parser.add_argument('--port', dest = 'port', default = 5672, type = int, help = 'The port')
parser.add_argument('--vhost', dest = 'vhost', default = '/test', help = 'The virtual host')
parser.add_argument('--user', dest = 'user', default = 'test', help = 'The user name')
parser.add_argument('--password', dest = 'password', default = 'test', help = 'The password')
# Done
return parser.parse_args()
def main():
"""The main entry
"""
args = getArguments()
# Create the server
client = Client(args.host, args.port, args.vhost, args.user, args.password)
# Go to dead
        print 'Will go to dead in 10s, or you can use ctrl + c to cause an unexpected death'
sleep(10)
client.dead()
print 'Normal dead'
main()
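# Note added for clarity (assumption, not part of the original script): the
# queue declared above names 'amq.topic' as its dead-letter exchange with
# routing key 'test.dead_channel', so a separate consumer bound to that
# exchange and routing key could be used to observe any dead-lettered messages.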
| gpl-2.0 |
Gustry/inasafe | safe/gui/tools/multi_buffer_dialog.py | 3 | 11681 | # coding=utf-8
"""**Multi Buffer Tool Implementation.**"""
import logging
import os
from collections import OrderedDict
from operator import itemgetter
from qgis.core import QgsMapLayerRegistry
from qgis.gui import QgsMapLayerProxyModel
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignature, pyqtSlot
from PyQt4.QtGui import QFileDialog, QIcon
from safe.common.utilities import unique_filename, temp_dir
from safe.datastore.folder import Folder
from safe.gis.vector.multi_buffering import multi_buffering
from safe.gui.tools.wizard.wizard_dialog import WizardDialog
from safe.gui.tools.help.multi_buffer_help import multi_buffer_help
from safe.messaging import styles
from safe.utilities.resources import (
get_ui_class,
resources_path,
html_footer,
html_header)
INFO_STYLE = styles.BLUE_LEVEL_4_STYLE
LOGGER = logging.getLogger('InaSAFE')
FORM_CLASS = get_ui_class('multi_buffer_dialog_base.ui')
class MultiBufferDialog(QtGui.QDialog, FORM_CLASS):
"""Dialog implementation class for the InaSAFE multi buffer tool."""
def __init__(self, parent=None, iface=None, dock_widget=None):
"""Constructor for the multi buffer dialog.
:param parent: Parent widget of this dialog.
:type parent: QWidget
"""
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle(self.tr('InaSAFE Multi Buffer Tool'))
self.parent = parent
self.iface = iface
self.dock_widget = dock_widget
self.keyword_wizard = None
# output file properties initialisation
self.data_store = None
self.output_directory = None
self.output_filename = None
self.output_extension = None
self.output_layer = None
self.classification = []
# set icon
self.add_class_button.setIcon(
QIcon(resources_path('img', 'icons', 'add.svg')))
self.remove_class_button.setIcon(
QIcon(resources_path('img', 'icons', 'remove.svg')))
# prepare dialog initialisation
self.layer.setFilters(QgsMapLayerProxyModel.VectorLayer)
self.directory_button_status()
self.add_class_button_status()
self.ok_button_status()
self.output_form.setPlaceholderText(
self.tr('[Create a temporary layer]'))
self.keyword_wizard_checkbox.setChecked(True)
# set signal
self.layer.layerChanged.connect(self.directory_button_status)
self.layer.layerChanged.connect(self.ok_button_status)
self.output_form.textChanged.connect(self.ok_button_status)
self.directory_button.clicked.connect(
self.on_directory_button_tool_clicked)
self.radius_form.valueChanged.connect(self.add_class_button_status)
self.class_form.textChanged.connect(self.add_class_button_status)
self.add_class_button.clicked.connect(
self.populate_hazard_classification)
self.add_class_button.clicked.connect(self.ok_button_status)
self.remove_class_button.clicked.connect(
self.remove_selected_classification)
self.remove_class_button.clicked.connect(self.ok_button_status)
# Set up things for context help
self.help_button = self.button_box.button(QtGui.QDialogButtonBox.Help)
# Allow toggling the help button
self.help_button.setCheckable(True)
self.help_button.toggled.connect(self.help_toggled)
self.main_stacked_widget.setCurrentIndex(1)
# Fix for issue 1699 - cancel button does nothing
cancel_button = self.button_box.button(QtGui.QDialogButtonBox.Cancel)
cancel_button.clicked.connect(self.reject)
# Fix ends
ok_button = self.button_box.button(QtGui.QDialogButtonBox.Ok)
ok_button.clicked.connect(self.accept)
def accept(self):
"""Process the layer for multi buffering and generate a new layer.
.. note:: This is called on OK click.
"""
# set parameter from dialog
input_layer = self.layer.currentLayer()
output_path = self.output_form.text()
radius = self.get_classification()
        # monkey patch keywords so the layer works with the multi-buffering function
input_layer.keywords = {'inasafe_fields': {}}
# run multi buffering
self.output_layer = multi_buffering(input_layer, radius)
# save output layer to data store and check whether user
        # provides the output path.
if output_path:
self.output_directory, self.output_filename = (
os.path.split(output_path))
self.output_filename, self.output_extension = (
os.path.splitext(self.output_filename))
        # if the user does not provide the output path, create a temporary file.
else:
self.output_directory = temp_dir(sub_dir='work')
self.output_filename = (
unique_filename(
prefix='hazard_layer',
suffix='.geojson',
dir=self.output_directory))
self.output_filename = os.path.split(self.output_filename)[1]
self.output_filename, self.output_extension = (
os.path.splitext(self.output_filename))
self.data_store = Folder(self.output_directory)
if self.output_extension == '.shp':
self.data_store.default_vector_format = 'shp'
elif self.output_extension == '.geojson':
self.data_store.default_vector_format = 'geojson'
self.data_store.add_layer(self.output_layer, self.output_filename)
# add output layer to map canvas
self.output_layer = self.data_store.layer(self.output_filename)
QgsMapLayerRegistry.instance().addMapLayers(
[self.output_layer])
self.iface.setActiveLayer(self.output_layer)
self.iface.zoomToActiveLayer()
self.done(QtGui.QDialog.Accepted)
if self.keyword_wizard_checkbox.isChecked():
self.launch_keyword_wizard()
@pyqtSignature('') # prevents actions being handled twice
def on_directory_button_tool_clicked(self):
"""Autoconnect slot activated when directory button is clicked."""
# noinspection PyCallByClass,PyTypeChecker
# set up parameter from dialog
input_path = self.layer.currentLayer().source()
input_directory, self.output_filename = os.path.split(input_path)
file_extension = os.path.splitext(self.output_filename)[1]
self.output_filename = os.path.splitext(self.output_filename)[0]
# show Qt file directory dialog
output_path = QFileDialog.getSaveFileName(
self,
self.tr('Output file'),
'%s_multi_buffer%s' % (
os.path.join(input_directory, self.output_filename),
file_extension),
'GeoJSON (*.geojson);;Shapefile (*.shp)')
# set selected path to the dialog
self.output_form.setText(output_path)
def get_output_from_input(self):
"""Populate output form with default output path based on input layer.
"""
input_path = self.layer.currentLayer().source()
output_path = (
os.path.splitext(input_path)[0] + '_multi_buffer' +
os.path.splitext(input_path)[1])
self.output_form.setText(output_path)
def populate_hazard_classification(self):
"""Populate hazard classification on hazard class form."""
new_class = {
'value': self.radius_form.value(),
'name': self.class_form.text()}
self.classification.append(new_class)
self.classification = sorted(
self.classification, key=itemgetter('value'))
self.hazard_class_form.clear()
for item in self.classification:
new_item = '{value} - {name}'.format(
value=item['value'], name=item['name'])
self.hazard_class_form.addItem(new_item)
self.radius_form.setValue(0)
self.class_form.clear()
self.ok_button_status()
def remove_selected_classification(self):
"""Remove selected item on hazard class form."""
removed_classes = self.hazard_class_form.selectedItems()
current_item = self.hazard_class_form.currentItem()
removed_index = self.hazard_class_form.indexFromItem(current_item)
del self.classification[removed_index.row()]
for item in removed_classes:
self.hazard_class_form.takeItem(
self.hazard_class_form.row(item))
def get_classification(self):
"""Get all hazard class created by user.
:return: Hazard class definition created by user.
:rtype: OrderedDict
"""
classification_dictionary = {}
for item in self.classification:
classification_dictionary[item['value']] = item['name']
classification_dictionary = OrderedDict(
sorted(classification_dictionary.items()))
return classification_dictionary
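    # Illustrative example for get_classification (added for clarity; values are
    # hypothetical): after adding radius 5.0 labelled 'high' and radius 10.0
    # labelled 'medium' in the dialog, it returns
    # OrderedDict([(5.0, 'high'), (10.0, 'medium')]).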
def directory_button_status(self):
"""Function to enable or disable directory button."""
if self.layer.currentLayer():
self.directory_button.setEnabled(True)
else:
self.directory_button.setEnabled(False)
def add_class_button_status(self):
"""Function to enable or disable add class button."""
        if self.class_form.text() and self.radius_form.value() >= 0:
self.add_class_button.setEnabled(True)
else:
self.add_class_button.setEnabled(False)
def ok_button_status(self):
"""Function to enable or disable OK button."""
if not self.layer.currentLayer():
self.button_box.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
elif (self.hazard_class_form.count() > 0 and
self.layer.currentLayer().name() and
len(self.output_form.text()) >= 0):
self.button_box.button(QtGui.QDialogButtonBox.Ok).setEnabled(True)
else:
self.button_box.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
@pyqtSlot()
@pyqtSignature('bool') # prevents actions being handled twice
def help_toggled(self, flag):
"""Show or hide the help tab in the stacked widget.
:param flag: Flag indicating whether help should be shown or hidden.
:type flag: bool
"""
if flag:
self.help_button.setText(self.tr('Hide Help'))
self.show_help()
else:
self.help_button.setText(self.tr('Show Help'))
self.hide_help()
def hide_help(self):
"""Hide the usage info from the user."""
self.main_stacked_widget.setCurrentIndex(1)
def show_help(self):
"""Show usage info to the user."""
# Read the header and footer html snippets
self.main_stacked_widget.setCurrentIndex(0)
header = html_header()
footer = html_footer()
string = header
message = multi_buffer_help()
string += message.to_html()
string += footer
self.help_web_view.setHtml(string)
def launch_keyword_wizard(self):
"""Launch keyword creation wizard."""
# make sure selected layer is the output layer
if self.iface.activeLayer() != self.output_layer:
return
# launch wizard dialog
self.keyword_wizard = WizardDialog(
self.iface.mainWindow(),
self.iface,
self.dock_widget)
self.keyword_wizard.set_keywords_creation_mode(self.output_layer)
self.keyword_wizard.exec_() # modal
| gpl-3.0 |
gf712/AbPyTools | abpytools/core/fab_collection.py | 1 | 14123 | from .chain_collection import ChainCollection
import numpy as np
import pandas as pd
from .chain import calculate_charge
from abpytools.utils import DataLoader
from operator import itemgetter
from .fab import Fab
from .helper_functions import germline_identity_pd, to_numbering_table
from .base import CollectionBase
import os
import json
from .utils import (json_FabCollection_formatter, pb2_FabCollection_formatter, pb2_FabCollection_parser,
json_FabCollection_parser)
from .flags import *
if BACKEND_FLAGS.HAS_PROTO:
from abpytools.core.formats import FabCollectionProto
class FabCollection(CollectionBase):
def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):
"""
Fab object container that handles combinations of light/heavy Chain pairs.
Args:
fab (list):
heavy_chains (ChainCollection):
light_chains (ChainCollection):
names (list):
"""
# check if it's a Chain object
if heavy_chains is None and light_chains is None and fab is None:
raise ValueError('Provide a list of Chain objects or an ChainCollection object')
        # check if fab object is a list and if all objects are abpytools.Fab objects
if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):
self._fab = fab
self._light_chains = ChainCollection([x[0] for x in self._fab])
self._heavy_chains = ChainCollection([x[1] for x in self._fab])
if fab is None and (heavy_chains is not None and light_chains is not None):
if isinstance(heavy_chains, list):
self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)
elif isinstance(heavy_chains, ChainCollection):
self._heavy_chains = heavy_chains
else:
raise ValueError('Provide a list of Chain objects or an ChainCollection object')
if isinstance(light_chains, list):
self._light_chains = ChainCollection(antibody_objects=light_chains)
elif isinstance(light_chains, ChainCollection):
self._light_chains = light_chains
else:
raise ValueError('Provide a list of Chain objects or an ChainCollection object')
if len(self._light_chains.loading_status()) == 0:
self._light_chains.load()
if len(self._heavy_chains.loading_status()) == 0:
self._heavy_chains.load()
if self._light_chains.n_ab != self._heavy_chains.n_ab:
raise ValueError('Number of heavy chains must be the same of light chains')
if isinstance(names, list) and all(isinstance(name, str) for name in names):
if len(names) == self._heavy_chains.n_ab:
self._names = names
else:
raise ValueError(
'Length of name list must be the same as length of heavy_chains/light chains lists')
elif names is None:
self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,
self._light_chains.names)]
else:
raise ValueError("Names expected a list of strings, instead got {}".format(type(names)))
self._n_ab = self._light_chains.n_ab
self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences,
self._light_chains.sequences)]
# keep the name of the heavy and light chains internally to keep everything in the right order
self._internal_heavy_name = self._heavy_chains.names
self._internal_light_name = self._light_chains.names
# even though it makes more sense to draw all these values from the base Fab objects this is much slower
# whenever self._n_ab > 1 it makes more sense to use the self._heavy_chain and self._light_chain containers
# in all the methods
# in essence the abpytools.Fab object is just a representative building block that could in future just
# cache data and would then represent a speed up in the calculations
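    # Illustrative usage sketch (added for clarity; not part of the original
    # module) -- the chain collections below are assumed to be pre-built:
    #     heavy = ChainCollection(antibody_objects=[...])   # heavy-chain Chain objects
    #     light = ChainCollection(antibody_objects=[...])   # matching light-chain Chain objects
    #     fabs = FabCollection(heavy_chains=heavy, light_chains=light)
    #     fabs.molecular_weights()
    #     fabs.total_charge(ph=7.4)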
def molecular_weights(self, monoisotopic=False):
return [heavy + light for heavy, light in zip(self._heavy_chains.molecular_weights(monoisotopic=monoisotopic),
self._light_chains.molecular_weights(monoisotopic=monoisotopic))]
def extinction_coefficients(self, extinction_coefficient_database='Standard', reduced=False, normalise=False,
**kwargs):
heavy_ec = self._heavy_chains.extinction_coefficients(
extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced)
light_ec = self._light_chains.extinction_coefficients(
extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced)
if normalise:
return [(heavy + light) / mw for heavy, light, mw in
zip(heavy_ec, light_ec, self.molecular_weights(**kwargs))]
else:
return [heavy + light for heavy, light in zip(heavy_ec, light_ec)]
def hydrophobicity_matrix(self):
return np.column_stack((self._heavy_chains.hydrophobicity_matrix(), self._light_chains.hydrophobicity_matrix()))
def charge(self):
return np.column_stack((self._heavy_chains.charge, self._light_chains.charge))
def total_charge(self, ph=7.4, pka_database='Wikipedia'):
available_pi_databases = ["EMBOSS", "DTASetect", "Solomon", "Sillero", "Rodwell", "Wikipedia", "Lehninger",
"Grimsley"]
assert pka_database in available_pi_databases, \
"Selected pI database {} not available. Available databases: {}".format(pka_database,
' ,'.join(available_pi_databases))
data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database])
pka_data = data_loader.get_data()
return [calculate_charge(sequence=seq, ph=ph, pka_values=pka_data) for seq in self.sequences]
def igblast_local_query(self, file_path, chain):
if chain.lower() == 'light':
self._light_chains.igblast_local_query(file_path=file_path)
elif chain.lower() == 'heavy':
self._heavy_chains.igblast_local_query(file_path=file_path)
else:
raise ValueError('Specify if the data being loaded is for the heavy or light chain')
def igblast_server_query(self, **kwargs):
self._light_chains.igblast_server_query(**kwargs)
self._heavy_chains.igblast_server_query(**kwargs)
def numbering_table(self, as_array=False, region='all', chain='both', **kwargs):
return to_numbering_table(as_array=as_array, region=region, chain=chain,
heavy_chains_numbering_table=self._heavy_chains.numbering_table,
light_chains_numbering_table=self._light_chains.numbering_table,
names=self.names, **kwargs)
def _germline_pd(self):
# empty dictionaries return false, so this condition checks if any of the values are False
if all([x for x in self._light_chains.germline_identity.values()]) is False:
# this means there is no information about the germline,
# by default it will run a web query
self._light_chains.igblast_server_query()
if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
self._heavy_chains.igblast_server_query()
heavy_chain_germlines = self._heavy_chains.germline
light_chain_germlines = self._light_chains.germline
data = np.array([[heavy_chain_germlines[x][0] for x in self._internal_heavy_name],
[heavy_chain_germlines[x][1] for x in self._internal_heavy_name],
[light_chain_germlines[x][0] for x in self._internal_light_name],
[light_chain_germlines[x][1] for x in self._internal_light_name]]).T
df = pd.DataFrame(data=data,
columns=pd.MultiIndex.from_tuples([('Heavy', 'Assignment'),
('Heavy', 'Score'),
('Light', 'Assignment'),
('Light', 'Score')]),
index=self.names)
df.loc[:, (slice(None), 'Score')] = df.loc[:, (slice(None), 'Score')].apply(pd.to_numeric)
return df
def save_to_json(self, path, update=True):
with open(os.path.join(path + '.json'), 'w') as f:
fab_data = json_FabCollection_formatter(self)
json.dump(fab_data, f, indent=2)
def save_to_pb2(self, path, update=True):
proto_parser = FabCollectionProto()
try:
with open(os.path.join(path + '.pb2'), 'rb') as f:
proto_parser.ParseFromString(f.read())
except IOError:
# Creating new file
pass
pb2_FabCollection_formatter(self, proto_parser)
with open(os.path.join(path + '.pb2'), 'wb') as f:
f.write(proto_parser.SerializeToString())
def save_to_fasta(self, path, update=True):
raise NotImplementedError
@classmethod
def load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'r') as f:
data = json.load(f)
fab_objects = json_FabCollection_parser(data)
fab_collection = cls(fab=fab_objects)
return fab_collection
@classmethod
def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'rb') as f:
proto_parser = FabCollectionProto()
proto_parser.ParseFromString(f.read())
fab_objects = pb2_FabCollection_parser(proto_parser)
fab_collection = cls(fab=fab_objects)
return fab_collection
@classmethod
def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA, n_threads=20,
verbose=True, show_progressbar=True):
raise NotImplementedError
def _get_names_iter(self, chain='both'):
if chain == 'both':
for light_chain, heavy_chain in zip(self._light_chains, self._heavy_chains):
yield f"{light_chain.name}-{heavy_chain.name}"
elif chain == 'light':
for light_chain in self._light_chains:
yield light_chain.name
elif chain == 'heavy':
for heavy_chain in self._heavy_chains:
yield heavy_chain.name
else:
raise ValueError(f"Unknown chain type ({chain}), available options are:"
f"both, light or heavy.")
@property
def regions(self):
heavy_regions = self._heavy_chains.ab_region_index()
light_regions = self._light_chains.ab_region_index()
return {name: {CHAIN_FLAGS.HEAVY_CHAIN: heavy_regions[heavy],
CHAIN_FLAGS.LIGHT_CHAIN: light_regions[light]} for name, heavy, light in
zip(self.names, self._internal_heavy_name, self._internal_light_name)}
@property
def names(self):
return self._names
@property
def sequences(self):
return self._pair_sequences
@property
def aligned_sequences(self):
return [heavy + light for light, heavy in
zip(self._heavy_chains.aligned_sequences,
self._light_chains.aligned_sequences)]
@property
def n_ab(self):
return self._n_ab
@property
def germline_identity(self):
return self._germline_identity()
@property
def germline(self):
return self._germline_pd()
def _string_summary_basic(self):
return "abpytools.FabCollection Number of sequences: {}".format(self._n_ab)
def __len__(self):
return self._n_ab
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def __getitem__(self, indices):
if isinstance(indices, int):
return Fab(heavy_chain=self._heavy_chains[indices],
light_chain=self._light_chains[indices],
name=self.names[indices], load=False)
else:
return FabCollection(heavy_chains=list(itemgetter(*indices)(self._heavy_chains)),
light_chains=list(itemgetter(*indices)(self._light_chains)),
names=list(itemgetter(*indices)(self._names)))
def _germline_identity(self):
# empty dictionaries return false, so this condition checks if any of the values are False
if all([x for x in self._light_chains.germline_identity.values()]) is False:
# this means there is no information about the germline,
# by default it will run a web query
self._light_chains.igblast_server_query()
if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
self._heavy_chains.igblast_server_query()
return germline_identity_pd(self._heavy_chains.germline_identity,
self._light_chains.germline_identity,
self._internal_heavy_name,
self._internal_light_name,
self._names)
def get_object(self, name):
"""
        :param name: name of the Fab object to retrieve
        :type name: str
        :return: the Fab object whose name matches
"""
if name in self.names:
index = self.names.index(name)
return self[index]
else:
raise ValueError('Could not find sequence with specified name')
| mit |
tartavull/google-cloud-python | bigtable/tests/unit/test_client.py | 2 | 24528 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
class _CredentialsWithScopes(
google.auth.credentials.Credentials,
google.auth.credentials.Scoped):
pass
return mock.Mock(spec=_CredentialsWithScopes)
class Test__make_data_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_data_stub
return _make_data_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
MUT.bigtable_pb2.BigtableStub,
MUT.DATA_API_HOST,
extra_options=MUT._GRPC_MAX_LENGTH_OPTIONS,
)
def test_with_emulator(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
MUT.bigtable_pb2.BigtableStub,
emulator_host,
),
])
class Test__make_instance_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_instance_stub
return _make_instance_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub,
MUT.INSTANCE_ADMIN_HOST,
extra_options=MUT._GRPC_EXTRA_OPTIONS,
)
def test_with_emulator(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub,
emulator_host,
),
])
class Test__make_operations_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_operations_stub
return _make_operations_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.longrunning import operations_grpc
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
operations_grpc.OperationsStub,
MUT.OPERATIONS_API_HOST,
extra_options=MUT._GRPC_EXTRA_OPTIONS,
)
def test_with_emulator(self):
from google.longrunning import operations_grpc
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
operations_grpc.OperationsStub,
emulator_host,
),
])
class Test__make_table_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_table_stub
return _make_table_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
MUT.bigtable_table_admin_pb2.BigtableTableAdminStub,
MUT.TABLE_ADMIN_HOST,
extra_options=MUT._GRPC_EXTRA_OPTIONS,
)
def test_with_emulator(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
MUT.bigtable_table_admin_pb2.BigtableTableAdminStub,
emulator_host,
),
])
class TestClient(unittest.TestCase):
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
USER_AGENT = 'you-sir-age-int'
@staticmethod
def _get_target_class():
from google.cloud.bigtable.client import Client
return Client
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@mock.patch('google.cloud.bigtable.client._make_table_stub')
@mock.patch('google.cloud.bigtable.client._make_operations_stub')
@mock.patch('google.cloud.bigtable.client._make_instance_stub')
@mock.patch('google.cloud.bigtable.client._make_data_stub')
def _make_one_with_mocks(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub,
*args, **kwargs):
return self._make_one(*args, **kwargs)
@mock.patch('google.cloud.bigtable.client._make_table_stub')
@mock.patch('google.cloud.bigtable.client._make_operations_stub')
@mock.patch('google.cloud.bigtable.client._make_instance_stub')
@mock.patch('google.cloud.bigtable.client._make_data_stub')
def test_constructor_default_scopes(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub):
from google.cloud.bigtable.client import DATA_SCOPE
expected_scopes = (DATA_SCOPE,)
credentials = _make_credentials()
custom_user_agent = 'custom-application'
client = self._make_one(
project=self.PROJECT, credentials=credentials,
user_agent=custom_user_agent)
self.assertEqual(client.project, self.PROJECT)
self.assertIs(
client._credentials, credentials.with_scopes.return_value)
self.assertIsNone(client._http_internal)
self.assertFalse(client._read_only)
self.assertFalse(client._admin)
self.assertEqual(client.SCOPE, expected_scopes)
self.assertEqual(client.user_agent, custom_user_agent)
self.assertIsNone(client.emulator_host)
self.assertIs(client._data_stub, _make_data_stub.return_value)
self.assertIsNone(client._instance_stub_internal)
self.assertIsNone(client._operations_stub_internal)
self.assertIsNone(client._table_stub_internal)
# Check mocks.
credentials.with_scopes.assert_called_once_with(expected_scopes)
_make_data_stub.assert_called_once_with(client)
_make_instance_stub.assert_not_called()
_make_operations_stub.assert_not_called()
_make_table_stub.assert_not_called()
@mock.patch('google.cloud.bigtable.client._make_table_stub')
@mock.patch('google.cloud.bigtable.client._make_operations_stub')
@mock.patch('google.cloud.bigtable.client._make_instance_stub')
@mock.patch('google.cloud.bigtable.client._make_data_stub')
def test_constructor_with_admin(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub):
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.bigtable.client import ADMIN_SCOPE
from google.cloud.bigtable.client import DATA_SCOPE
expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, admin=True)
self.assertEqual(client.project, self.PROJECT)
self.assertIs(
client._credentials, credentials.with_scopes.return_value)
self.assertIsNone(client._http_internal)
self.assertFalse(client._read_only)
self.assertTrue(client._admin)
self.assertEqual(client.SCOPE, expected_scopes)
self.assertEqual(client.user_agent, DEFAULT_USER_AGENT)
self.assertIsNone(client.emulator_host)
self.assertIs(client._data_stub, _make_data_stub.return_value)
self.assertIs(
client._instance_stub_internal, _make_instance_stub.return_value)
self.assertIs(
client._operations_stub_internal,
_make_operations_stub.return_value)
self.assertIs(
client._table_stub_internal, _make_table_stub.return_value)
# Check mocks.
credentials.with_scopes.assert_called_once_with(expected_scopes)
_make_data_stub.assert_called_once_with(client)
_make_instance_stub.assert_called_once_with(client)
_make_operations_stub.assert_called_once_with(client)
_make_table_stub.assert_called_once_with(client)
def test_constructor_both_admin_and_read_only(self):
credentials = _make_credentials()
with self.assertRaises(ValueError):
self._make_one(
project=self.PROJECT, credentials=credentials,
admin=True, read_only=True)
def test__get_scopes_default(self):
from google.cloud.bigtable.client import DATA_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials())
self.assertEqual(client._get_scopes(), (DATA_SCOPE,))
def test__get_scopes_admin(self):
from google.cloud.bigtable.client import ADMIN_SCOPE
from google.cloud.bigtable.client import DATA_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(),
admin=True)
expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
self.assertEqual(client._get_scopes(), expected_scopes)
def test__get_scopes_read_only(self):
from google.cloud.bigtable.client import READ_ONLY_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(),
read_only=True)
self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,))
def _copy_helper_check_stubs(self, client, new_client):
if client._admin:
# Check the instance stub.
self.assertIs(
client._instance_stub_internal, mock.sentinel.inst_stub1)
self.assertIs(
new_client._instance_stub_internal, mock.sentinel.inst_stub2)
self.assertIsNot(
new_client._instance_stub_internal,
client._instance_stub_internal)
# Check the operations stub.
self.assertIs(
client._operations_stub_internal, mock.sentinel.ops_stub1)
self.assertIs(
new_client._operations_stub_internal, mock.sentinel.ops_stub2)
self.assertIsNot(
new_client._operations_stub_internal,
client._operations_stub_internal)
# Check the table stub.
self.assertIs(
client._table_stub_internal, mock.sentinel.table_stub1)
self.assertIs(
new_client._table_stub_internal, mock.sentinel.table_stub2)
self.assertIsNot(
new_client._table_stub_internal, client._table_stub_internal)
else:
# Check the instance stub.
self.assertIsNone(client._instance_stub_internal)
self.assertIsNone(new_client._instance_stub_internal)
# Check the operations stub.
self.assertIsNone(client._operations_stub_internal)
self.assertIsNone(new_client._operations_stub_internal)
# Check the table stub.
self.assertIsNone(client._table_stub_internal)
self.assertIsNone(new_client._table_stub_internal)
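    # Note on the patched factories below: each mock uses side_effect with two
    # sentinels, so the first stub value is handed to the original client and the
    # second to the client produced by copy(); that is the distinction the
    # assertions in _copy_test_helper and _copy_helper_check_stubs rely on.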
@mock.patch(
'google.cloud.bigtable.client._make_table_stub',
side_effect=[mock.sentinel.table_stub1, mock.sentinel.table_stub2],
)
@mock.patch(
'google.cloud.bigtable.client._make_operations_stub',
side_effect=[mock.sentinel.ops_stub1, mock.sentinel.ops_stub2],
)
@mock.patch(
'google.cloud.bigtable.client._make_instance_stub',
side_effect=[mock.sentinel.inst_stub1, mock.sentinel.inst_stub2],
)
@mock.patch(
'google.cloud.bigtable.client._make_data_stub',
side_effect=[mock.sentinel.data_stub1, mock.sentinel.data_stub2],
)
def _copy_test_helper(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub, **kwargs):
credentials = _make_credentials()
# Make sure it "already" is scoped.
credentials.requires_scopes = False
client = self._make_one(
project=self.PROJECT, credentials=credentials, **kwargs)
self.assertIs(client._credentials, credentials)
new_client = client.copy()
self.assertEqual(new_client._admin, client._admin)
self.assertEqual(new_client._credentials, client._credentials)
self.assertEqual(new_client.project, client.project)
self.assertEqual(new_client.user_agent, client.user_agent)
# Make sure stubs are not preserved.
self.assertIs(client._data_stub, mock.sentinel.data_stub1)
self.assertIs(new_client._data_stub, mock.sentinel.data_stub2)
self.assertIsNot(new_client._data_stub, client._data_stub)
self._copy_helper_check_stubs(client, new_client)
# Check mocks.
credentials.with_scopes.assert_not_called()
stub_calls = [
mock.call(client),
mock.call(new_client),
]
self.assertEqual(_make_data_stub.mock_calls, stub_calls)
if client._admin:
self.assertEqual(_make_instance_stub.mock_calls, stub_calls)
self.assertEqual(_make_operations_stub.mock_calls, stub_calls)
self.assertEqual(_make_table_stub.mock_calls, stub_calls)
else:
_make_instance_stub.assert_not_called()
_make_operations_stub.assert_not_called()
_make_table_stub.assert_not_called()
def test_copy(self):
self._copy_test_helper()
def test_copy_admin(self):
self._copy_test_helper(admin=True)
def test_copy_read_only(self):
self._copy_test_helper(read_only=True)
def test_credentials_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials)
self.assertIs(client.credentials, credentials.with_scopes.return_value)
def test_project_name_property(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials)
project_name = 'projects/' + project
self.assertEqual(client.project_name, project_name)
def test_instance_stub_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=True)
self.assertIs(client._instance_stub, client._instance_stub_internal)
def test_instance_stub_non_admin_failure(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=False)
with self.assertRaises(ValueError):
getattr(client, '_instance_stub')
def test_operations_stub_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=True)
self.assertIs(client._operations_stub,
client._operations_stub_internal)
def test_operations_stub_non_admin_failure(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=False)
with self.assertRaises(ValueError):
getattr(client, '_operations_stub')
def test_table_stub_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=True)
self.assertIs(client._table_stub, client._table_stub_internal)
def test_table_stub_non_admin_failure(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=False)
with self.assertRaises(ValueError):
getattr(client, '_table_stub')
def test_instance_factory_defaults(self):
from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.instance import (
_EXISTING_INSTANCE_LOCATION_ID)
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
credentials = _make_credentials()
client = self._make_one_with_mocks(
project=PROJECT, credentials=credentials)
instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME)
self.assertIsInstance(instance, Instance)
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id,
_EXISTING_INSTANCE_LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES)
self.assertIs(instance._client, client)
def test_instance_factory_w_explicit_serve_nodes(self):
from google.cloud.bigtable.instance import Instance
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
LOCATION_ID = 'locname'
SERVE_NODES = 5
credentials = _make_credentials()
client = self._make_one_with_mocks(
project=PROJECT, credentials=credentials)
instance = client.instance(
INSTANCE_ID, display_name=DISPLAY_NAME,
location=LOCATION_ID, serve_nodes=SERVE_NODES)
self.assertIsInstance(instance, Instance)
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id, LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES)
self.assertIs(instance._client, client)
def test_list_instances(self):
from google.cloud.bigtable._generated import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable._generated import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from tests.unit._testing import _FakeStub
LOCATION = 'projects/' + self.PROJECT + '/locations/locname'
FAILED_LOCATION = 'FAILED'
INSTANCE_ID1 = 'instance-id1'
INSTANCE_ID2 = 'instance-id2'
INSTANCE_NAME1 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1)
INSTANCE_NAME2 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2)
credentials = _make_credentials()
client = self._make_one_with_mocks(
project=self.PROJECT,
credentials=credentials,
admin=True,
)
# Create request_pb
request_pb = messages_v2_pb2.ListInstancesRequest(
parent='projects/' + self.PROJECT,
)
# Create response_pb
response_pb = messages_v2_pb2.ListInstancesResponse(
failed_locations=[
FAILED_LOCATION,
],
instances=[
data_v2_pb2.Instance(
name=INSTANCE_NAME1,
display_name=INSTANCE_NAME1,
),
data_v2_pb2.Instance(
name=INSTANCE_NAME2,
display_name=INSTANCE_NAME2,
),
],
)
# Patch the stub used by the API method.
client._instance_stub_internal = stub = _FakeStub(response_pb)
# Create expected_result.
failed_locations = [FAILED_LOCATION]
instances = [
client.instance(INSTANCE_ID1, LOCATION),
client.instance(INSTANCE_ID2, LOCATION),
]
expected_result = (instances, failed_locations)
# Perform the method and check the result.
result = client.list_instances()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ListInstances',
(request_pb,),
{},
)])
class _Client(object):
def __init__(self, credentials, user_agent, emulator_host=None):
self.credentials = credentials
self.user_agent = user_agent
self.emulator_host = emulator_host
| apache-2.0 |
jwill89/clifford-discord-bot | source/retired/main.py | 1 | 31345 | import discord
from discord.ext import commands
import random
import MySQLdb
# ********************************************** #
# DEFINITIONS ********************************** #
# ********************************************** #
# Bot Description
description = '''Official Zealot Gaming Discord bot!'''
# Define Bot
bot = commands.Bot(command_prefix='!', description='Official Zealot Gaming Discord Bot')
# Define MySQL DB and Cursor Object
db = MySQLdb.connect(host="localhost",
user="discord_secure",
passwd="password-here",
db="discord")
# ********************************************** #
# FUNCTIONS ************************************ #
# ********************************************** #
# Check for Game Abbreviations
def is_game_abv(game_abv: str):
try:
sql = "SELECT 1 FROM games WHERE `abv` = %s LIMIT 1"
cur = db.cursor()
result = cur.execute(sql, (game_abv,))
cur.close()
except Exception as e:
print('Exception: ' + str(e))
result = 0
# If we got a result, true, else false
return result == 1
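# Example (illustrative value): is_game_abv('ow') is True only when a row with
# `abv` = 'ow' already exists in the games table.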
# Check for Game Names
def is_game_name(game_name: str):
try:
sql = "SELECT 1 FROM games WHERE `name` = %s LIMIT 1"
cur = db.cursor()
result = cur.execute(sql, (game_name,))
cur.close()
except Exception as e:
print('Exception: ' + str(e))
result = 0
# If we got a result, true, else false
return result == 1
# Check for Staff Member Status
def is_staff(member: discord.Member):
# Return True or False if User is a Staff Member
return 'Staff' in [r.name for r in member.roles]
# ********************************************** #
# BOT EVENTS *********************************** #
# ********************************************** #
# Bot Start Event
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
await bot.change_presence(game=discord.Game(name='Zealot Gaming'))
# Welcome Message
@bot.event
async def on_member_join(member):
channel = bot.get_channel('108369515502411776')
fmt = "Everyone welcome {0.mention} to Zealot Gaming! Have a great time here! :wink: " \
"http://puu.sh/nG6Qe.wav".format(member)
await bot.send_message(channel, fmt)
# Goodbye Message
@bot.event
async def on_member_remove(member):
channel = bot.get_channel('108369515502411776')
fmt = ":wave: Goodbye {0}, we're sad to see you go!".format(member.name)
await bot.send_message(channel, fmt)
# ********************************************** #
# UN-GROUPED BOT COMMANDS ********************** #
# ********************************************** #
# COMMAND: !hello
@bot.command(pass_context=True)
async def hello(ctx):
# we do not want the bot to reply to itself
if ctx.message.author == bot.user:
return
else:
msg = 'Hello {0.message.author.mention}'.format(ctx)
await bot.send_message(ctx.message.channel, msg)
# COMMAND: !carlito
@bot.command()
async def carlito():
"""The legendary message of Carlito, maz00's personal cabana boy."""
await bot.say("wew men :ok_hand::skin-tone-1: that's some good shit:100: some good shit :100: that's some good shit"
" right there :100: :ok_hand::skin-tone-1: right there :ok_hand::skin-tone-1: :100: sign me the FUCK "
"up:100: :100: :ok_hand::skin-tone-1: :eggplant:")
# COMMAND: !eightball
@bot.command(pass_context=True)
async def eightball(ctx, question: str):
"""Rolls a magic 8-ball to answer any question you have."""
if question is None:
await bot.say('{0.message.author.mention}, you did not ask a question.'.format(ctx))
return
# Answers List (Classic 8-Ball, 20 Answers)
answers = ['It is certain.',
'It is decidedly so',
'Without a doubt.',
'Yes, definitely.',
'You may rely on it.',
'As I see it, yes.',
'Most likely.',
'Outlook good.',
'Yes.',
'Signs point to yes.',
'Reply hazy; try again.',
'Ask again later.',
'Better not tell you now.',
'Cannot predict now.',
'Concentrate, then ask again.',
'Do not count on it.',
'My reply is no.',
'My sources say no.',
'Outlook not so good.',
'Very doubtful.']
# Send the Answer
await bot.say('{0.message.author.mention}, '.format(ctx) + random.choice(answers))
# COMMAND: !roll
@bot.command()
async def roll(dice: str):
"""Rolls a dice in NdN format."""
try:
rolls, limit = map(int, dice.split('d'))
except Exception:
await bot.say('Format has to be in NdN!')
return
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
await bot.say(result)
# COMMAND: !choose
@bot.command()
async def choose(*choices: str):
"""Chooses between multiple choices."""
await bot.say(random.choice(choices))
# COMMAND: !joined
@bot.command()
async def joined(member: discord.Member):
"""Says when a member joined."""
await bot.say('{0.name} joined in {0.joined_at}'.format(member))
# COMMAND: !get_roles
@bot.command()
async def get_roles(member: discord.Member):
"""Lists a User's Roles"""
total = 0
role_list = ''
for role in member.roles:
if total > 0:
role_list += ', '
role_list += str(role)
total += 1
await bot.say('{0.name} is a member of these roles: '.format(member) + role_list)
# COMMAND: !get_channel_id
@bot.command(pass_context=True)
async def get_channel_id(ctx):
"""Lists the ID of the channel the message is sent in."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
await bot.say('Channel ID is {0.id}'.format(ctx.message.channel))
# COMMAND: !join
@bot.command(pass_context=True)
async def join(ctx, *, role_name: str):
"""Allows a user to join a public group."""
# List of Allowed Public Roles
allowed_roles = ['Europe',
'North America',
'Oceania',
'Overwatch',
'League of Legends',
'Co-op',
'Minna-chan']
if role_name not in allowed_roles:
await bot.say('{0.mention}, you may only join allowed public groups.'.format(ctx.message.author))
return
# Define role, then add role to member.
try:
role = discord.utils.get(ctx.message.server.roles, name=role_name)
await bot.add_roles(ctx.message.author, role)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the roster for you. "
"I'm sorry! : ".format(ctx.message.author) + str(e))
return
# Success Message
await bot.say('{0.mention}, you have successfully been added to the group **{1}**.'
.format(ctx.message.author, role_name))
# ********************************************** #
# GROUPED COMMANDS : EVENTS ******************** #
# ********************************************** #
# COMMAND: !events
@bot.group(pass_context=True)
async def events(ctx):
"""Manage events and attendance!"""
if ctx.invoked_subcommand is None:
await bot.say('Invalid command passed. Must be *add*, *description*, *edit*, *register*, or *remove*.')
# COMMAND: !events add
@events.command(name='add', pass_context=True)
async def events_add(ctx, date: str, time: str, *, title: str):
"""Add an event to the Events List!
Date **must** be in YYYY/MM/DD format. Time **must** be in UTC."""
# Set #events Channel
event_channel = bot.get_channel('296694692135829504')
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Make sure we have a date.
if date is None:
await bot.say('Error: You must enter a date in YYYY/MM/DD format.')
return
# Make sure we have a time.
if time is None:
await bot.say('Error: You must enter a time in HH:MM format in UTC timezone.')
return
# Make sure we have a title.
    if title is None:
await bot.say('Error: You must enter a title for the event.')
return
# Add Event to Database
try:
sql = "INSERT INTO events (`date`,`time`,`title`) VALUES (%s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (date, time, title))
event_id = cur.lastrowid
msg_text = "**Title**: {0} \n**Event ID**: {1} \n**Date & Time**: {2} at {3} (UTC)"
# Add Message to Events Channel and Save Message ID
message = await bot.send_message(event_channel, msg_text.format(title, event_id, date, time))
cur.execute('UPDATE events SET `message_id` = %s WHERE `event_id` = %s', (message.id, event_id))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding the event to the list. '.format(ctx.message.author)
+ str(e))
return
# Success Message
await bot.say('{0.mention}, your event was successfully added. The event ID is: {1}.'
.format(ctx.message.author, event_id))
# COMMAND: !events description
@events.command(name='description', pass_context=True)
async def events_description(ctx, event_id: int, *, desc: str):
"""Adds a Description to an Event Given an Event ID."""
# EVENT CHANNEL ID: 296694692135829504
event_channel = bot.get_channel('296694692135829504')
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
    # Make sure we have an event ID.
if event_id is None:
await bot.say('Error: You must enter an event ID. Check the #events channel.')
return
    # Make sure we have a description.
if desc is None:
await bot.say('Error: You must enter a description.')
return
try:
sql = "UPDATE events SET `description` = %s WHERE `event_id` = %s"
cur = db.cursor()
cur.execute(sql, (desc, event_id))
cur.execute("SELECT `message_id` FROM events WHERE `event_id` = %s", (event_id,))
msg_id = cur.fetchone()
message = await bot.get_message(event_channel, msg_id[0])
msg_text = message.content + " \n**Description**: {0}".format(desc)
# Update Message in Events Channel with Description
await bot.edit_message(message, msg_text)
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding a description to the event. '.format(ctx.message.author)
+ str(e))
return
# Success Message
await bot.say('{0.mention}, the event was successfully updated with a description.'.format(ctx.message.author))
# ********************************************** #
# GROUPED COMMANDS : GAMES ********************* #
# ********************************************** #
# COMMAND: !games
@bot.group(pass_context=True)
async def games(ctx):
"""Manages games for the roster."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid command passed. Must be *add*, *edit*, *list*, or *remove*.')
# COMMAND: !games add
@games.command(name='add', pass_context=True)
async def games_add(ctx, game_abv: str, *, game_name: str):
"""Adds a game to the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Does Game Abbreviation Exist?
if is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation is already in use.'.format(ctx.message.author))
return
# Does Game Name Exist?
if is_game_name(game_name):
await bot.say('{0.mention}, this game is already in the list.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO games (`abv`,`name`) VALUES (%s, %s)"
cur = db.cursor()
cur.execute(sql, (game_abv, game_name))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding the game to the games list. '.format(ctx.message.author)
+ str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully added to the games list!'.format(ctx.message.author))
# COMMAND: !games edit
@games.command(name='edit', pass_context=True)
async def games_edit(ctx, game_abv: str, *, game_name: str):
"""Updates a game in the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Is there anything to update?
if not (is_game_abv(game_abv) or is_game_name(game_name)):
        await bot.say('{0.mention}, either the abbreviation or the game name must exist to update.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE games SET `abv` = %s, `name = %s WHERE `abv` = %s OR `name` = %s"
cur = db.cursor()
cur.execute(sql, (game_abv, game_name, game_abv, game_name))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error updating the game in the games list. '.format(ctx.message.author)
+ str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully updated in the games list!'.format(ctx.message.author))
# COMMAND: !games remove
@games.command(name='remove', pass_context=True)
async def games_remove(ctx, *, game_or_abv: str):
"""Removes a game from the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Is there anything to update?
if not (is_game_abv(game_or_abv) or is_game_name(game_or_abv)):
        await bot.say('{0.mention}, either the abbreviation or the game name must exist to update.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM games WHERE `abv` = %s OR `name` = %s"
cur = db.cursor()
cur.execute(sql, (game_or_abv, game_or_abv))
db.commit()
cur.close()
except Exception as e:
await bot.say("{0.mention}, there was an error deleting the game from the games list."
" ".format(ctx.message.author) + str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully deleted from the games list!'.format(ctx.message.author))
# COMMAND: !games list
@games.command(name='list', pass_context=True)
async def games_list(ctx):
"""Sends a message to the user with the current games and abbreviations for use in the roster."""
# Handle Database
try:
sql = "SELECT `abv`, `name` FROM games ORDER BY `name`"
cur = db.cursor()
cur.execute(sql)
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the list of games for you."
" I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
abvs = ''
names = ''
for row in result:
abvs += (row[0] + '\n')
names += (row[1] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="Abbreviation", value=abvs, inline=True)
embed.add_field(name="Game Name", value=names, inline=True)
# Send Table to User Privately
await bot.send_message(ctx.message.channel, embed=embed)
# ********************************************** #
# GROUPED COMMANDS : ROSTER ******************** #
# ********************************************** #
# COMMAND: !roster
@bot.group(pass_context=True)
async def roster(ctx):
"""Handles Roster Management."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid roster command passed. Must be *add*, *edit*, *list*, or *remove*.')
# COMMAND: !roster add
@roster.command(name='add', pass_context=True)
async def roster_add(ctx, game_abv: str, *, ign: str):
"""Adds username to roster.
User a game abbreviation from the games list. Only one entry per game. Include all in-game names if necessary."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO roster (`discord_account`,`game_abv`,`game_account`) VALUES (%s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (username, game_abv, ign))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error adding your information to the roster.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your information was successfully added to the roster!'.format(ctx))
# COMMAND: !roster edit
@roster.command(name='edit', pass_context=True)
async def roster_edit(ctx, game_abv: str, *, ign: str):
"""Updates a roster entry for a specific game.
If the either Game Name or your in-Game Name have spaces, put them in quotes."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game'
' abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE roster SET `game_account` = %s WHERE `discord_account` = %s AND `game_abv` = %s"
cur = db.cursor()
cur.execute(sql, (ign, username, game_abv))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error updating your roster information.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your roster information was successfully updated!'.format(ctx))
# COMMAND: !roster remove
@roster.command(name='remove', pass_context=True)
async def roster_remove(ctx, game_abv: str, *, ign: str):
"""Removes a user's entries in the roster for the specified game."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable '
'game abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM roster WHERE `discord_account` = %s AND `game_abv` = %s AND `game_account` = %s"
cur = db.cursor()
cur.execute(sql, (username, game_abv, ign))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error deleting your roster information.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your roster information was successfully deleted!'.format(ctx))
# COMMAND: !roster list
@roster.command(name='list', pass_context=True)
async def roster_list(ctx, game_abv: str):
"""Sends a message to the user with the current roster for the specified game."""
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "SELECT `discord_account`, `game_account` FROM roster WHERE `game_abv` = %s ORDER BY `discord_account`"
cur = db.cursor()
cur.execute(sql, (game_abv,))
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the roster for you. "
"I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
accounts = ''
names = ''
for row in result:
accounts += (row[0] + '\n')
names += (row[1] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="Discord Account", value=accounts, inline=True)
embed.add_field(name="In-Game Name", value=names, inline=True)
# Send Table to Channel
await bot.send_message(ctx.message.channel, embed=embed)
# ********************************************** #
# GROUPED COMMANDS : RECRUIT ******************* #
# ********************************************** #
# COMMAND: !recruit
@bot.group(pass_context=True)
async def recruit(ctx):
"""Handles Recruitment Post and Invites Management."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid recruitment command passed. Must be *add*, *edit*, *invite*, *list*, or *remove*.')
# COMMAND: !recruit add
@recruit.command(name='add', pass_context=True)
async def recruit_add(ctx, game_abv: str, *, link: str):
"""Adds recruitment post link to the recruitment list. Use a game abbreviation from the games list."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say(
            '{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO recruitment (`game`,`link`) VALUES (%s, %s)"
cur = db.cursor()
cur.execute(sql, (game_abv, link))
db.commit()
cur.close()
except Exception:
await bot.say(
'{0.message.author.mention}, there was an error adding your recruitment link to the list.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your information was successfully added to the recruitment '
'posts list!'.format(ctx))
# COMMAND: !recruit edit
@recruit.command(name='edit', pass_context=True)
async def recruit_edit(ctx, entry_id: int, *, link: str):
"""Updates a recruitment post entry with the specified entry ID."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE recruitment SET `link` = %s WHERE `entry_id` = %s"
cur = db.cursor()
cur.execute(sql, (link, entry_id))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error updating the specified '
'recruitment entry.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, the recruitment entry was successfully updated!'.format(ctx))
# COMMAND: !recruit remove
@recruit.command(name='remove', pass_context=True)
async def recruit_remove(ctx, entry_id: int):
"""Removes an entry for the recruitment posts list with the specified entry ID."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM recruitment WHERE `entry_id` = %s"
cur = db.cursor()
cur.execute(sql, (entry_id,))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error deleting the specified '
'recruitment entry.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, the recruitment entry was successfully deleted!'.format(ctx))
# COMMAND: !recruit list
@recruit.command(name='list', pass_context=True)
async def recruit_list(ctx):
"""Lists all recruitment post entries in the system."""
# Handle Database
try:
sql = "SELECT * FROM recruitment ORDER BY `game`"
cur = db.cursor()
cur.execute(sql)
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the recruitment list "
"for you. I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
entries = ''
game_abvs = ''
links = ''
for row in result:
entries += (row[0] + '\n')
game_abvs += (row[1] + '\n')
links += (row[2] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="ID", value=entries, inline=True)
embed.add_field(name="Game", value=game_abvs, inline=True)
embed.add_field(name="Link", value=links, inline=True)
# Send Table to Channel
await bot.send_message(ctx.message.channel, embed=embed)
# COMMAND: !recruit invite
@recruit.command(name='invite')
async def recruit_invite(duration: int):
"""Provides an invite link to the Discord server. Set duration to 0 for permanent invite."""
    # Default Duration 30 Minutes (1800 Seconds), Else Convert Minutes to Seconds
if duration is None:
duration = 1800
else:
duration *= 60
# WELCOME CHANNEL ID: 141622052133142529
welcome_channel = bot.get_channel('141622052133142529')
# Create the Invite
new_invite = await bot.create_invite(welcome_channel, max_age=duration)
# Send Message with Invite Link
await bot.say('Your newly generated invite link is: {0.url}'.format(new_invite))
# ********************************************** #
# MODERATOR COMMANDS *************************** #
# ********************************************** #
# COMMAND: !give_role
@bot.command(pass_context=True)
async def give_role(ctx, username: str, *, role_name: str):
"""Assigns a role to a user."""
# List of Roles Staff Can Add To.
allowed_roles = ['Europe',
'North America',
'Oceania',
'Overwatch',
'League of Legends',
'Co-op',
'Minna-chan',
'Squire',
'Knight',
'Zealot']
# Is the user allowed? (Must be Staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
if role_name not in allowed_roles:
        await bot.say('{0.mention}, you may only assign users to public groups or the Squire, Knight, and Zealot ranks.'
                      .format(ctx.message.author))
return
# Define role, then add role to member.
try:
role = discord.utils.get(ctx.message.server.roles, name=role_name)
user = discord.utils.get(ctx.message.server.members, name=username)
await bot.add_roles(user, role)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an granting the role to the user."
" ".format(ctx.message.author) + str(e))
return
# Success Message
await bot.say('{0.mention}, you have successfully added **{1}** to the group **{2}**'
'.'.format(ctx.message.author, username, role_name))
# COMMAND: !kick
@bot.command(name='kick', pass_context=True)
async def mod_kick(ctx, username: str, *, reason: str):
"""Kicks a user from the server."""
# User must be a staff member
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Add to DB and Post Message
try:
# Variables Needed
member = discord.utils.get(ctx.message.server.members, name=username)
staffer = ctx.message.author
# Handle Database
sql = "INSERT INTO mod_log (`action`,`user`, `user_id`, `staff`, `staff_id`, reason) " \
"VALUES ('kick', %s, %s, %s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (str(member), member.id, str(staffer), staffer.id, reason))
# Save Last Row ID
case_id = cur.lastrowid
# Insert Message
log_channel = bot.get_channel('303262467205890051')
msg_text = "**Case #{0}** | Kick :boot: \n**User**: {1} ({2}) " \
"\n**Moderator**: {3} ({4}) \n**Reason**: {5}"
# Add Message to Events Channel and Save Message ID
case_message = await bot.send_message(log_channel, msg_text.format(case_id, str(member), member.id, str(staffer), staffer.id, reason))
cur.execute("UPDATE mod_log SET `message_id` = %s WHERE `case_id` = %s", (case_message.id, case_id))
# Finish Database Stuff and Commit
db.commit()
cur.close()
# Kick the Member
await bot.kick(member)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error when kicking the user."
" ".format(ctx.message.author) + str(e))
await bot.say("{0.mention}, the user was successfully kicked. A log entry has been added.".format(ctx.message.author))
# ********************************************** #
# START THE BOT ******************************** #
# ********************************************** #
# Run the Bot
bot.run('token-here')
| gpl-3.0 |
AlbertoPeon/invenio | modules/bibsword/lib/bibsword_client_templates.py | 37 | 41746 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client Templates
'''
from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_RECORD
class BibSwordTemplate:
'''
    This class contains attributes and methods that allow displaying all
    information used by the BibSword web user interface. This information
    consists of form, validation and error messages
'''
def __init__(self):
''' No init necessary for this class '''
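    # Usage sketch (names below are illustrative, not taken from this module): the
    # web interface is expected to build one instance and call the tmpl_* methods
    # to obtain page bodies, e.g.
    #   bibsword_templates = BibSwordTemplate()
    #   body = bibsword_templates.tmpl_display_submit_ack(remote_id, link)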
#---------------------------------------------------------------------------
# BibSword WebSubmit Interface
#---------------------------------------------------------------------------
def tmpl_display_submit_ack(self, remote_id, link):
'''
        This method generates the html code that displays the acknowledgement
message after the submission of a record.
@param remote_id: id of the record given by arXiv
@param link: links to modify or consult submission
@return: string containing the html code
'''
html = ''
html += '''<h1>Success !</h1>'''
        html += '''<p>The record has been successfully pushed to arXiv! <br />''' \
                '''You will get an email once it has been accepted by ''' \
                '''the arXiv moderators.</p>'''
html += '''<p>The arXiv id of the submission is: <b>%s</b></p>''' % \
remote_id
html += '''<p><a href="www.arxiv.org/user">Manage your submission</a></p>'''
return html
#---------------------------------------------------------------------------
# BibSword Administrator Interface
#---------------------------------------------------------------------------
def tmpl_display_admin_page(self, submissions, first_row, last_row,
total_rows, is_prev, is_last, offset,
error_messages=None):
'''
        format the html code that displays the submission table
@param submissions: list of all submissions and their status
@return: html code to be displayed
'''
        if error_messages is None:
error_messages = []
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
%(error_message)s
<input type="hidden" name="status" value="display_submission"/>
<input type="hidden" name="first_row" value="%(first_row)s"/>
<input type="hidden" name="last_row" value="%(last_row)s"/>
<input type="hidden" name="total_rows" value="%(total_rows)s" />
<input type="submit" name="submit" value="New submission"/><br/>
<br />
<input type="submit" name="submit" value="Refresh all"/><br/>
<br />
Display
<select name="offset">
<option value="5" %(selected_1)s>5</option>
<option value="10" %(selected_2)s>10</option>
<option value="25" %(selected_3)s>25</option>
<option value="50" %(selected_4)s>50</option>
<option value=%(total_rows)s %(selected_5)s>all</option>
</select>
rows per page <input type="submit" name="submit" value="Select" /><br />
<br />
<input type="submit" name="submit" value="First" %(is_prev)s/>
<input type="submit" name="submit" value="Prev" %(is_prev)s/>
Pages %(first_row)s - %(last_row)s / %(total_rows)s
<input type="submit" name="submit" value="Next" %(is_last)s/>
<input type="submit" name="submit" value="Last" %(is_last)s/><br/>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Submission state</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record number</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
</form>''' % {
'error_message': \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'first_row' : first_row,
'last_row' : last_row,
'total_rows' : total_rows,
'is_prev' : is_prev,
'is_last' : is_last,
'selected_1' : offset[0],
'selected_2' : offset[1],
'selected_3' : offset[2],
'selected_4' : offset[3],
'selected_5' : offset[4],
'submissions' : self.fill_submission_table(submissions)
}
return body
def tmpl_display_remote_server_info(self, server_info):
'''
        Display a table containing all server information
@param server_info: tuple containing all server infos
@return: html code for the table containing infos
'''
body = '''<table width="%(table_width)s">\n''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">ID</td>\n''' \
''' <td>%(server_id)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Name</td>\n''' \
''' <td>%(server_name)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Host</td>\n''' \
''' <td>%(server_host)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Username</td>\n''' \
''' <td>%(username)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Password</td>\n''' \
''' <td>%(password)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Email</td>\n''' \
''' <td>%(email)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Realm</td>\n''' \
''' <td>%(realm)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Record URL</td>\n''' \
''' <td>%(url_base_record)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">URL Servicedocument</td>\n'''\
''' <td>%(url_servicedocument)s</td>\n''' \
''' </tr>\n ''' \
'''</table>''' % {
'table_width' : '50%',
'server_id' : server_info['server_id'],
'server_name' : server_info['server_name'],
'server_host' : server_info['server_host'],
'username' : server_info['username'],
'password' : server_info['password'],
'email' : server_info['email'],
'realm' : server_info['realm'],
'url_base_record' : server_info['url_base_record'],
'url_servicedocument': server_info['url_servicedocument']
}
return body
def tmpl_display_remote_servers(self, remote_servers, id_record,
error_messages):
'''
        format the html code that displays a dropdown list containing the
servers
@param self: reference to the current instance of the class
@param remote_servers: list of tuple containing server's infos
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_server"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Forward a record</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Enter the number of the report to submit: </p>
</td>
<td align="left" width="%(row_width)s">
<input type="text" name="id_record" size="20"
value="%(id_record)s"/>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a remote server: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_remote_server" size="1">
<option value="0">-- select a remote server --</option>
%(remote_server)s
</select>
</td>
</tr>
<tr>
<td colspan="2" align="center">
<input type="submit" value="Select" name="submit"/>
</td>
</tr>
</table>
</form>''' % {
'error_message': \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'row_width' : '50%',
'id_record' : id_record,
'remote_server': \
self.fill_dropdown_remote_servers(remote_servers)
}
return body
def tmpl_display_collections(self, selected_server, server_infos,
collections, id_record, recid, error_messages):
'''
        format the html code that displays the selected server, the information
about the selected server and a dropdown list containing the server's
collections
@param self: reference to the current instance of the class
@param selected_server: tuple containing selected server name and id
@param server_infos: tuple containing infos about selected server
@param collections: list contianing server's collections
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_collection"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2></td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version: %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb]: %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa"><h2>Collection</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">Select a collection: </td>
<td align="left" width="%(row_width)s">
<select name="id_collection" size="1">
<option value="0">-- select a collection --</option>
%(collection)s
</select>
</td>
</tr>
<tr>
<td align="center" colspan="2">
<input type="submit" value="Select" name="submit"/>
</td>
</tr>
</table>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : \
self.display_error_message_row(error_messages),
'id_server' : selected_server['id'],
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload': server_infos['maxUploadSize'],
'collection' : \
self.fill_dropdown_collections(collections),
'id_record' : id_record,
'recid' : recid
}
return body
def tmpl_display_categories(self, selected_server, server_infos,
selected_collection, collection_infos,
primary_categories, secondary_categories,
id_record, recid, error_messages):
'''
        format the html code that displays the selected server, the information
        about the selected server, the selected collection, the information
about the collection and a dropdown list containing the server's
primary and secondary categories
@param self: reference to the current instance of the class
@param selected_server: tuple containing selected server name and id
@param server_infos: tuple containing infos about selected server
@param selected_collection: selected collection
@param collection_infos: tuple containing infos about selected col
@param primary_categories: list of mandated categories for the col
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_primary_category"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version: %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb]: %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Collection</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(collection_name)s</h2>
</td>
<td align="left">
URL: %(collection_url)s
</td>
</tr>
<tr>
<td align="left">
Accepted media types:
<ul>%(collection_accept)s</ul>
</td>
</tr>
<tr>
<td align="left" colspan=2>
<input type="submit" value="Modify collection" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Mandatory category</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a mandated category: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_primary" size="1">
<option value="0">-- select a category --</option>
%(primary_categories)s
</select>
</td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Optional categories</h2>
</td>
</tr>
<td align="right" width="%(row_width)s">
<p>Select optional categories: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_categories" size="10" multiple>
%(secondary_categories)s
</select>
</td>
</tr>
</table>
<p> </p>
<center>
<input type="submit" value="Select" name="submit"/>
</center>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : self.display_error_message_row(
error_messages),
# hidden input
'id_server' : selected_server['id'],
'id_collection' : selected_collection['id'],
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload' : server_infos['maxUploadSize'],
'collection_name' : selected_collection['label'],
'collection_accept': ''.join([
'''<li>%(name)s </li>''' % {
'name': accept
} for accept in collection_infos['accept'] ]),
'collection_url' : selected_collection['url'],
'primary_categories' : self.fill_dropdown_primary(
primary_categories),
'secondary_categories': self.fill_dropdown_secondary(
secondary_categories)
}
return body
def tmpl_display_metadata(self, user, server, collection, primary,
categories, medias, metadata, id_record, recid,
error_messages):
'''
        format a string containing all information before a submission
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="check_submission"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_primary" value="%(id_primary)s"/>
<input type="hidden" name="id_categories" value="%(id_categories)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Destination</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="3" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
Collection: %(collection_name)s ( %(collection_url)s )
</td>
</tr>
<tr>
<td align="left">
Primary category: %(primary_name)s ( %(primary_url)s )
</td>
</tr>
%(categories)s
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify destination" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa">
<h2>Submitter</h2>
</td>
</tr>
<tr>
<td width="%(row_width)s">Name:</td>
<td><input type="text" name="author_name" size="100"
value="%(user_name)s"/></td>
</tr>
<tr>
<td>Email:</td>
<td><input type="text" name="author_email" size="100"
value="%(user_email)s"/></td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa"><h2>Media</h2></td>
</tr>
<tr><td colspan="4">%(medias)s%(media_help)s</td></tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="3" bgcolor="#e6e6fa"><h2>Metadata</h2> <font color="red"><b>Warning:</b> modification(s) will not be saved on the %(CFG_SITE_NAME)s</font>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Report Number<span style="color:#f00">*</span>:</p></td>
<td><input type="text" name="id" size="100" value="%(id)s"/></td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Title<span style="color:#f00">*</span>:</p></td>
<td><input type="text" name="title" size="100" value="%(title)s"/>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Summary<span style="color:#f00">*</span>:</p></td>
<td>
<textarea name="summary" rows="4" cols="100">%(summary)s
</textarea>
</td>
</tr>
%(contributors)s
%(journal_refs)s
%(report_nos)s
</table>
<p><font color="red">The fields having a * are mandatory</font></p>
<center>
<input type="submit" value="Submit" name="submit"/>
</center>
<form>''' % {
'table_width' : '100%',
'row_width' : '25%',
'error_message' : \
self.display_error_message_row(error_messages),
'CFG_SITE_NAME': CFG_SITE_NAME,
# hidden input
'id_server' : server['id'],
'id_collection' : collection['id'],
'id_primary' : primary['id'],
'id_categories' : self.get_list_id_categories(categories),
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : server['name'],
'collection_name' : collection['label'],
'collection_url' : collection['url'],
'primary_name' : primary['label'],
'primary_url' : primary['url'],
'categories' : self.fill_optional_category_list(categories),
#user
'user_name' : user['nickname'],
'user_email' : user['email'],
# media
'medias' : self.fill_media_list(medias, server['id']),
'media_help' : self.fill_arxiv_help_message(),
# metadata
'id' : metadata['id'],
'title' : metadata['title'],
'summary' : metadata['summary'],
'contributors' : self.fill_contributors_list(
metadata['contributors']),
'journal_refs' : self.fill_journal_refs_list(
metadata['journal_refs']),
'report_nos' : self.fill_report_nos_list(
metadata['report_nos'])
}
return body
def tmpl_display_list_submission(self, submissions):
'''
        Display the data of submitted records
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Document successfully submitted !</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
<a href=%(CFG_SITE_URL)s/bibsword>Return</a>
</form>''' % {
'table_width' : '100%',
'submissions' : self.fill_submission_table(submissions),
'CFG_SITE_URL' : CFG_SITE_URL
}
return body
#***************************************************************************
# Private functions
#***************************************************************************
def display_error_message_row(self, error_messages):
'''
        return the error messages in the form of a bullet list
        @param error_messages: list of error_messages to display
        @return: html code that displays the list of errors
'''
# if no errors, return nothing
if len(error_messages) == 0:
return ''
if len(error_messages) == 1:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> The following error was found: </p>
<ul>
'''
else:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> Following errors were found: </p>
<ul>
'''
# insert each error lines
for error_message in error_messages:
body = body + '''
<li>%(error)s</li>''' % {
'error': error_message
}
body = body + '''
</ul>
</font>
</td>
</tr>'''
return body
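    # Example (illustrative): display_error_message_row(['Missing title']) returns
    # a single <tr> block containing one bullet item; an empty list returns ''.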
def fill_submission_table(self, submissions):
'''
        This method returns the body of the submission state table. Each
        submission given in parameters has one row
@param submissions: submission status list
@return: html table body
'''
return ''.join([
''' <tr>
<td>%(id_server)s: <a href="%(server_infos)s">
%(server_name)s</a></td>
                <td>%(user_name)s <br/> %(user_email)s</td>
<td>%(id_bibrec)s: <a href="%(cfg_site_url)s/%(CFG_SITE_RECORD)s/%(id_bibrec)s"
target="_blank">%(no_bibrec)s</a></td>
<td><a href="%(url_base_remote)s/%(id_remote)s" target="_blank">
%(id_remote)s</a></td>
<td>%(status)s</td>
<td><b>submission: </b> %(submission_date)s <br/>
<b>publication: </b> %(publication_date)s <br/>
<b>removal: </b> %(removal_date)s </td>
<td><b>media: </b> <a href="%(media_link)s" target="_blank">
%(media_link)s</a> <br/>
<b>metadata: </b> <a href="%(metadata_link)s" target="_blank">
%(metadata_link)s</a> <br />
<b>status: </b> <a href="%(status_link)s" target="_blank">
%(status_link)s</a></td>
</tr>''' % {
'id_server' : str(submission['id_server']),
'server_infos' : "%s/bibsword/remoteserverinfos?id=%s" % \
(CFG_SITE_URL, submission['id_server']),
'server_name' : str(submission['server_name']),
'user_name' : str(submission['user_name']),
'user_email' : str(submission['user_email']),
'id_bibrec' : str(submission['id_record']),
'no_bibrec' : str(submission['report_no']),
'id_remote' : str(submission['id_remote']),
'status' : str(submission['status']),
'submission_date' : str(submission['submission_date']),
'publication_date' : str(submission['publication_date']),
'removal_date' : str(submission['removal_date']),
'media_link' : str(submission['link_medias']),
'metadata_link' : str(submission['link_metadata']),
'status_link' : str(submission['link_status']),
'url_base_remote' : str(submission['url_base_remote']),
'cfg_site_url' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD
} for submission in submissions])
def fill_dropdown_remote_servers(self, remote_servers):
'''
        This method fills a dropdown list of remote servers.
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s - %(host)s</option>''' % {
'id': str(remote_server['id']),
'name': remote_server['name'],
'host': remote_server['host']
} for remote_server in remote_servers])
def fill_dropdown_collections(self, collections):
'''
        This method fills a dropdown list of collections.
@param collections: list of all collections with name - url
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': str(collection['id']),
'name': collection['label']
} for collection in collections])
def fill_dropdown_primary(self, primary_categories):
'''
        This method fills the primary dropdown list with the data given in
parameter
@param primary_categories: list of 'url' 'name' tuples
@return: html code generated to display the list
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': primary_categorie['id'],
'name': primary_categorie['label']
} for primary_categorie in primary_categories])
def fill_dropdown_secondary(self, categories):
'''
        This method fills a category list. This list allows the multi-selection
        of items. To select more than one category in a browser, use
        ctrl + click.
@param categories: list of all categories in the format name - url
@return: the html code that display each dropdown list
'''
        if len(categories) == 0:
return ''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': category['id'],
'name': category['label']
} for category in categories])
def fill_optional_category_list(self, categories):
'''
        This method fills a table row that contains the name and url of the selected
optional categories
@param self: reference to the current instance of the class
@param categories: list of tuples containing selected categories
@return: html code generated to display the list
'''
if len(categories) == 0:
return ''
else:
body = '<tr><td>'
body = body + ''.join([
'''<p>Category: %(category_name)s ( %(category_url)s )</p>'''%{
'category_name' : category['label'],
'category_url' : category['url']
} for category in categories
])
body = body + '</td></tr>'
return body
def fill_media_list(self, medias, id_server, from_websubmit=False):
'''
        Concatenate the string that contains all information about the media files
'''
text = ''
if id_server == 1:
media_type = self.format_media_list_by_type(medias)
text = '''<h2>Please select files you would like to push to arXiv:</h2>'''
for mtype in media_type:
text += '''<h3><b>%s: </b></h3>''' % mtype['media_type']
text += '''<blockquote>'''
for media in mtype['media_list']:
text += '''<input type='checkbox' name="media" value="%s" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])
text += "</blockquote>"
text += '''<h3>Upload</h3>'''
text += '''<blockquote>'''
text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''
            if not from_websubmit:
text += '''<input type="file" name="new_media" size="60"/>'''
return text
def fill_arxiv_help_message(self):
text = '''</blockquote><h3>Help</h3>'''
text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see:'''\
'''<ul>'''\
'''<li><a href="http://arxiv.org/help/submit" target="_blank">'''\
'''arXiv submission process</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_tex" target="_blank">'''\
'''arXiv TeX submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_docx" target="_blank">'''\
'''arXiv Docx submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_pdf" target="_blank">'''\
'''arXiv PDF submission</a></li>'''\
'''</ul></blockquote>'''
return text
def fill_contributors_list(self, contributors):
'''
        This method displays each contributor in the form of an editable text
        input. This allows the user to modify it.
@param contributors: The list of all contributors of the document
@return: the html code that display each dropdown list
'''
output = ''
is_author = True
for author in contributors:
nb_rows = 2
author_name = \
'''<LABEL for="name">Name: </LABEL><input type = "text" ''' \
'''name = "contributor_name" size = "100" value = "%s" ''' \
'''id="name"/>''' % author['name']
author_email = \
'''<LABEL for = "email">Email: </LABEL>''' \
'''<input type = "text" name = "contributor_email" ''' \
'''size = "100" value = "%s" id = "email"/>''' % author['email']
author_affiliations = []
for affiliation in author['affiliation']:
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL> ''' \
'''<input type="text" name = "contributor_affiliation" ''' \
'''size = "100" value = "%s" id = "affiliation"/>''' % \
affiliation
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL>''' \
'''<input type = "text" name = "contributor_affiliation" ''' \
'''size = "100" id = "affiliation"/>'''
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
if is_author:
output += '''<tr><td rowspan = "%s">Author: </td>''' % nb_rows
is_author = False
else:
output += '''<tr><td rowspan = "%s">Contributor: </td>''' % \
nb_rows
output += '''<td>%s</td></tr>''' % author_name
if author_email != '':
output += '''<tr><td>%s</td></tr>''' % author_email
for affiliation in author_affiliations:
output += '''<tr><td>%s</td></tr>''' % affiliation
output += \
'''<input type = "hidden" name = "contributor_affiliation" ''' \
'''value = "next"/>'''
return output
def fill_journal_refs_list(self, journal_refs):
'''
        This method displays each journal reference in the form of an editable
        text input. This allows the user to modify it.
@param journal_refs: The list of all journal references of the document
@return: the html code that display each dropdown list
'''
html = ''
if len(journal_refs) > 0:
html += '''
<tr>
<td align="left"><p>Journal references: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="journal_refs" size="100" ''' \
'''value="%(journal_ref)s"/></p>
''' % {
'journal_ref': journal_ref
} for journal_ref in journal_refs
])
html = html + '''
</td>
</tr>
'''
return html
def fill_report_nos_list(self, report_nos):
'''
        Concatenate a string containing the report number html table rows
'''
html = ''
if len(report_nos) > 0:
html = '''
<tr>
<td align="left"><p>Report numbers: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="report_nos" size="100" ''' \
'''value="%(report_no)s"/></p>''' % {
'report_no': report_no
} for report_no in report_nos
])
html = html + '''
</td>
</tr>
'''
return html
def get_list_id_categories(self, categories):
'''
        Return the list of ids from the given categories
'''
id_categories = []
for category in categories:
id_categories.append(category['id'])
return id_categories
def format_media_list_by_type(self, medias):
'''
        This function formats the media list by type (Main, Uploaded, ...)
'''
#format media list by type of document
media_type = []
for media in medias:
# if it is the first media of this type, create a new type
is_type_in_media_type = False
for type in media_type:
if media['collection'] == type['media_type']:
is_type_in_media_type = True
            if not is_type_in_media_type:
type = {}
type['media_type'] = media['collection']
type['media_list'] = []
media_type.append(type)
            # insert the media into the matching media_type element
for type in media_type:
if type['media_type'] == media['collection']:
type['media_list'].append(media)
return media_type
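# A minimal sketch of the media structure the helpers above assume (the key
# names come from the code above, the values are hypothetical):
#
#   medias = [
#       {'collection': 'Main', 'path': '/tmp/demo.pdf',
#        'name': 'demo.pdf', 'selected': 'checked'},
#       {'collection': 'Uploaded', 'path': '/tmp/figure1.eps',
#        'name': 'figure1.eps', 'selected': ''},
#   ]
#
# format_media_list_by_type(medias) would then group both entries by their
# 'collection' value:
#
#   [{'media_type': 'Main', 'media_list': [<demo.pdf entry>]},
#    {'media_type': 'Uploaded', 'media_list': [<figure1.eps entry>]}]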
| gpl-2.0 |
jcai19/smm_gem5 | src/dev/x86/I82094AA.py | 69 | 2201 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
from X86IntPin import X86IntSinkPin
class I82094AA(BasicPioDevice):
type = 'I82094AA'
cxx_class = 'X86ISA::I82094AA'
cxx_header = "dev/x86/i82094aa.hh"
apic_id = Param.Int(1, 'APIC id for this IO APIC')
int_master = MasterPort("Port for sending interrupt messages")
int_latency = Param.Latency('1ns', \
"Latency for an interrupt to propagate through this device.")
external_int_pic = Param.I8259(NULL, "External PIC, if any")
def pin(self, line):
return X86IntSinkPin(device=self, number=line)
| bsd-3-clause |
MichaelNedzelsky/intellij-community | python/helpers/py3only/docutils/parsers/rst/languages/zh_cn.py | 52 | 3936 | # -*- coding: utf-8 -*-
# $Id: zh_cn.py 7119 2011-09-02 13:00:23Z milde $
# Author: Panjunyong <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Simplified Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'注意': 'attention',
'小心': 'caution',
'code (translation required)': 'code',
'危险': 'danger',
'错误': 'error',
'提示': 'hint',
'重要': 'important',
'注解': 'note',
'技巧': 'tip',
'警告': 'warning',
'忠告': 'admonition',
'侧框': 'sidebar',
'主题': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'醒目': 'rubric',
'铭文': 'epigraph',
'要点': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'复合': 'compound',
'容器': 'container',
#u'questions (translation required)': 'questions',
'表格': 'table',
'csv表格': 'csv-table',
'列表表格': 'list-table',
#u'qa (translation required)': 'questions',
#u'faq (translation required)': 'questions',
'元数据': 'meta',
'math (translation required)': 'math',
#u'imagemap (translation required)': 'imagemap',
'图片': 'image',
'图例': 'figure',
'包含': 'include',
'原文': 'raw',
'代替': 'replace',
'统一码': 'unicode',
'日期': 'date',
'类型': 'class',
'角色': 'role',
'默认角色': 'default-role',
'标题': 'title',
'目录': 'contents',
'章节序号': 'sectnum',
'题头': 'header',
'页脚': 'footer',
#u'footnotes (translation required)': 'footnotes',
#u'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Simplified Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'缩写': 'abbreviation',
'简称': 'acronym',
'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'下标': 'subscript',
'上标': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'强调': 'emphasis',
'加粗': 'strong',
'字面': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Simplified Chinese role names to canonical role names
for interpreted text."""
| apache-2.0 |
nicain/dipde_dev | dipde/interfaces/zmq/__init__.py | 1 | 4371 | import time
import zmq
import threading
context = zmq.Context()
class PublishCallback(object):
def __init__(self, port, topic, message_callback):
self.port = port
self.topic = topic
self.message_callback = message_callback
self.socket = context.socket(zmq.PUB)
def __call__(self, obj):
message_to_send = list(self.message_callback(obj))
message_to_send.insert(0,"%s" % self.topic)
self.socket.send_multipart(map(str, message_to_send))
class PublishCallbackConnect(PublishCallback):
def __init__(self, port, topic, message_callback):
super(self.__class__, self).__init__(port, topic, message_callback)
self.socket.connect("tcp://localhost:%s" % self.port)
class CallbackSubscriber(object):
def __init__(self, port=None, receive_callback=None):
self.socket = context.socket(zmq.SUB)
if port is None:
self.port = self.socket.bind_to_random_port('tcp://*', min_port=6001, max_port=6004, max_tries=100)
else:
self.socket.bind("tcp://*:%s" % port)
self.port = port
self.socket.setsockopt(zmq.SUBSCRIBE, 'test')
if receive_callback is None:
def receive_callback(received_message):
print received_message
self.receive_callback = receive_callback
def run(self):
while True:
received_message_multipart = self.socket.recv_multipart()
topic = received_message_multipart[0]
received_message = received_message_multipart[1:]
self.receive_callback(received_message)
class CallbackSubscriberThread(threading.Thread):
def __init__(self, port=None):
super(self.__class__, self).__init__()
self.subscriber = CallbackSubscriber(port)
self.daemon = True
def run(self, port=None):
self.subscriber.run()
@property
def port(self):
return self.subscriber.port
class RequestConnection(object):
def __init__(self, port):
self.port = port
self.socket = context.socket(zmq.REQ)
self.socket.connect("tcp://localhost:%s" % port)
def __call__(self, *args):
if len(args) == 0:
self.socket.send(b'')
else:
self.socket.send_multipart(map(str,args))
message = self.socket.recv_multipart()
return float(message[0])
def shutdown(self):
self.socket.close()
assert self.socket.closed
class ReplyServerBind(object):
def __init__(self, reply_function, port=None):
self.socket = context.socket(zmq.REP)
if port is None:
self.port = self.socket.bind_to_random_port('tcp://*', min_port=6001, max_port=6004, max_tries=100)
else:
self.socket.bind("tcp://*:%s" % port)
self.port = port
self.reply_function = reply_function
def run(self):
while True:
message = self.socket.recv()
# print 'message:', message, type(message)
if message == 'SHUTDOWN':
break
# print 'message'
if message == '':
requested_args = tuple()
else:
requested_args = tuple([float(message)])
self.socket.send_multipart([b"%s" % self.reply_function(*requested_args)])
self.socket.send('DOWN')
self.socket.close()
class ReplyServerThread(threading.Thread):
def __init__(self, reply_function, port=None):
super(ReplyServerThread, self).__init__()
self._stop = threading.Event()
self.daemon = True
self.reply_function = reply_function
self.server = ReplyServerBind(self.reply_function, port=port)
def run(self, port=None):
self.server.run()
def shutdown(self):
shutdown_socket = context.socket(zmq.REQ)
shutdown_socket.connect("tcp://localhost:%s" % self.port)
shutdown_socket.send('SHUTDOWN')
message = shutdown_socket.recv()
assert message == 'DOWN'
self.stop()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
@property
def port(self):
return self.server.port
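# A minimal usage sketch (hypothetical port and callback) pairing the reply
# server with a request connection; both sides share the module-level context:
#
#   server = ReplyServerThread(lambda x: x * 2, port=6001)
#   server.start()
#   request = RequestConnection(server.port)
#   request(21.0)        # -> 42.0
#   server.shutdown()
#   request.shutdown()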
| gpl-3.0 |
ZhaoCJ/django | django/db/backends/utils.py | 2 | 5407 | from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
SET_DIRTY_ATTRS = frozenset(['execute', 'executemany', 'callproc'])
WRAP_ERROR_ATTRS = frozenset([
'callproc', 'close', 'execute', 'executemany',
'fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
if attr in CursorWrapper.SET_DIRTY_ATTRS:
self.db.set_dirty()
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
class CursorDebugWrapper(CursorWrapper):
def execute(self, sql, params=None):
self.db.set_dirty()
start = time()
try:
with self.db.wrap_database_errors:
if params is None:
# params default might be backend specific
return self.cursor.execute(sql)
return self.cursor.execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
self.db.set_dirty()
start = time()
try:
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.' + microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
    if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s' % (name[:length - hash_len], hsh)
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
return "{0:f}".format(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places, value)
| bsd-3-clause |
ReturntoZero/codecombat | scripts/analytics/mixpanelGetEvent.py | 97 | 7517 | # Get mixpanel event data via export API
# Useful for debugging Mixpanel data weirdness
targetLevels = ['dungeons-of-kithgard', 'the-raised-sword', 'endangered-burl']
targetLevels = ['dungeons-of-kithgard']
eventFunnel = ['Started Level', 'Saw Victory']
# eventFunnel = ['Saw Victory']
# eventFunnel = ['Started Level']
import sys
from pprint import pprint
from datetime import datetime, timedelta
from mixpanel import Mixpanel
try:
import json
except ImportError:
import simplejson as json
# NOTE: mixpanel dates are by day and inclusive
# E.g. '2014-12-08' is any date that day, up to 2014-12-09 12am
if __name__ == '__main__':
    if len(sys.argv) != 3:
print "Script format: <script> <api_key> <api_secret>"
else:
scriptStart = datetime.now()
api_key = sys.argv[1]
api_secret = sys.argv[2]
api = Mixpanel(
api_key = api_key,
api_secret = api_secret
)
startDate = '2015-01-01'
endDate = '2015-01-26'
startEvent = eventFunnel[0]
endEvent = eventFunnel[-1]
print("Requesting data for {0} to {1}".format(startDate, endDate))
data = api.request(['export'], {
# 'where': '"539c630f30a67c3b05d98d95" == properties["id"]',
# 'where': "('539c630f30a67c3b05d98d95' == properties['id'] or '539c630f30a67c3b05d98d95' == properties['distinct_id'])",
'event': eventFunnel,
'from_date': startDate,
'to_date': endDate
})
weirdUserIDs = []
eventUsers = {}
levelEventUserDayMap = {}
levelUserEventDayMap = {}
lines = data.split('\n')
print "Received %d entries" % len(lines)
for line in lines:
try:
                if len(line) == 0: continue
eventData = json.loads(line)
# pprint(eventData)
# break
eventName = eventData['event']
if not eventName in eventFunnel:
print 'Unexpected event ' + eventName
break
if not 'properties' in eventData:
                    print('no properties, skipping')
continue
properties = eventData['properties']
if not 'distinct_id' in properties:
                    print('no distinct_id, skipping')
continue
user = properties['distinct_id']
if not 'time' in properties:
                    print('no time, skipping')
continue
time = properties['time']
pst = datetime.fromtimestamp(int(properties['time']))
utc = pst + timedelta(0, 8 * 60 * 60)
dateCreated = utc.isoformat()
day = dateCreated[0:10]
if day < startDate or day > endDate:
print "Skipping {0}".format(day)
continue
if 'levelID' in properties:
level = properties['levelID']
elif 'level' in properties:
level = properties['level'].lower().replace(' ', '-')
else:
print("Unkonwn level for", eventName)
print(properties)
break
if not level in targetLevels: continue
# if user != "539c630f30a67c3b05d98d95": continue
pprint(eventData)
# if user == "54c1fc3a08652d5305442c6b":
# pprint(eventData)
# break
# if '-' in user:
# weirdUserIDs.append(user)
# # pprint(eventData)
# # break
# continue
# print level
if not level in levelEventUserDayMap: levelEventUserDayMap[level] = {}
if not eventName in levelEventUserDayMap[level]: levelEventUserDayMap[level][eventName] = {}
if not user in levelEventUserDayMap[level][eventName] or levelEventUserDayMap[level][eventName][user] > day:
levelEventUserDayMap[level][eventName][user] = day
if not user in eventUsers: eventUsers[user] = True
if not level in levelUserEventDayMap: levelUserEventDayMap[level] = {}
if not user in levelUserEventDayMap[level]: levelUserEventDayMap[level][user] = {}
if not eventName in levelUserEventDayMap[level][user] or levelUserEventDayMap[level][user][eventName] > day:
levelUserEventDayMap[level][user][eventName] = day
except:
print "Unexpected error:", sys.exc_info()[0]
print line
break
# pprint(levelEventUserDayMap)
print("Weird user IDs: {0}".format(len(weirdUserIDs)))
for level in levelEventUserDayMap:
for event in levelEventUserDayMap[level]:
print("{0} {1} {2}".format(level, event, len(levelEventUserDayMap[level][event])))
print("Users: {0}".format(len(eventUsers)))
noStartDayUsers = []
levelFunnelData = {}
for level in levelUserEventDayMap:
for user in levelUserEventDayMap[level]:
# 6455
# for event in levelUserEventDayMap[level][user]:
# day = levelUserEventDayMap[level][user][event]
# if not level in levelFunnelData: levelFunnelData[level] = {}
# if not day in levelFunnelData[level]: levelFunnelData[level][day] = {}
# if not event in levelFunnelData[level][day]: levelFunnelData[level][day][event] = 0
# levelFunnelData[level][day][event] += 1
# 5382
funnelStartDay = None
for event in levelUserEventDayMap[level][user]:
day = levelUserEventDayMap[level][user][event]
if not level in levelFunnelData: levelFunnelData[level] = {}
if not day in levelFunnelData[level]: levelFunnelData[level][day] = {}
if not event in levelFunnelData[level][day]: levelFunnelData[level][day][event] = 0
if eventFunnel[0] == event:
levelFunnelData[level][day][event] += 1
funnelStartDay = day
break
if funnelStartDay:
for event in levelUserEventDayMap[level][user]:
if not event in levelFunnelData[level][funnelStartDay]:
levelFunnelData[level][funnelStartDay][event] = 0
if eventFunnel[0] != event:
levelFunnelData[level][funnelStartDay][event] += 1
for i in range(1, len(eventFunnel)):
event = eventFunnel[i]
if not event in levelFunnelData[level][funnelStartDay]:
levelFunnelData[level][funnelStartDay][event] = 0
else:
noStartDayUsers.append(user)
pprint(levelFunnelData)
print("No start day count: {0}".format(len(noStartDayUsers)))
noStartDayUsers.sort()
for i in range(len(noStartDayUsers)):
if i > 50: break
print(noStartDayUsers[i])
print("Script runtime: {0}".format(datetime.now() - scriptStart))
| mit |
lunafeng/django | django/core/mail/backends/console.py | 696 | 1477 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def write_message(self, message):
msg = message.message()
msg_data = msg.as_bytes()
if six.PY3:
charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8'
msg_data = msg_data.decode(charset)
self.stream.write('%s\n' % msg_data)
self.stream.write('-' * 79)
self.stream.write('\n')
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
msg_count = 0
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.write_message(message)
self.stream.flush() # flush after each message
msg_count += 1
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return msg_count
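# Typical development configuration (standard Django setting) to route all
# outgoing mail to stdout instead of sending it:
#
#   # settings.py
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'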
| bsd-3-clause |
sambyers/o365_fmc | .venv/lib/python3.6/site-packages/requests/packages/__init__.py | 838 | 1384 | '''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.
In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::
requests.packages.urllib3 is not urllib3
With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''
from __future__ import absolute_import
import sys
try:
from . import urllib3
except ImportError:
import urllib3
sys.modules['%s.urllib3' % __name__] = urllib3
try:
from . import chardet
except ImportError:
import chardet
sys.modules['%s.chardet' % __name__] = chardet
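# With the stub in place both import paths resolve to the same module object,
# e.g. (illustrative, and only when the unbundled system copy is in use):
#
#   import urllib3
#   from requests.packages import urllib3 as vendored
#   assert vendored is urllib3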
| gpl-3.0 |
infinitespace/deepdive | ddlib/without_ddlib.py | 15 | 1750 | #! /usr/bin/env python
# File: udf/ext_has_spouse_features.py
import sys, json
# For each input tuple
# TODO: Sample Data and the input schema.
# sample json
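# A hypothetical input line matching the fields read below (the real schema is
# defined by the extractor's input query):
#
#   {"relation_id": 1,
#    "words": ["Barack", "Obama", "married", "Michelle", "Obama"],
#    "lemma": ["Barack", "Obama", "marry", "Michelle", "Obama"],
#    "p1.start_position": 0, "p1.length": 2,
#    "p2.start_position": 3, "p2.length": 2}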
for row in sys.stdin:
obj = json.loads(row)
# Library/DSL??? This is a span, it should be an object.
p1_start = obj["p1.start_position"]
p1_length = obj["p1.length"]
p1_end = p1_start + p1_length
p2_start = obj["p2.start_position"]
p2_length = obj["p2.length"]
p2_end = p2_start + p2_length
p1_text = obj["words"][p1_start:p1_length]
p2_text = obj["words"][p2_start:p2_length]
left_idx = min(p1_end, p2_end)
right_idx = max(p1_start, p2_start)
# Features for this pair come in here
features = set()
# Feature 1: Find out if a lemma of marry occurs.
# A better feature would ensure this is on the dependency path between the two.
lemma_between = obj["lemma"][left_idx:right_idx]
married_words = ['marry', 'widow']
for mw in married_words:
if mw in lemma_between:
features.add("important_word=%s" % mw)
# Feature 2: The number of words between the two phrases.
# Intuition: if they are close by, the link may be stronger.
words_between = obj["words"][left_idx:right_idx]
l = len(words_between)
if l < 5: features.add("num_words_between=%s" % l)
else: features.add("many_words_between")
# Feature 3: Check if the last name matches heuristically.
last_word_left = obj["words"][p1_end - 1]
last_word_right = obj["words"][p2_end - 1]
if (last_word_left == last_word_right):
features.add("potential_last_name_match")
# TODO: Add more features, look at dependency paths, etc
for feature in features:
print json.dumps({
"relation_id": obj["relation_id"],
"feature": feature
})
| apache-2.0 |
yantrabuddhi/nativeclient | buildbot/buildbot_lib.py | 1 | 21952 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os.path
import shutil
import subprocess
import stat
import sys
import time
import traceback
ARCH_MAP = {
'32': {
'gyp_arch': 'ia32',
'scons_platform': 'x86-32',
},
'64': {
'gyp_arch': 'x64',
'scons_platform': 'x86-64',
},
'arm': {
'gyp_arch': 'arm',
'scons_platform': 'arm',
},
'mips32': {
'gyp_arch': 'mips32',
'scons_platform': 'mips32',
},
}
def RunningOnBuildbot():
return os.environ.get('BUILDBOT_SLAVE_TYPE') is not None
def GetHostPlatform():
sys_platform = sys.platform.lower()
if sys_platform.startswith('linux'):
return 'linux'
elif sys_platform in ('win', 'win32', 'windows', 'cygwin'):
return 'win'
elif sys_platform in ('darwin', 'mac'):
return 'mac'
else:
raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
"""
Set default values for the attributes needed by the SCons function, so that
SCons can be run without needing ParseStandardCommandLine
"""
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = 'opt'
context['default_scons_mode'] = ['opt-host', 'nacl']
context['default_scons_platform'] = ('x86-64' if platform == 'win'
else 'x86-32')
context['android'] = False
context['clang'] = False
context['asan'] = False
context['pnacl'] = False
context['use_glibc'] = False
context['use_breakpad_tools'] = False
context['max_jobs'] = 8
context['scons_args'] = []
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
# Poke around looking for MSVC. We should do something more principled in
# the future.
# The name of Program Files can differ, depending on the bittage of Windows.
program_files = r'c:\Program Files (x86)'
if not os.path.exists(program_files):
program_files = r'c:\Program Files'
if not os.path.exists(program_files):
raise Exception('Cannot find the Program Files directory!')
# The location of MSVC can differ depending on the version.
msvc_locs = [
('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'),
('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
]
for dirname, comntools_var, gyp_msvs_version in msvc_locs:
msvc = os.path.join(program_files, dirname)
context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
if os.path.exists(msvc):
break
else:
# The break statement did not execute.
raise Exception('Cannot find MSVC!')
# Put MSVC in the path.
vc = os.path.join(msvc, 'VC')
comntools = os.path.join(msvc, 'Common7', 'Tools')
perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
context.SetEnv('PATH', os.pathsep.join([
context.GetEnv('PATH'),
vc,
comntools,
perf]))
# SCons needs this variable to find vsvars.bat.
# The end slash is needed because the batch files expect it.
context.SetEnv(comntools_var, comntools + '\\')
  # This environment variable will cause SCons to print debug info while it searches
# for MSVC.
context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
# Needed for finding devenv.
context['msvc'] = msvc
SetupGyp(context, [])
def SetupGyp(context, extra_vars=[]):
if RunningOnBuildbot():
goma_opts = [
'use_goma=1',
'gomadir=/b/build/goma',
]
else:
goma_opts = []
context.SetEnv('GYP_DEFINES', ' '.join(
context['gyp_vars'] + goma_opts + extra_vars))
def SetupLinuxEnvironment(context):
if context['arch'] == 'mips32':
# Ensure the trusted mips toolchain is installed.
cmd = ['build/package_version/package_version.py', '--packages',
'linux_x86/mips_trusted', 'sync', '-x']
Command(context, cmd)
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupMacEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupAndroidEnvironment(context):
SetupGyp(context, ['OS=android', 'target_arch='+context['gyp_arch']])
context.SetEnv('GYP_CROSSCOMPILE', '1')
def ParseStandardCommandLine(context):
"""
The standard buildbot scripts require 3 arguments to run. The first
argument (dbg/opt) controls if the build is a debug or a release build. The
second argument (32/64) controls the machine architecture being targeted.
The third argument (newlib/glibc) controls which c library we're using for
the nexes. Different buildbots may have different sets of arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
action='store_true', help='Do not execute any commands.')
parser.add_option('--inside-toolchain', dest='inside_toolchain',
default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
action='store_true', help='Inside toolchain build.')
parser.add_option('--android', dest='android', default=False,
action='store_true', help='Build for Android.')
parser.add_option('--clang', dest='clang', default=False,
action='store_true', help='Build trusted code with Clang.')
parser.add_option('--coverage', dest='coverage', default=False,
action='store_true',
help='Build and test for code coverage.')
parser.add_option('--validator', dest='validator', default=False,
action='store_true',
help='Only run validator regression test')
parser.add_option('--asan', dest='asan', default=False,
action='store_true', help='Build trusted code with ASan.')
parser.add_option('--scons-args', dest='scons_args', default =[],
action='append', help='Extra scons arguments.')
parser.add_option('--step-suffix', metavar='SUFFIX', default='',
help='Append SUFFIX to buildbot step names.')
parser.add_option('--no-gyp', dest='no_gyp', default=False,
action='store_true', help='Do not run the gyp build')
parser.add_option('--no-goma', dest='no_goma', default=False,
action='store_true', help='Do not run with goma')
parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
default=False, action='store_true',
help='Use breakpad tools for testing')
parser.add_option('--skip-build', dest='skip_build', default=False,
action='store_true',
help='Skip building steps in buildbot_pnacl')
parser.add_option('--skip-run', dest='skip_run', default=False,
action='store_true',
help='Skip test-running steps in buildbot_pnacl')
options, args = parser.parse_args()
if len(args) != 3:
parser.error('Expected 3 arguments: mode arch toolchain')
# script + 3 args == 4
mode, arch, toolchain = args
if mode not in ('dbg', 'opt', 'coverage'):
parser.error('Invalid mode %r' % mode)
if arch not in ARCH_MAP:
parser.error('Invalid arch %r' % arch)
if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'):
parser.error('Invalid toolchain %r' % toolchain)
# TODO(ncbray) allow a command-line override
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = mode
context['arch'] = arch
context['android'] = options.android
# ASan is Clang, so set the flag to simplify other checks.
context['clang'] = options.clang or options.asan
context['validator'] = options.validator
context['asan'] = options.asan
# TODO(ncbray) turn derived values into methods.
context['gyp_mode'] = {
'opt': 'Release',
'dbg': 'Debug',
'coverage': 'Debug'}[mode]
context['gn_is_debug'] = {
'opt': 'false',
'dbg': 'true',
'coverage': 'true'}[mode]
context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
context['gyp_vars'] = []
if context['clang']:
context['gyp_vars'].append('clang=1')
if context['asan']:
context['gyp_vars'].append('asan=1')
context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
context['default_scons_mode'] = ['nacl']
# Only Linux can build trusted code on ARM.
# TODO(mcgrathr): clean this up somehow
if arch != 'arm' or platform == 'linux':
context['default_scons_mode'] += [mode + '-host']
context['use_glibc'] = toolchain == 'glibc'
context['pnacl'] = toolchain == 'pnacl'
context['nacl_clang'] = toolchain == 'nacl_clang'
context['max_jobs'] = 8
context['dry_run'] = options.dry_run
context['inside_toolchain'] = options.inside_toolchain
context['step_suffix'] = options.step_suffix
context['no_gyp'] = options.no_gyp
context['no_goma'] = options.no_goma
context['coverage'] = options.coverage
context['use_breakpad_tools'] = options.use_breakpad_tools
context['scons_args'] = options.scons_args
context['skip_build'] = options.skip_build
context['skip_run'] = options.skip_run
# Don't run gyp on coverage builds.
if context['coverage']:
context['no_gyp'] = True
for key, value in sorted(context.config.items()):
print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
"""
Create a directory if it does not already exist.
Does not mask failures, but there really shouldn't be any.
"""
if not os.path.exists(path):
os.makedirs(path)
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
try:
RemovePath(path)
except Exception:
print 'Failed to remove %s' % path
else:
print 'Skipping %s' % path
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
  # directories fail. We currently do not have a complete understanding of why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
  # Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if GetHostPlatform() == 'win':
count = 0
while True:
try:
op(*args)
break
except Exception:
print "FAILED: %s %s" % (op.__name__, repr(args))
count += 1
if count < 5:
print "RETRY: %s %s" % (op.__name__, repr(args))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def PermissionsFixOnError(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def _RemoveDirectory(path):
print 'Removing %s' % path
if os.path.exists(path):
shutil.rmtree(path, onerror=PermissionsFixOnError)
print ' Succeeded.'
else:
print ' Path does not exist, nothing to do.'
def RemoveDirectory(path):
"""
Remove a directory if it exists.
Does not mask failures, although it does retry a few times on Windows.
"""
Retry(_RemoveDirectory, path)
def RemovePath(path):
"""Remove a path, file or directory."""
if os.path.isdir(path):
RemoveDirectory(path)
else:
if os.path.isfile(path) and not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
os.remove(path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
# CWD
if os.path.exists(name):
return True
# Paths with directories are not resolved using the PATH variable.
if os.path.dirname(name):
return False
# In path
for path in paths.split(os.pathsep):
full = os.path.join(path, name)
if os.path.exists(full):
return True
return False
def RemoveGypBuildDirectories():
# Remove all directories on all platforms. Overkill, but it allows for
# straight-line code.
# Windows
RemoveDirectory('build/Debug')
RemoveDirectory('build/Release')
RemoveDirectory('build/Debug-Win32')
RemoveDirectory('build/Release-Win32')
RemoveDirectory('build/Debug-x64')
RemoveDirectory('build/Release-x64')
# Linux and Mac
RemoveDirectory('../xcodebuild')
RemoveDirectory('../out')
RemoveDirectory('src/third_party/nacl_sdk/arm-newlib')
def RemoveSconsBuildDirectories():
RemoveDirectory('scons-out')
RemoveDirectory('breakpad-out')
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
print 'Running command: %s' % ' '.join(cmd)
# Python's subprocess has a quirk. A subprocess can execute with an
# arbitrary, user-defined environment. The first argument of the command,
# however, is located using the PATH variable of the Python script that is
# launching the subprocess. Modifying the PATH in the environment passed to
# the subprocess does not affect Python's search for the first argument of
# the command (the executable file.) This is a little counter intuitive,
# so we're forcing the search to use the same PATH variable as is seen by
# the subprocess.
env = context.MakeCommandEnv()
script_path = os.environ['PATH']
os.environ['PATH'] = env['PATH']
try:
if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
# Make sure that print statements before the subprocess call have been
# flushed, otherwise the output of the subprocess call may appear before
# the print statements.
sys.stdout.flush()
if context['dry_run']:
retcode = 0
else:
retcode = subprocess.call(cmd, cwd=cwd, env=env)
else:
# Provide a nicer failure message.
# If subprocess cannot find the executable, it will throw a cryptic
# exception.
print 'Executable %r cannot be found.' % cmd[0]
retcode = 1
finally:
os.environ['PATH'] = script_path
print 'Command return code: %d' % retcode
if retcode != 0:
raise StepFailed()
return retcode
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
args=(), cwd=None):
python = sys.executable
if mode is None: mode = context['default_scons_mode']
if platform is None: platform = context['default_scons_platform']
if parallel:
jobs = context['max_jobs']
else:
jobs = 1
cmd = []
if browser_test and context.Linux():
# Although we could use the "browser_headless=1" Scons option, it runs
# xvfb-run once per Chromium invocation. This is good for isolating
# the tests, but xvfb-run has a stupid fixed-period sleep, which would
# slow down the tests unnecessarily.
cmd.extend(['xvfb-run', '--auto-servernum'])
cmd.extend([
python, 'scons.py',
'--verbose',
'-k',
'-j%d' % jobs,
'--mode='+','.join(mode),
'platform='+platform,
])
cmd.extend(context['scons_args'])
if context['clang']: cmd.append('--clang')
if context['asan']: cmd.append('--asan')
if context['use_glibc']: cmd.append('--nacl_glibc')
if context['pnacl']: cmd.append('bitcode=1')
if context['nacl_clang']: cmd.append('nacl_clang=1')
if context['use_breakpad_tools']:
cmd.append('breakpad_tools_dir=breakpad-out')
if context['android']:
cmd.append('android=1')
# Append used-specified arguments.
cmd.extend(args)
Command(context, cmd, cwd)
class StepFailed(Exception):
"""
Thrown when the step has failed.
"""
class StopBuild(Exception):
"""
Thrown when the entire build should stop. This does not indicate a failure,
  in and of itself.
"""
class Step(object):
"""
This class is used in conjunction with a Python "with" statement to ensure
that the preamble and postamble of each build step gets printed and failures
get logged. This class also ensures that exceptions thrown inside a "with"
statement don't take down the entire build.
"""
def __init__(self, name, status, halt_on_fail=True):
self.status = status
if 'step_suffix' in status.context:
suffix = status.context['step_suffix']
else:
suffix = ''
self.name = name + suffix
self.halt_on_fail = halt_on_fail
self.step_failed = False
# Called on entry to a 'with' block.
def __enter__(self):
sys.stdout.flush()
print
print '@@@BUILD_STEP %s@@@' % self.name
self.status.ReportBegin(self.name)
# The method is called on exit from a 'with' block - even for non-local
# control flow, i.e. exceptions, breaks, continues, returns, etc.
# If an exception is thrown inside a block wrapped with a 'with' statement,
# the __exit__ handler can suppress the exception by returning True. This is
# used to isolate each step in the build - if an exception occurs in a given
# step, the step is treated as a failure. This allows the postamble for each
  # step to be printed and also allows the build to continue if the failure of
# a given step doesn't halt the build.
def __exit__(self, type, exception, trace):
sys.stdout.flush()
if exception is None:
# If exception is None, no exception occurred.
step_failed = False
elif isinstance(exception, StepFailed):
step_failed = True
print
print 'Halting build step because of failure.'
print
else:
step_failed = True
print
print 'The build step threw an exception...'
print
traceback.print_exception(type, exception, trace, file=sys.stdout)
print
if step_failed:
self.status.ReportFail(self.name)
print '@@@STEP_FAILURE@@@'
if self.halt_on_fail:
print
print 'Entire build halted because %s failed.' % self.name
sys.stdout.flush()
raise StopBuild()
else:
self.status.ReportPass(self.name)
sys.stdout.flush()
# Suppress any exception that occurred.
return True
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
"""
Keeps track of the overall status of the build.
"""
def __init__(self, context):
self.context = context
self.ever_failed = False
self.steps = []
def ReportBegin(self, name):
pass
def ReportPass(self, name):
self.steps.append((name, 'passed'))
def ReportFail(self, name):
self.steps.append((name, 'failed'))
self.ever_failed = True
# Handy info when this script is run outside of the buildbot.
def DisplayBuildStatus(self):
print
for step, status in self.steps:
print '%-40s[%s]' % (step, status)
print
if self.ever_failed:
print 'Build failed.'
else:
print 'Build succeeded.'
def ReturnValue(self):
return int(self.ever_failed)
class BuildContext(object):
"""
Encapsulates the information needed for running a build command. This
includes environment variables and default arguments for SCons invocations.
"""
# Only allow these attributes on objects of this type.
__slots__ = ['status', 'global_env', 'config']
def __init__(self):
# The contents of global_env override os.environ for any commands run via
# self.Command(...)
self.global_env = {}
# PATH is a special case. See: Command.
self.global_env['PATH'] = os.environ.get('PATH', '')
self.config = {}
self['dry_run'] = False
# Emulate dictionary subscripting.
def __getitem__(self, key):
return self.config[key]
# Emulate dictionary subscripting.
def __setitem__(self, key, value):
self.config[key] = value
# Emulate dictionary membership test
def __contains__(self, key):
return key in self.config
def Windows(self):
return self.config['platform'] == 'win'
def Linux(self):
return self.config['platform'] == 'linux'
def Mac(self):
return self.config['platform'] == 'mac'
def GetEnv(self, name, default=None):
return self.global_env.get(name, default)
def SetEnv(self, name, value):
self.global_env[name] = str(value)
def MakeCommandEnv(self):
# The external environment is not sanitized.
e = dict(os.environ)
# Arbitrary variables can be overridden.
e.update(self.global_env)
return e
def RunBuild(script, status):
try:
script(status, status.context)
except StopBuild:
pass
# Emit a summary step for three reasons:
# - The annotator will attribute non-zero exit status to the last build step.
# This can misattribute failures to the last build step.
# - runtest.py wraps the builds to scrape perf data. It emits an annotator
# tag on exit which misattributes perf results to the last build step.
# - Provide a label step in which to show summary result.
# Otherwise these go back to the preamble.
with Step('summary', status):
if status.ever_failed:
print 'There were failed stages.'
else:
print 'Success.'
# Display a summary of the build.
status.DisplayBuildStatus()
sys.exit(status.ReturnValue())
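# A minimal sketch (hypothetical script) of how the pieces above fit together
# in a concrete buildbot script:
#
#   def BuildScript(status, context):
#     with Step('scons_compile', status):
#       SCons(context, parallel=True, args=[])
#
#   def Main():
#     context = BuildContext()
#     status = BuildStatus(context)
#     ParseStandardCommandLine(context)
#     if context.Linux():
#       SetupLinuxEnvironment(context)
#     RunBuild(BuildScript, status)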
| bsd-3-clause |
willharris/django | django/db/models/options.py | 30 | 35264 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name')
class raise_deprecation(object):
def __init__(self, suggested_alternative):
self.suggested_alternative = suggested_alternative
def __call__(self, fn):
def wrapper(*args, **kwargs):
warnings.warn(
"'%s is an unofficial API that has been deprecated. "
"You may be able to replace it with '%s'" % (
fn.__name__,
self.suggested_alternative,
),
RemovedInDjango20Warning, stacklevel=2
)
return fn(*args, **kwargs)
return wrapper
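# Illustrative use of the decorator above (hypothetical method name):
#
#   @raise_deprecation(suggested_alternative='get_fields()')
#   def get_fields_with_model(self):
#       ...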
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
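# Both spellings below normalize to the same canonical form (the field names
# are arbitrary):
#
#   normalize_together(('first_name', 'last_name'))    # -> (('first_name', 'last_name'),)
#   normalize_together([['first_name', 'last_name']])  # -> (('first_name', 'last_name'),)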
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map')
REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.proxied_children = []
self.local_fields = []
self.local_many_to_many = []
self.virtual_fields = []
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.meta = meta
self.pk = None
self.has_auto_field = False
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes). `managers`
# keeps a list of 3-tuples of the form:
# (creation_counter, instance, abstract(=True))
self.managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = apps
self.default_related_name = None
@lru_cache(maxsize=None)
def _map_model(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# It maps a field to (field, model or related_model,) depending on the
# field type.
model = link.model._meta.concrete_model
if model is self.model:
model = None
return link, model
@lru_cache(maxsize=None)
def _map_model_details(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# This function maps a field to a tuple of:
# (field, model or related_model, direct, is_m2m) depending on the
# field type.
direct = not link.auto_created or link.concrete
model = link.model._meta.concrete_model
if model is self.model:
model = None
m2m = link.is_relation and link.many_to_many
return link, model, direct, m2m
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
ut = meta_attrs.pop('unique_together', self.unique_together)
self.unique_together = normalize_together(ut)
it = meta_attrs.pop('index_together', self.index_together)
self.index_together = normalize_together(it)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
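    # Illustrative sketch (not in the original source): a hypothetical model
    #
    #   class Book(models.Model):          # declared in an app labelled "library"
    #       title = models.CharField(max_length=100)
    #
    # ends up, after contribute_to_class() runs, with object_name "Book",
    # model_name "book", verbose_name "book", verbose_name_plural "books" and,
    # since no Meta.db_table was given, db_table "library_book" (truncated to
    # the backend's maximum identifier length if necessary).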
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
def add_field(self, field, virtual=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if virtual:
self.virtual_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.rel, 'to') and field.rel.to:
try:
field.rel.to._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
with override(None):
return force_text(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
model_label = '%s.%s' % (self.app_label, self.model_name)
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
return swapped_for
return None
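    # Illustrative sketch (not in the original source): Django's auth.User sets
    # ``swappable = 'AUTH_USER_MODEL'``, so with a hypothetical setting
    #
    #   AUTH_USER_MODEL = "myapp.CustomUser"
    #
    # User._meta.swapped returns "myapp.CustomUser", because the setting no
    # longer points back at auth.User itself; without such a setting it
    # returns None.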
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not virtual or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
is_not_a_generic_foreign_key = lambda f: not (
f.is_relation and f.many_to_one and not (hasattr(f.rel, 'to') and f.rel.to)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False) if
is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_fields_with_model(self):
return [self._map_model(f) for f in self.get_fields()]
@raise_deprecation(suggested_alternative="get_fields()")
def get_concrete_fields_with_model(self):
return [self._map_model(f) for f in self.concrete_fields]
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_m2m_with_model(self):
return [self._map_model(f) for f in self.many_to_many]
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name, many_to_many=None):
"""
Returns a field instance given a field name. The field can be either a
forward or reverse field, unless many_to_many is specified; if it is,
only forward fields will be returned.
The many_to_many argument exists for backwards compatibility reasons;
it has been deprecated and will be removed in Django 2.0.
"""
m2m_in_kwargs = many_to_many is not None
if m2m_in_kwargs:
# Always throw a warning if many_to_many is used regardless of
# whether it alters the return type or not.
warnings.warn(
"The 'many_to_many' argument on get_field() is deprecated; "
"use a filter on field.many_to_many instead.",
RemovedInDjango20Warning
)
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
field = self._forward_fields_map[field_name]
if many_to_many is False and field.many_to_many:
raise FieldDoesNotExist(
'%s has no field named %r' % (self.object_name, field_name)
)
return field
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named %r. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
if m2m_in_kwargs:
# Previous API does not allow searching reverse fields.
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
@raise_deprecation(suggested_alternative="get_field()")
def get_field_by_name(self, name):
return self._map_model_details(self.get_field(name))
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_field_names(self):
names = set()
fields = self.get_fields()
for field in fields:
# For backwards compatibility GenericForeignKey should not be
# included in the results.
if field.is_relation and field.many_to_one and field.related_model is None:
continue
# Relations to child proxy models should not be included.
if (field.model != self.model and
field.model._meta.concrete_model == self.concrete_model):
continue
names.add(field.name)
if hasattr(field, 'attname'):
names.add(field.attname)
return list(names)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
include_parents = True if local_only is False else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents,
include_hidden=include_hidden,
)
fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
if include_proxy_eq:
children = chain.from_iterable(c._relation_tree
for c in self.concrete_model._meta.proxied_children
if c is not self)
relations = (f.rel for f in children
if include_hidden or not f.rel.field.rel.is_hidden())
fields = chain(fields, relations)
return list(fields)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [
self._map_model(f) for f in self.get_all_related_objects(
local_only=local_only,
include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq,
)
]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_many_to_many_objects(self, local_only=False):
include_parents = True if local_only is not True else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents, include_hidden=True
)
return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_m2m_objects_with_model(self):
fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
def get_base_chain(self, model):
"""
        Returns a list of parent classes leading to 'model' (ordered from closest
        to most distant ancestor). This has to handle the case where 'model' is
a grandparent or even more distant relation.
"""
if not self.parents:
return None
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return None
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
        Returns the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent's
                # link
return self.parents[parent] or parent_link
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if model._meta.abstract:
continue
fields_with_relations = (
f for f in model._meta._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.rel.to, six.string_types):
related_objects_graph[f.rel.to._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Returns a list of fields associated with the model. By default it will only
return forward fields. This can be changed by enabling or disabling
field types using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
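    # Illustrative sketch (not in the original source): typical calls on a
    # hypothetical model ``Book``:
    #
    #   >>> Book._meta.get_fields()                       # forward and reverse fields
    #   >>> Book._meta.get_fields(include_hidden=True)    # also "+"-named reverse relations
    #   >>> Book._meta.get_fields(include_parents=False)  # skip fields inherited from parents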
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
            # To avoid accidental list manipulation, always return a shallow
            # copy of the cached results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if hasattr(obj, 'parent_link') and obj.parent_link:
continue
fields.append(obj)
if reverse:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.rel.hidden:
fields.append(field.rel)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Virtual fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the virtual fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.virtual_fields
)
        # To avoid accidental list manipulation, always
        # return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
| bsd-3-clause |
tjcsl/director | web3/apps/sites/migrations/0001_initial.py | 1 | 1297 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-05 23:20
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0002_auto_20161105_2046'),
]
operations = [
migrations.CreateModel(
name='Website',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(1000)])),
('name', models.CharField(max_length=32, unique=True)),
('category', models.CharField(choices=[('legacy', 'legacy'), ('static', 'static'), ('php', 'php'), ('dynamic', 'dynamic')], max_length=16)),
('purpose', models.CharField(choices=[('user', 'user'), ('activity', 'activity')], max_length=16)),
('domain', models.TextField()),
('description', models.TextField()),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.User')),
],
),
]
| mit |
scroggo/skia | tools/skp/recreate_skps.py | 10 | 2822 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run the webpages_playback automation script."""
import os
import subprocess
import sys
sys.path.insert(0, os.getcwd())
from common.py.utils import gs_utils
from common.py.utils import shell_utils
SKP_VERSION_FILE = 'SKP_VERSION'
def _get_skp_version():
"""Find an unused SKP version."""
current_skp_version = None
with open(SKP_VERSION_FILE) as f:
current_skp_version = int(f.read().rstrip())
# Find the first SKP version which has no uploaded SKPs.
new_version = current_skp_version + 1
while True:
gs_path = 'playback_%d/skps' % new_version
if not gs_utils.GSUtils().does_storage_object_exist('chromium-skia-gm',
gs_path):
return new_version
new_version += 1
def main(chrome_src_path, browser_executable):
browser_executable = os.path.realpath(browser_executable)
skp_version = _get_skp_version()
print 'SKP_VERSION=%d' % skp_version
if os.environ.get('CHROME_HEADLESS'):
# Start Xvfb if running on a bot.
try:
shell_utils.run('sudo Xvfb :0 -screen 0 1280x1024x24 &', shell=True)
except Exception:
# It is ok if the above command fails, it just means that DISPLAY=:0
# is already up.
pass
upload_dir = 'playback_%d' % skp_version
webpages_playback_cmd = [
'python', os.path.join(os.path.dirname(os.path.realpath(__file__)),
'webpages_playback.py'),
'--page_sets', 'all',
'--browser_executable', browser_executable,
'--non-interactive',
'--upload_to_gs',
'--alternate_upload_dir', upload_dir,
'--chrome_src_path', chrome_src_path,
]
try:
shell_utils.run(webpages_playback_cmd)
finally:
# Clean up any leftover browser instances. This can happen if there are
# telemetry crashes, processes are not always cleaned up appropriately by
# the webpagereplay and telemetry frameworks.
procs = subprocess.check_output(['ps', 'ax'])
for line in procs.splitlines():
if browser_executable in line:
pid = line.strip().split(' ')[0]
if pid != str(os.getpid()) and not 'python' in line:
try:
shell_utils.run(['kill', '-9', pid])
except shell_utils.CommandFailedException as e:
print e
else:
print 'Refusing to kill self.'
print 'writing %s: %s' % (SKP_VERSION_FILE, skp_version)
with open(SKP_VERSION_FILE, 'w') as f:
f.write(str(skp_version))
if '__main__' == __name__:
if len(sys.argv) != 3:
        print >> sys.stderr, 'USAGE: %s <chrome src path> <browser executable>' % sys.argv[0]
sys.exit(1)
main(*sys.argv[1:])
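# Illustrative invocation (not part of the original script); the paths below
# are assumptions and must point at a real Chromium checkout and browser build:
#
#   python tools/skp/recreate_skps.py ~/chromium/src \
#       ~/chromium/src/out/Release/chrome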
| bsd-3-clause |
g-k/servo | tests/wpt/harness/wptrunner/testloader.py | 34 | 22201 | import json
import os
import sys
import urlparse
from abc import ABCMeta, abstractmethod
from Queue import Empty
from collections import defaultdict, OrderedDict, deque
from multiprocessing import Queue
import manifestinclude
import manifestexpected
import wpttest
from mozlog import structured
manifest = None
manifest_update = None
def do_delayed_imports():
# This relies on an already loaded module having set the sys.path correctly :(
global manifest, manifest_update
from manifest import manifest
from manifest import update as manifest_update
class TestChunker(object):
def __init__(self, total_chunks, chunk_number):
self.total_chunks = total_chunks
self.chunk_number = chunk_number
assert self.chunk_number <= self.total_chunks
self.logger = structured.get_default_logger()
def __call__(self, manifest):
raise NotImplementedError
class Unchunked(TestChunker):
def __init__(self, *args, **kwargs):
TestChunker.__init__(self, *args, **kwargs)
assert self.total_chunks == 1
def __call__(self, manifest):
for item in manifest:
yield item
class HashChunker(TestChunker):
    def __call__(self, manifest):
chunk_index = self.chunk_number - 1
for test_path, tests in manifest:
if hash(test_path) % self.total_chunks == chunk_index:
yield test_path, tests
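# Illustrative sketch (not part of the original module) of how a chunker
# partitions manifest items; the test paths below are made up for the example.
def _example_hash_chunking():
    items = [("dom/a.html", set()), ("css/b.html", set()), ("fetch/c.html", set())]
    chunker = HashChunker(total_chunks=2, chunk_number=1)
    # Only the paths whose hash falls into bucket 0 (chunk_number - 1) survive.
    return list(chunker(items))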
class EqualTimeChunker(TestChunker):
def _group_by_directory(self, manifest_items):
"""Split the list of manifest items into a ordered dict that groups tests in
so that anything in the same subdirectory beyond a depth of 3 is in the same
group. So all tests in a/b/c, a/b/c/d and a/b/c/e will be grouped together
and separate to tests in a/b/f
Returns: tuple (ordered dict of {test_dir: PathData}, total estimated runtime)
"""
class PathData(object):
def __init__(self, path):
self.path = path
self.time = 0
self.tests = []
by_dir = OrderedDict()
total_time = 0
for i, (test_path, tests) in enumerate(manifest_items):
test_dir = tuple(os.path.split(test_path)[0].split(os.path.sep)[:3])
if not test_dir in by_dir:
by_dir[test_dir] = PathData(test_dir)
data = by_dir[test_dir]
time = sum(wpttest.DEFAULT_TIMEOUT if test.timeout !=
"long" else wpttest.LONG_TIMEOUT for test in tests)
data.time += time
total_time += time
data.tests.append((test_path, tests))
return by_dir, total_time
    def _maybe_remove(self, chunks, i, direction):
        """Trial moving a path out of one chunk into an adjacent one.
:param chunks: - the list of all chunks
:param i: - the chunk index in the list of chunks to try removing from
:param direction: either "next" if we are going to move from the end to
the subsequent chunk, or "prev" if we are going to move
from the start into the previous chunk.
:returns bool: Did a chunk get moved?"""
source_chunk = chunks[i]
if direction == "next":
target_chunk = chunks[i+1]
path_index = -1
move_func = lambda: target_chunk.appendleft(source_chunk.pop())
elif direction == "prev":
target_chunk = chunks[i-1]
path_index = 0
move_func = lambda: target_chunk.append(source_chunk.popleft())
else:
raise ValueError("Unexpected move direction %s" % direction)
return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
    def _maybe_add(self, chunks, i, direction):
        """Trial moving a path into one chunk from an adjacent one.
:param chunks: - the list of all chunks
:param i: - the chunk index in the list of chunks to try adding to
        :param direction: either "next" if we are going to remove from the
                          subsequent chunk, or "prev" if we are going to remove
                          from the previous chunk.
:returns bool: Did a chunk get moved?"""
target_chunk = chunks[i]
if direction == "next":
source_chunk = chunks[i+1]
path_index = 0
move_func = lambda: target_chunk.append(source_chunk.popleft())
elif direction == "prev":
source_chunk = chunks[i-1]
path_index = -1
move_func = lambda: target_chunk.appendleft(source_chunk.pop())
else:
raise ValueError("Unexpected move direction %s" % direction)
return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):
"""Move from one chunk to another, assess the change in badness,
and keep the move iff it decreases the badness score.
:param source_chunk: chunk to move from
:param target_chunk: chunk to move to
:param path_index: 0 if we are moving from the start or -1 if we are moving from the
end
:param move_func: Function that actually moves between chunks"""
if len(source_chunk.paths) <= 1:
return False
move_time = source_chunk.paths[path_index].time
new_source_badness = self._badness(source_chunk.time - move_time)
new_target_badness = self._badness(target_chunk.time + move_time)
delta_badness = ((new_source_badness + new_target_badness) -
(source_chunk.badness + target_chunk.badness))
if delta_badness < 0:
move_func()
return True
return False
def _badness(self, time):
"""Metric of badness for a specific chunk
:param time: the time for a specific chunk"""
return (time - self.expected_time)**2
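    # Illustrative note (not in the original source): with an expected chunk
    # time of 100s, a chunk currently taking 130s scores (130 - 100)**2 == 900
    # while one taking 95s scores 25, so moves that shrink an overloaded chunk
    # reduce the total badness and are kept.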
def _get_chunk(self, manifest_items):
by_dir, total_time = self._group_by_directory(manifest_items)
if len(by_dir) < self.total_chunks:
raise ValueError("Tried to split into %i chunks, but only %i subdirectories included" % (
self.total_chunks, len(by_dir)))
self.expected_time = float(total_time) / self.total_chunks
chunks = self._create_initial_chunks(by_dir)
while True:
# Move a test from one chunk to the next until doing so no longer
# reduces the badness
got_improvement = self._update_chunks(chunks)
if not got_improvement:
break
self.logger.debug(self.expected_time)
for i, chunk in chunks.iteritems():
self.logger.debug("%i: %i, %i" % (i + 1, chunk.time, chunk.badness))
assert self._all_tests(by_dir) == self._chunked_tests(chunks)
return self._get_tests(chunks)
@staticmethod
def _all_tests(by_dir):
"""Return a set of all tests in the manifest from a grouping by directory"""
return set(x[0] for item in by_dir.itervalues()
for x in item.tests)
@staticmethod
def _chunked_tests(chunks):
"""Return a set of all tests in the manifest from the chunk list"""
return set(x[0] for chunk in chunks.itervalues()
for path in chunk.paths
for x in path.tests)
def _create_initial_chunks(self, by_dir):
"""Create an initial unbalanced list of chunks.
:param by_dir: All tests in the manifest grouped by subdirectory
:returns list: A list of Chunk objects"""
class Chunk(object):
def __init__(self, paths, index):
"""List of PathData objects that together form a single chunk of
tests"""
self.paths = deque(paths)
self.time = sum(item.time for item in paths)
self.index = index
def appendleft(self, path):
"""Add a PathData object to the start of the chunk"""
self.paths.appendleft(path)
self.time += path.time
def append(self, path):
"""Add a PathData object to the end of the chunk"""
self.paths.append(path)
self.time += path.time
def pop(self):
"""Remove PathData object from the end of the chunk"""
assert len(self.paths) > 1
self.time -= self.paths[-1].time
return self.paths.pop()
def popleft(self):
"""Remove PathData object from the start of the chunk"""
assert len(self.paths) > 1
self.time -= self.paths[0].time
return self.paths.popleft()
@property
def badness(self_):
"""Badness metric for this chunk"""
return self._badness(self_.time)
initial_size = len(by_dir) / self.total_chunks
chunk_boundaries = [initial_size * i
for i in xrange(self.total_chunks)] + [len(by_dir)]
chunks = OrderedDict()
for i, lower in enumerate(chunk_boundaries[:-1]):
upper = chunk_boundaries[i + 1]
paths = by_dir.values()[lower:upper]
chunks[i] = Chunk(paths, i)
assert self._all_tests(by_dir) == self._chunked_tests(chunks)
return chunks
def _update_chunks(self, chunks):
"""Run a single iteration of the chunk update algorithm.
:param chunks: - List of chunks
"""
#TODO: consider replacing this with a heap
sorted_chunks = sorted(chunks.values(), key=lambda x:-x.badness)
got_improvement = False
for chunk in sorted_chunks:
if chunk.time < self.expected_time:
f = self._maybe_add
else:
f = self._maybe_remove
if chunk.index == 0:
order = ["next"]
elif chunk.index == self.total_chunks - 1:
order = ["prev"]
else:
if chunk.time < self.expected_time:
# First try to add a test from the neighboring chunk with the
# greatest total time
if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
order = ["next", "prev"]
else:
order = ["prev", "next"]
else:
# First try to remove a test and add to the neighboring chunk with the
# lowest total time
if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
order = ["prev", "next"]
else:
order = ["next", "prev"]
for direction in order:
if f(chunks, chunk.index, direction):
got_improvement = True
break
if got_improvement:
break
return got_improvement
def _get_tests(self, chunks):
"""Return the list of tests corresponding to the chunk number we are running.
:param chunks: List of chunks"""
tests = []
for path in chunks[self.chunk_number - 1].paths:
tests.extend(path.tests)
return tests
def __call__(self, manifest_iter):
manifest = list(manifest_iter)
tests = self._get_chunk(manifest)
for item in tests:
yield item
class TestFilter(object):
def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None):
if manifest_path is not None and include is None:
self.manifest = manifestinclude.get_manifest(manifest_path)
else:
self.manifest = manifestinclude.IncludeManifest.create()
if include:
self.manifest.set("skip", "true")
for item in include:
self.manifest.add_include(test_manifests, item)
if exclude:
for item in exclude:
self.manifest.add_exclude(test_manifests, item)
def __call__(self, manifest_iter):
for test_path, tests in manifest_iter:
include_tests = set()
for test in tests:
if self.manifest.include(test):
include_tests.add(test)
if include_tests:
yield test_path, include_tests
class TagFilter(object):
def __init__(self, tags):
self.tags = set(tags)
def __call__(self, test_iter):
for test in test_iter:
if test.tags & self.tags:
yield test
class ManifestLoader(object):
def __init__(self, test_paths, force_manifest_update=False):
do_delayed_imports()
self.test_paths = test_paths
self.force_manifest_update = force_manifest_update
self.logger = structured.get_default_logger()
if self.logger is None:
self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
def load(self):
rv = {}
for url_base, paths in self.test_paths.iteritems():
manifest_file = self.load_manifest(url_base=url_base,
**paths)
path_data = {"url_base": url_base}
path_data.update(paths)
rv[manifest_file] = path_data
return rv
def create_manifest(self, manifest_path, tests_path, url_base="/"):
self.update_manifest(manifest_path, tests_path, url_base, recreate=True)
def update_manifest(self, manifest_path, tests_path, url_base="/",
recreate=False):
self.logger.info("Updating test manifest %s" % manifest_path)
json_data = None
if not recreate:
try:
with open(manifest_path) as f:
json_data = json.load(f)
except IOError:
#If the existing file doesn't exist just create one from scratch
pass
if not json_data:
manifest_file = manifest.Manifest(None, url_base)
else:
try:
manifest_file = manifest.Manifest.from_json(tests_path, json_data)
except manifest.ManifestVersionMismatch:
manifest_file = manifest.Manifest(None, url_base)
manifest_update.update(tests_path, url_base, manifest_file)
manifest.write(manifest_file, manifest_path)
def load_manifest(self, tests_path, metadata_path, url_base="/"):
manifest_path = os.path.join(metadata_path, "MANIFEST.json")
if (not os.path.exists(manifest_path) or
self.force_manifest_update):
self.update_manifest(manifest_path, tests_path, url_base)
manifest_file = manifest.load(tests_path, manifest_path)
if manifest_file.url_base != url_base:
self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
url_base))
manifest_file.url_base = url_base
manifest.write(manifest_file, manifest_path)
return manifest_file
def iterfilter(filters, iter):
for f in filters:
iter = f(iter)
for item in iter:
yield item
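# Illustrative sketch (not part of the original module): filters compose by
# wrapping one generator in the next, so applying several of them is just a
# chain of iterators.  The tag names below are assumptions for the example.
def _example_iterfilter_usage(tests):
    keep_slow = TagFilter(tags=["slow"])
    keep_network = TagFilter(tags=["network"])
    # A test must carry at least one tag from *each* filter to survive.
    return list(iterfilter([keep_slow, keep_network], tests))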
class TestLoader(object):
def __init__(self,
test_manifests,
test_types,
run_info,
manifest_filters=None,
meta_filters=None,
chunk_type="none",
total_chunks=1,
chunk_number=1,
include_https=True):
self.test_types = test_types
self.run_info = run_info
self.manifest_filters = manifest_filters if manifest_filters is not None else []
self.meta_filters = meta_filters if meta_filters is not None else []
self.manifests = test_manifests
self.tests = None
self.disabled_tests = None
self.include_https = include_https
self.chunk_type = chunk_type
self.total_chunks = total_chunks
self.chunk_number = chunk_number
self.chunker = {"none": Unchunked,
"hash": HashChunker,
"equal_time": EqualTimeChunker}[chunk_type](total_chunks,
chunk_number)
self._test_ids = None
self.directory_manifests = {}
self._load_tests()
@property
def test_ids(self):
if self._test_ids is None:
self._test_ids = []
for test_dict in [self.disabled_tests, self.tests]:
for test_type in self.test_types:
self._test_ids += [item.id for item in test_dict[test_type]]
return self._test_ids
def get_test(self, manifest_test, inherit_metadata, test_metadata):
if test_metadata is not None:
inherit_metadata.append(test_metadata)
test_metadata = test_metadata.get_test(manifest_test.id)
return wpttest.from_manifest(manifest_test, inherit_metadata, test_metadata)
def load_dir_metadata(self, test_manifest, metadata_path, test_path):
rv = []
path_parts = os.path.dirname(test_path).split(os.path.sep)
for i in xrange(1,len(path_parts) + 1):
path = os.path.join(os.path.sep.join(path_parts[:i]), "__dir__.ini")
if path not in self.directory_manifests:
self.directory_manifests[path] = manifestexpected.get_dir_manifest(
metadata_path, path, self.run_info)
manifest = self.directory_manifests[path]
if manifest is not None:
rv.append(manifest)
return rv
def load_metadata(self, test_manifest, metadata_path, test_path):
inherit_metadata = self.load_dir_metadata(test_manifest, metadata_path, test_path)
test_metadata = manifestexpected.get_manifest(
metadata_path, test_path, test_manifest.url_base, self.run_info)
return inherit_metadata, test_metadata
def iter_tests(self):
manifest_items = []
for manifest in self.manifests.keys():
manifest_iter = iterfilter(self.manifest_filters,
manifest.itertypes(*self.test_types))
manifest_items.extend(manifest_iter)
if self.chunker is not None:
manifest_items = self.chunker(manifest_items)
for test_path, tests in manifest_items:
manifest_file = iter(tests).next().manifest
metadata_path = self.manifests[manifest_file]["metadata_path"]
inherit_metadata, test_metadata = self.load_metadata(manifest_file, metadata_path, test_path)
for test in iterfilter(self.meta_filters,
self.iter_wpttest(inherit_metadata, test_metadata, tests)):
yield test_path, test.test_type, test
def iter_wpttest(self, inherit_metadata, test_metadata, tests):
for manifest_test in tests:
yield self.get_test(manifest_test, inherit_metadata, test_metadata)
def _load_tests(self):
"""Read in the tests from the manifest file and add them to a queue"""
tests = {"enabled":defaultdict(list),
"disabled":defaultdict(list)}
for test_path, test_type, test in self.iter_tests():
enabled = not test.disabled()
if not self.include_https and test.environment["protocol"] == "https":
enabled = False
key = "enabled" if enabled else "disabled"
tests[key][test_type].append(test)
self.tests = tests["enabled"]
self.disabled_tests = tests["disabled"]
def groups(self, test_types, chunk_type="none", total_chunks=1, chunk_number=1):
groups = set()
for test_type in test_types:
for test in self.tests[test_type]:
group = test.url.split("/")[1]
groups.add(group)
return groups
class TestSource(object):
__metaclass__ = ABCMeta
@abstractmethod
def queue_tests(self, test_queue):
pass
@abstractmethod
def requeue_test(self, test):
pass
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
class SingleTestSource(TestSource):
def __init__(self, test_queue):
self.test_queue = test_queue
@classmethod
def queue_tests(cls, test_queue, test_type, tests):
for test in tests[test_type]:
test_queue.put(test)
def get_queue(self):
if self.test_queue.empty():
return None
return self.test_queue
def requeue_test(self, test):
self.test_queue.put(test)
class PathGroupedSource(TestSource):
def __init__(self, test_queue):
self.test_queue = test_queue
self.current_queue = None
@classmethod
def queue_tests(cls, test_queue, test_type, tests, depth=None):
if depth is True:
depth = None
prev_path = None
group = None
for test in tests[test_type]:
path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
if path != prev_path:
group = []
test_queue.put(group)
prev_path = path
group.append(test)
def get_queue(self):
if not self.current_queue or self.current_queue.empty():
try:
data = self.test_queue.get(block=True, timeout=1)
self.current_queue = Queue()
for item in data:
self.current_queue.put(item)
except Empty:
return None
return self.current_queue
def requeue_test(self, test):
self.current_queue.put(test)
def __exit__(self, *args, **kwargs):
if self.current_queue:
self.current_queue.close()
| mpl-2.0 |
lisa-groundhog/GroundHog | groundhog/layers/ff_layers.py | 16 | 18887 | """
Feedforward layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class MultiLayer(Layer):
"""
Implementing a standard feed forward MLP
"""
def __init__(self,
rng,
n_in,
n_hids=[500,500],
activation='TT.tanh',
scale=0.01,
sparsity=-1,
rank_n_approx=0,
rank_n_activ='lambda x: x',
weight_noise=False,
dropout = 1.,
init_fn='sample_weights_classic',
bias_fn='init_bias',
bias_scale = 0.,
learn_bias = True,
grad_scale = 1.,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type rank_n_approx: int
:param rank_n_approx: It applies to the first layer only. If
positive and larger than 0, the first weight matrix is
factorized into two matrices. The first one goes from input to
`rank_n_approx` hidden units, the second from `rank_n_approx` to
the number of units on the second layer
:type rank_n_activ: string or function
        :param rank_n_activ: Function that is applied on the intermediary
layer formed from factorizing the first weight matrix (Q: do we
need this?)
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type learn_bias: bool
:param learn_bias: flag, saying if we should learn the bias or keep
it constant
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
assert rank_n_approx >= 0, "Please enter a valid rank_n_approx"
self.rank_n_approx = rank_n_approx
if isinstance(rank_n_activ, (str, unicode)):
rank_n_activ = eval(rank_n_activ)
self.rank_n_activ = rank_n_activ
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
self.n_layers = n_layers
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * n_layers
if bias_fn not in (list, tuple):
bias_fn = [bias_fn] * n_layers
if init_fn not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if isinstance(bias_fn[dx], (str, unicode)):
bias_fn[dx] = eval(bias_fn[dx])
if isinstance(init_fn[dx], (str, unicode)):
init_fn[dx] = eval(init_fn[dx])
if isinstance(activation[dx], (str, unicode)):
activation[dx] = eval(activation[dx])
super(MultiLayer, self).__init__(n_in, n_hids[-1], rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.activation = activation
self.scale = scale
self.sparsity = sparsity
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self._grad_scale = grad_scale
self.weight_noise = weight_noise
self.dropout = dropout
self.n_hids = n_hids
self.learn_bias = learn_bias
self._init_params()
def _init_params(self):
"""
Initialize the parameters of the layer, either by using sparse initialization or small
isotropic noise.
"""
self.W_ems = []
self.b_ems = []
if self.rank_n_approx:
W_em1 = self.init_fn[0](self.n_in,
self.rank_n_approx,
self.sparsity[0],
self.scale[0],
self.rng)
W_em2 = self.init_fn[0](self.rank_n_approx,
self.n_hids[0],
self.sparsity[0],
self.scale[0],
self.rng)
self.W_em1 = theano.shared(W_em1,
name='W1_0_%s'%self.name)
self.W_em2 = theano.shared(W_em2,
name='W2_0_%s'%self.name)
self.W_ems = [self.W_em1, self.W_em2]
else:
W_em = self.init_fn[0](self.n_in,
self.n_hids[0],
self.sparsity[0],
self.scale[0],
self.rng)
self.W_em = theano.shared(W_em,
name='W_0_%s'%self.name)
self.W_ems = [self.W_em]
self.b_em = theano.shared(
self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),
name='b_0_%s'%self.name)
self.b_ems = [self.b_em]
for dx in xrange(1, self.n_layers):
            W_em = self.init_fn[dx](self.n_hids[dx-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
self.rng)
W_em = theano.shared(W_em,
name='W_%d_%s'%(dx,self.name))
self.W_ems += [W_em]
b_em = theano.shared(
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),
name='b_%d_%s'%(dx,self.name))
self.b_ems += [b_em]
self.params = [x for x in self.W_ems]
if self.learn_bias and self.learn_bias!='last':
self.params = [x for x in self.W_ems] + [x for x in self.b_ems]
elif self.learn_bias == 'last':
self.params = [x for x in self.W_ems] + [x for x in
self.b_ems][:-1]
self.params_grad_scale = [self._grad_scale for x in self.params]
if self.weight_noise:
self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]
self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]
self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def fprop(self, state_below, use_noise=True, no_noise_bias=False,
first_only = False):
"""
Constructs the computational graph of this layer.
If the input is ints, we assume is an index, otherwise we assume is
a set of floats.
"""
if self.weight_noise and use_noise and self.noise_params:
W_ems = [(x+y) for x, y in zip(self.W_ems, self.nW_ems)]
if not no_noise_bias:
b_ems = [(x+y) for x, y in zip(self.b_ems, self.nb_ems)]
else:
b_ems = self.b_ems
else:
W_ems = self.W_ems
b_ems = self.b_ems
if self.rank_n_approx:
if first_only:
emb_val = self.rank_n_activ(utils.dot(state_below, W_ems[0]))
self.out = emb_val
return emb_val
emb_val = TT.dot(
self.rank_n_activ(utils.dot(state_below, W_ems[0])),
W_ems[1])
if b_ems:
emb_val += b_ems[0]
st_pos = 1
else:
emb_val = utils.dot(state_below, W_ems[0])
if b_ems:
emb_val += b_ems[0]
st_pos = 0
emb_val = self.activation[0](emb_val)
if self.dropout < 1.:
if use_noise:
emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype)
else:
emb_val = emb_val * self.dropout
for dx in xrange(1, self.n_layers):
emb_val = utils.dot(emb_val, W_ems[st_pos+dx])
if b_ems:
emb_val = self.activation[dx](emb_val+ b_ems[dx])
else:
emb_val = self.activation[dx](emb_val)
if self.dropout < 1.:
if use_noise:
emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype)
else:
emb_val = emb_val * self.dropout
self.out = emb_val
return emb_val
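# Illustrative sketch (not part of the original module): building a two hidden
# layer MLP and compiling a plain feed-forward function.  The sizes and names
# below are assumptions made purely for the example.
def _example_multilayer_usage():
    rng = numpy.random.RandomState(1234)
    x = TT.matrix('x')                                # (batch, n_in) input
    mlp = MultiLayer(rng, n_in=100, n_hids=[200, 200],
                     activation='TT.tanh', name='mlp_example')
    proj = mlp.fprop(x, use_noise=False)              # symbolic MLP output
    return theano.function([x], proj)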
class LastState(Layer):
"""
This layer is used to construct the embedding of the encoder by taking
the last state of the recurrent model
"""
def __init__(self, ntimes = False, n = TT.constant(0)):
"""
:type ntimes: bool
:param ntimes: If the last state needs to be repeated `n` times
:type n: int, theano constant, None
:param n: how many times the last state is repeated
"""
self.ntimes = ntimes
self.n = n
super(LastState, self).__init__(0, 0, None)
def fprop(self, all_states):
if self.ntimes:
stateshape0 = all_states.shape[0]
shape0 = TT.switch(TT.gt(self.n, 0), self.n, all_states.shape[0])
single_frame = TT.shape_padleft(all_states[stateshape0-1])
mask = TT.alloc(numpy.float32(1), shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
single_frame = all_states[all_states.shape[0]-1]
self.out = single_frame
return single_frame
last = LastState()
last_ntimes = LastState(ntimes=True)
class GaussianNoise(Layer):
"""
    This layer adds Gaussian noise (mean `avg`, standard deviation `std`) to
    its input, e.g. as a simple form of regularization
"""
    def __init__(self, rng, std = 0.1, ndim=0, avg =0, shape_fn=None):
        """
        :param std: standard deviation of the additive Gaussian noise
        :param avg: mean of the additive Gaussian noise
        :param shape_fn: optional callable giving the shape of a shared noise
            term that is kept as part of the model's noise parameters
        """
assert rng is not None, "random number generator should not be empty!"
super(GaussianNoise, self).__init__(0, 0, rng)
        self.std = std
        self.avg = avg
self.ndim = ndim
self.shape_fn = shape_fn
if self.shape_fn:
# Name is not important as it is not a parameter of the model
self.noise_term = theano.shared(numpy.zeros((2,)*ndim,
dtype=theano.config.floatX),
name='ndata')
self.noise_params += [self.noise_term]
self.noise_params_shape_fn += [shape_fn]
self.trng = RandomStreams(rng.randint(1e5))
def fprop(self, x):
self.out = x
        if self.std:
if self.shape_fn:
self.out += self.noise_term
else:
self.out += self.trng.normal(self.out.shape, std=self.std,
avg = self.avg,
dtype=self.out.dtype)
return self.out
class BinaryOp(Layer):
"""
    This layer applies an elementwise binary operation (addition by default)
    to its two inputs
"""
def __init__(self, op = 'lambda x,y: x+y', name=None):
if type(op) is str:
op = eval(op)
self.op = op
super(BinaryOp, self).__init__(0, 0, None, name)
def fprop(self, x, y):
self.out = self.op(x, y)
return self.out
class DropOp(Layer):
"""
    This layer randomly drops elements of the input by multiplying with a
mask sampled from a binomial distribution
"""
def __init__(self, rng = None, name=None, dropout=1.):
super(DropOp, self).__init__(0, 0, None, name)
self.dropout = dropout
if dropout < 1.:
self.trng = RandomStreams(rng.randint(1e5))
def fprop(self, state_below, use_noise = True):
self.out = state_below
if self.dropout < 1.:
if use_noise:
self.out = self.out * self.trng.binomial(self.out.shape,
n=1,
p=self.dropout,
dtype=self.out.dtype)
else:
self.out = self.out * self.dropout
return self.out
class UnaryOp(Layer):
"""
    This layer applies an elementwise activation function (e.g. tanh or a
    rectifier) to its input
"""
def __init__(self, activation = 'lambda x: x', name=None):
if type(activation) is str:
activation = eval(activation)
self.activation = activation
super(UnaryOp, self).__init__(0, 0, None, name)
def fprop(self, state_below):
self.out = self.activation(state_below)
return self.out
tanh = UnaryOp('lambda x: TT.tanh(x)')
sigmoid = UnaryOp('lambda x: TT.nnet.sigmoid(x)')
rectifier = UnaryOp('lambda x: x*(x>0)')
hard_sigmoid = UnaryOp('lambda x: x*(x>0)*(x<1)')
hard_tanh = UnaryOp('lambda x: x*(x>-1)*(x<1)')
class Shift(Layer):
"""
    This layer shifts its input along the first (time) axis by `n` steps,
    padding the vacated positions with zeros
"""
def __init__(self, n=1, name=None):
self.n = n
super(Shift, self).__init__(0, 0, None, name)
def fprop(self, var):
rval = TT.zeros_like(var)
if self.n >0:
rval = TT.set_subtensor(rval[self.n:], var[:-self.n])
elif self.n<0:
rval = TT.set_subtensor(rval[:self.n], var[-self.n:])
self.out = rval
return rval
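# Illustrative note (not part of the original module): for a (time, batch, dim)
# input, Shift(n=1) drops the last timestep and pads a zero frame at the front,
# i.e. output[t] == input[t - 1] for t >= 1 and output[0] == 0; a negative n
# shifts in the opposite direction.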
class MinPooling(Layer):
"""
    This layer is used to construct an embedding of the encoder by doing a
    min pooling over the hidden state
"""
def __init__(self, ntimes=False, name=None):
self.ntimes = ntimes
super(MinPooling, self).__init__(0, 0, None, name)
def fprop(self, all_states):
shape0 = all_states.shape[0]
single_frame = all_states.min(0)
if self.ntimes:
            single_frame = TT.shape_padleft(all_states.min(0))
mask = TT.alloc(numpy.float32(1),
shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
self.out = single_frame
return single_frame
minpool = MinPooling()
minpool_ntimes = MinPooling(ntimes=True)
class MaxPooling(Layer):
"""
This layer is used to construct an embedding of the encoder by doing a
max pooling over the hidden state
"""
def __init__(self, ntimes=False, name=None):
self.ntimes = ntimes
super(MaxPooling, self).__init__(0, 0, None, name)
def fprop(self, all_states):
shape0 = all_states.shape[0]
single_frame = all_states.max(0)
if self.ntimes:
single_frame = TT.shape_padleft(all_states.max(0))
mask = TT.alloc(numpy.float32(1),
shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
self.out = single_frame
return single_frame
maxpool = MaxPooling()
maxpool_ntimes = MaxPooling(ntimes=True)
class Concatenate(Layer):
def __init__(self, axis):
self.axis = axis
Layer.__init__(self, 0, 0, None)
def fprop(self, *args):
self.out = TT.concatenate(args, axis=self.axis)
return self.out
| bsd-3-clause |
rabernat/xmitgcm | xmitgcm/_version.py | 2 | 18453 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "xmitgcm-"
cfg.versionfile_source = "xmitgcm/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
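# Worked examples for render_pep440 (the pieces below are illustrative, not
# taken from a real repository):
#   {"closest-tag": "1.2.0", "distance": 0, "short": "abc1234", "dirty": False}
#       -> "1.2.0"
#   {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True}
#       -> "1.2.0+3.gabc1234.dirty"
#   {"closest-tag": None, "distance": 7, "short": "abc1234", "dirty": False}
#       -> "0+untagged.7.gabc1234"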
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| mit |
aselle/tensorflow | tensorflow/contrib/timeseries/python/timeseries/math_utils.py | 14 | 43089 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities used by time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def clip_covariance(
covariance_matrix, maximum_variance_ratio, minimum_variance):
"""Enforce constraints on a covariance matrix to improve numerical stability.
Args:
covariance_matrix: A [..., N, N] batch of covariance matrices.
maximum_variance_ratio: The maximum allowed ratio of two diagonal
entries. Any entries lower than the maximum entry divided by this ratio
will be set to that value.
minimum_variance: A floor for diagonal entries in the returned matrix.
Returns:
A new covariance matrix with the requested constraints enforced. If the
input was positive definite, the output will be too.
"""
# TODO(allenl): Smarter scaling here so that correlations are preserved when
# fiddling with diagonal elements.
diagonal = array_ops.matrix_diag_part(covariance_matrix)
maximum = math_ops.reduce_max(diagonal, axis=-1, keepdims=True)
new_diagonal = gen_math_ops.maximum(
diagonal, maximum / maximum_variance_ratio)
return array_ops.matrix_set_diag(
covariance_matrix, math_ops.maximum(new_diagonal, minimum_variance))
def block_diagonal(matrices, dtype=dtypes.float32, name="block_diagonal"):
r"""Constructs block-diagonal matrices from a list of batched 2D tensors.
Args:
matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
matrices with the same batch dimension).
dtype: Data type to use. The Tensors in `matrices` must match this dtype.
name: A name for the returned op.
Returns:
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
"""
matrices = [ops.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tensor_shape.Dimension(0)
blocked_cols = tensor_shape.Dimension(0)
batch_shape = tensor_shape.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = math_ops.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
row_before_length = current_column
current_column += matrix_shape[-1]
row_after_length = ret_columns - current_column
row_blocks.append(
array_ops.pad(
tensor=matrix,
paddings=array_ops.concat(
[
array_ops.zeros(
[array_ops.rank(matrix) - 1, 2], dtype=dtypes.int32), [(
row_before_length, row_after_length)]
],
axis=0)))
blocked = array_ops.concat(row_blocks, -2, name=name)
blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
return blocked
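# Hedged usage sketch for block_diagonal (shapes are the point; values and the
# session/eager mechanics are omitted): two batched inputs of shapes [3, 2, 3]
# and [3, 4, 5] are stacked along the main diagonal, giving shape [3, 6, 8]
# with zeros off the blocks.
#
#     blocked = block_diagonal([array_ops.ones([3, 2, 3]),
#                               array_ops.ones([3, 4, 5])])
#     # blocked has static shape [3, 6, 8]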
def power_sums_tensor(array_size, power_matrix, multiplier):
r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).
Args:
array_size: The number of non-trivial sums to pre-compute.
power_matrix: The "A" matrix above.
multiplier: The "B" matrix above
Returns:
A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
S[0] is the zero matrix
S[1] is B
S[2] is A B A^T + B
...and so on
"""
array_size = math_ops.cast(array_size, dtypes.int32)
power_matrix = ops.convert_to_tensor(power_matrix)
identity_like_power_matrix = linalg_ops.eye(
array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
identity_like_power_matrix.set_shape(
ops.convert_to_tensor(power_matrix).get_shape())
transition_powers = functional_ops.scan(
lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
math_ops.range(array_size - 1),
initializer=identity_like_power_matrix)
summed = math_ops.cumsum(
array_ops.concat([
array_ops.expand_dims(multiplier, 0), math_ops.matmul(
batch_times_matrix(transition_powers, multiplier),
transition_powers,
adjoint_b=True)
], 0))
return array_ops.concat(
[array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
def matrix_to_powers(matrix, powers):
"""Raise a single matrix to multiple powers."""
matrix_tiled = array_ops.tile(
array_ops.expand_dims(matrix, 0), [array_ops.size(powers), 1, 1])
return batch_matrix_pow(matrix_tiled, powers)
def batch_matrix_pow(matrices, powers):
"""Compute powers of matrices, e.g. A^3 = matmul(matmul(A, A), A).
Uses exponentiation by squaring, with O(log(p)) matrix multiplications to
compute A^p.
Args:
matrices: [batch size x N x N]
powers: Which integer power to raise each matrix to [batch size]
Returns:
The matrices raised to their respective powers, same dimensions as the
"matrices" argument.
"""
def terminate_when_all_zero(current_argument, residual_powers, accumulator):
del current_argument, accumulator # not used for condition
do_exit = math_ops.reduce_any(
math_ops.greater(residual_powers, array_ops.ones_like(residual_powers)))
return do_exit
def do_iteration(current_argument, residual_powers, accumulator):
"""Compute one step of iterative exponentiation by squaring.
The recursive form is:
power(A, p) = { power(matmul(A, A), p / 2) for even p
{ matmul(A, power(matmul(A, A), (p - 1) / 2)) for odd p
power(A, 0) = I
The power(A, 0) = I case is handled by starting with accumulator set to the
identity matrix; matrices with zero residual powers are passed through
unchanged.
Args:
current_argument: On this step, what is the first argument (A^2..^2) to
the (unrolled) recursive function? [batch size x N x N]
residual_powers: On this step, what is the second argument (residual p)?
[batch_size]
accumulator: Accumulates the exterior multiplications from the odd
powers (initially the identity matrix). [batch_size x N x N]
Returns:
Updated versions of each argument for one step of the unrolled
computation. Does not change parts of the batch which have a residual
power of zero.
"""
is_even = math_ops.equal(residual_powers % 2,
array_ops.zeros(
array_ops.shape(residual_powers),
dtype=dtypes.int32))
new_accumulator = array_ops.where(is_even, accumulator,
math_ops.matmul(accumulator,
current_argument))
new_argument = math_ops.matmul(current_argument, current_argument)
do_update = math_ops.greater(residual_powers, 1)
new_residual_powers = residual_powers - residual_powers % 2
new_residual_powers //= 2
# Stop updating if we've reached our base case; some batch elements may
# finish sooner than others
accumulator = array_ops.where(do_update, new_accumulator, accumulator)
current_argument = array_ops.where(do_update, new_argument,
current_argument)
residual_powers = array_ops.where(do_update, new_residual_powers,
residual_powers)
return (current_argument, residual_powers, accumulator)
matrices = ops.convert_to_tensor(matrices)
powers = math_ops.cast(powers, dtype=dtypes.int32)
ident = array_ops.expand_dims(
array_ops.diag(
array_ops.ones([array_ops.shape(matrices)[1]], dtype=matrices.dtype)),
0)
ident_tiled = array_ops.tile(ident, [array_ops.shape(matrices)[0], 1, 1])
(final_argument,
final_residual_power, final_accumulator) = control_flow_ops.while_loop(
terminate_when_all_zero, do_iteration, [matrices, powers, ident_tiled])
return array_ops.where(
math_ops.equal(final_residual_power,
array_ops.zeros_like(
final_residual_power, dtype=dtypes.int32)),
ident_tiled, math_ops.matmul(final_argument, final_accumulator))
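# Illustrative example for batch_matrix_pow (values assumed, not from the
# original tests): raising a batch of two 2x2 matrices to powers [3, 0].
# A residual power of zero yields the identity for that batch element.
#
#     mats = constant_op.constant([[[2., 0.], [0., 2.]],
#                                  [[1., 1.], [0., 1.]]])
#     pows = constant_op.constant([3, 0])
#     batch_matrix_pow(mats, pows)
#     # -> [[[8., 0.], [0., 8.]],
#     #     [[1., 0.], [0., 1.]]]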
# TODO(allenl): would be useful if this was built into batch_matmul
def batch_times_matrix(batch, matrix, adj_x=False, adj_y=False):
"""Multiply a batch of matrices by a single matrix.
Functionally equivalent to:
tf.matmul(batch, array_ops.tile(gen_math_ops.expand_dims(matrix, 0),
[array_ops.shape(batch)[0], 1, 1]),
adjoint_a=adj_x, adjoint_b=adj_y)
Args:
batch: [batch_size x N x M] after optional transpose
matrix: [M x P] after optional transpose
adj_x: If true, transpose the second two dimensions of "batch" before
multiplying.
adj_y: If true, transpose "matrix" before multiplying.
Returns:
[batch_size x N x P]
"""
batch = ops.convert_to_tensor(batch)
matrix = ops.convert_to_tensor(matrix)
assert batch.get_shape().ndims == 3
assert matrix.get_shape().ndims == 2
if adj_x:
batch = array_ops.transpose(batch, [0, 2, 1])
batch_dimension = batch.get_shape()[0].value
first_dimension = batch.get_shape()[1].value
tensor_batch_shape = array_ops.shape(batch)
if batch_dimension is None:
batch_dimension = tensor_batch_shape[0]
if first_dimension is None:
first_dimension = tensor_batch_shape[1]
matrix_first_dimension, matrix_second_dimension = matrix.get_shape().as_list()
batch_reshaped = array_ops.reshape(batch, [-1, tensor_batch_shape[2]])
if adj_y:
if matrix_first_dimension is None:
matrix_first_dimension = array_ops.shape(matrix)[0]
result_shape = [batch_dimension, first_dimension, matrix_first_dimension]
else:
if matrix_second_dimension is None:
matrix_second_dimension = array_ops.shape(matrix)[1]
result_shape = [batch_dimension, first_dimension, matrix_second_dimension]
return array_ops.reshape(
math_ops.matmul(batch_reshaped, matrix, adjoint_b=adj_y), result_shape)
def matrix_times_batch(matrix, batch, adj_x=False, adj_y=False):
"""Like batch_times_matrix, but with the multiplication order swapped."""
return array_ops.transpose(
batch_times_matrix(
batch=batch, matrix=matrix, adj_x=not adj_y, adj_y=not adj_x),
[0, 2, 1])
def make_toeplitz_matrix(inputs, name=None):
"""Make a symmetric Toeplitz matrix from input array of values.
Args:
inputs: a 3-D tensor of shape [num_blocks, block_size, block_size].
name: the name of the operation.
Returns:
a symmetric Toeplitz matrix of shape
[num_blocks*block_size, num_blocks*block_size].
"""
num_blocks = array_ops.shape(inputs)[0]
block_size = array_ops.shape(inputs)[1]
output_size = block_size * num_blocks
lags = array_ops.reshape(math_ops.range(num_blocks), shape=[1, -1])
indices = math_ops.abs(lags - array_ops.transpose(lags))
output = array_ops.gather(inputs, indices)
output = array_ops.reshape(
array_ops.transpose(output, [0, 2, 1, 3]), [output_size, output_size])
return array_ops.identity(output, name=name)
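# Worked example of the block-Toeplitz structure (illustrative 1x1 blocks):
# an input of shape [3, 1, 1] holding values [a, b, c] produces the 3x3
# symmetric Toeplitz matrix
#     [[a, b, c],
#      [b, a, b],
#      [c, b, a]]
# since output block (i, j) is taken from input block |i - j|.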
# TODO(allenl): Investigate alternative parameterizations.
def sign_magnitude_positive_definite(
raw, off_diagonal_scale=0., overall_scale=0.):
"""Constructs a positive definite matrix from an unconstrained input matrix.
We want to keep the whole matrix on a log scale, but also allow off-diagonal
elements to be negative, so the sign of off-diagonal elements is modeled
separately from their magnitude (using the lower and upper triangles
respectively). Specifically:
for i < j, we have:
output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)
output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)
output = output_cholesky^T * output_cholesky
where raw, off_diagonal_scale, and overall_scale are
un-constrained real-valued variables. The resulting values are stable
around zero due to the exponential (and the softsign keeps the function
smooth).
Args:
raw: A [..., M, M] Tensor.
off_diagonal_scale: A scalar or [...] shaped Tensor controlling the relative
scale of off-diagonal values in the output matrix.
overall_scale: A scalar or [...] shaped Tensor controlling the overall scale
of the output matrix.
Returns:
The `output` matrix described above, a [..., M, M] positive definite matrix.
"""
raw = ops.convert_to_tensor(raw)
diagonal = array_ops.matrix_diag_part(raw)
def _right_pad_with_ones(tensor, target_rank):
# Allow broadcasting even if overall_scale and off_diagonal_scale have batch
# dimensions
tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
return array_ops.reshape(tensor,
array_ops.concat(
[
array_ops.shape(tensor), array_ops.ones(
[target_rank - array_ops.rank(tensor)],
dtype=target_rank.dtype)
],
axis=0))
# We divide the log values by 2 to compensate for the squaring that happens
# when transforming Cholesky factors into positive definite matrices.
sign_magnitude = (gen_math_ops.exp(
(raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
_right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
nn.softsign(array_ops.matrix_transpose(raw)))
sign_magnitude.set_shape(raw.get_shape())
cholesky_factor = array_ops.matrix_set_diag(
input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
overall_scale, array_ops.rank(diagonal))) / 2.))
return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)
def transform_to_covariance_matrices(input_vectors, matrix_size):
"""Construct covariance matrices via transformations from input_vectors.
Args:
input_vectors: A [batch size x input size] batch of vectors to transform.
matrix_size: An integer indicating one dimension of the (square) output
matrix.
Returns:
A [batch size x matrix_size x matrix_size] batch of covariance matrices.
"""
combined_values = layers.fully_connected(
input_vectors, matrix_size**2 + 2, activation_fn=None)
return sign_magnitude_positive_definite(
raw=array_ops.reshape(combined_values[..., :-2],
array_ops.concat([
array_ops.shape(combined_values)[:-1],
[matrix_size, matrix_size]
], 0)),
off_diagonal_scale=combined_values[..., -2],
overall_scale=combined_values[..., -1])
def variable_covariance_matrix(
size, name, dtype, initial_diagonal_values=None,
initial_overall_scale_log=0.):
"""Construct a Variable-parameterized positive definite matrix.
Useful for parameterizing covariance matrices.
Args:
size: The size of the main diagonal, the returned matrix having shape [size
x size].
name: The name to use when defining variables and ops.
dtype: The floating point data type to use.
initial_diagonal_values: A Tensor with shape [size] with initial values for
the diagonal values of the returned matrix. Must be positive.
initial_overall_scale_log: Initial value of the bias term for every element
of the matrix in log space.
Returns:
A Variable-parameterized covariance matrix with shape [size x size].
"""
raw_values = variable_scope.get_variable(
name + "_pre_transform",
dtype=dtype,
shape=[size, size],
initializer=init_ops.zeros_initializer())
if initial_diagonal_values is not None:
raw_values += array_ops.matrix_diag(math_ops.log(initial_diagonal_values))
return array_ops.identity(
sign_magnitude_positive_definite(
raw=raw_values,
off_diagonal_scale=variable_scope.get_variable(
name + "_off_diagonal_scale",
dtype=dtype,
initializer=constant_op.constant(-5., dtype=dtype)),
overall_scale=ops.convert_to_tensor(
initial_overall_scale_log, dtype=dtype) +
variable_scope.get_variable(
name + "_overall_scale",
dtype=dtype,
shape=[],
initializer=init_ops.zeros_initializer())),
name=name)
def batch_start_time(times):
return times[:, 0]
def batch_end_time(times):
return times[:, -1]
def log_noninformative_covariance_prior(covariance):
"""Compute a relatively uninformative prior for noise parameters.
Helpful for avoiding noise over-estimation, where noise otherwise decreases
very slowly during optimization.
See:
Villegas, C. On the A Priori Distribution of the Covariance Matrix.
Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.
Args:
covariance: A covariance matrix.
Returns:
For a [p x p] matrix:
log(det(covariance)^(-(p + 1) / 2))
"""
# Avoid zero/negative determinants due to numerical errors
covariance += array_ops.diag(1e-8 * array_ops.ones(
shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
covariance.dtype) / 2.)
return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
def entropy_matched_cauchy_scale(covariance):
"""Approximates a similar Cauchy distribution given a covariance matrix.
Since Cauchy distributions do not have moments, entropy matching provides one
way to set a Cauchy's scale parameter in a way that provides a similar
distribution. The effect is dividing the standard deviation of an independent
Gaussian by a constant very near 3.
To set the scale of the Cauchy distribution, we first select the diagonals of
`covariance`. Since this ignores cross terms, it overestimates the entropy of
the Gaussian. For each of these variances, we solve for the Cauchy scale
parameter which gives the same entropy as the Gaussian with that
variance. This means setting the (univariate) Gaussian entropy
0.5 * ln(2 * variance * pi * e)
equal to the Cauchy entropy
ln(4 * pi * scale)
Solving, we get scale = sqrt(variance * (e / (8 pi))).
Args:
covariance: A [batch size x N x N] batch of covariance matrices to produce
Cauchy scales for.
Returns:
A [batch size x N] set of Cauchy scale parameters for each part of the batch
and each dimension of the input Gaussians.
"""
return math_ops.sqrt(math.e / (8. * math.pi) *
array_ops.matrix_diag_part(covariance))
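# Numerical sanity check (assumed example): for a unit-variance Gaussian the
# entropy-matched Cauchy scale is sqrt(e / (8 * pi)) ~= 0.329, i.e. roughly
# the standard deviation divided by three, matching the description above.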
class TensorValuedMutableDenseHashTable(lookup.MutableDenseHashTable):
"""A version of MutableDenseHashTable which stores arbitrary Tensor shapes.
Since MutableDenseHashTable only allows vectors right now, simply adds reshape
ops on both ends.
"""
def __init__(self, key_dtype, value_dtype, default_value, *args, **kwargs):
self._non_vector_value_shape = array_ops.shape(default_value)
super(TensorValuedMutableDenseHashTable, self).__init__(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=array_ops.reshape(default_value, [-1]),
*args,
**kwargs)
def insert(self, keys, values, name=None):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype)
keys_flat = array_ops.reshape(keys, [-1])
return super(TensorValuedMutableDenseHashTable, self).insert(
keys=keys_flat,
# Each key has one corresponding value, so the shape of the tensor of
# values for every key is key_shape + value_shape
values=array_ops.reshape(values, [array_ops.shape(keys_flat)[0], -1]),
name=name)
def lookup(self, keys, name=None):
keys_flat = array_ops.reshape(
ops.convert_to_tensor(keys, dtype=self._key_dtype), [-1])
return array_ops.reshape(
super(TensorValuedMutableDenseHashTable, self).lookup(
keys=keys_flat, name=name),
array_ops.concat([array_ops.shape(keys), self._non_vector_value_shape],
0))
class TupleOfTensorsLookup(lookup.LookupInterface):
"""A LookupInterface with nested tuples of Tensors as values.
Creates one MutableDenseHashTable per value Tensor, which has some unnecessary
overhead.
"""
def __init__(
self, key_dtype, default_values, empty_key, name, checkpoint=True):
default_values_flat = nest.flatten(default_values)
self._hash_tables = nest.pack_sequence_as(
default_values,
[TensorValuedMutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=default_value.dtype.base_dtype,
default_value=default_value,
empty_key=empty_key,
name=name + "_{}".format(table_number),
checkpoint=checkpoint)
for table_number, default_value
in enumerate(default_values_flat)])
self._name = name
def lookup(self, keys):
return nest.pack_sequence_as(
self._hash_tables,
[hash_table.lookup(keys)
for hash_table in nest.flatten(self._hash_tables)])
def insert(self, keys, values):
nest.assert_same_structure(self._hash_tables, values)
# Avoid race conditions by requiring that all inputs are computed before any
# inserts happen (an issue if one key's update relies on another's value).
values_flat = [array_ops.identity(value) for value in nest.flatten(values)]
with ops.control_dependencies(values_flat):
insert_ops = [hash_table.insert(keys, value)
for hash_table, value
in zip(nest.flatten(self._hash_tables),
values_flat)]
return control_flow_ops.group(*insert_ops)
def check_table_dtypes(self, key_dtype, value_dtype):
# dtype checking is done in the objects in self._hash_tables
pass
def replicate_state(start_state, batch_size):
"""Create batch versions of state.
Takes a list of Tensors, adds a batch dimension, and replicates
batch_size times across that batch dimension. Used to replicate the
non-batch state returned by get_start_state in define_loss.
Args:
start_state: Model-defined state to replicate.
batch_size: Batch dimension for data.
Returns:
Replicated versions of the state.
"""
flattened_state = nest.flatten(start_state)
replicated_state = [
array_ops.tile(
array_ops.expand_dims(state_nonbatch, 0),
array_ops.concat([[batch_size], array_ops.ones(
[array_ops.rank(state_nonbatch)], dtype=dtypes.int32)], 0))
for state_nonbatch in flattened_state
]
return nest.pack_sequence_as(start_state, replicated_state)
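# Hedged shape example for replicate_state: a start state consisting of a
# scalar and a [4]-vector, replicated for a batch of size 3, becomes a
# structure holding a [3]-shaped tensor and a [3, 4]-shaped tensor, each a
# tiled copy of the corresponding non-batched value.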
Moments = collections.namedtuple("Moments", ["mean", "variance"])
# Currently all of these statistics are computed incrementally (i.e. are updated
# every time a new mini-batch of training data is presented) when this object is
# created in InputStatisticsFromMiniBatch.
InputStatistics = collections.namedtuple(
"InputStatistics",
["series_start_moments", # The mean and variance of each feature in a chunk
# (with a size configured in the statistics
# object) at the start of the series. A tuple of
# (mean, variance), each with shape [number of
# features], floating point. One use is in state
# space models, to keep priors calibrated even as
# earlier parts of the series are presented. If
# this object was created by
# InputStatisticsFromMiniBatch, these moments are
# computed based on the earliest chunk of data
# presented so far. However, there is a race
# condition in the update, so these may reflect
# statistics later in the series, but should
# eventually reflect statistics in a chunk at the
# series start.
"overall_feature_moments", # The mean and variance of each feature over
# the entire series. A tuple of (mean,
# variance), each with shape [number of
# features]. If this object was created by
# InputStatisticsFromMiniBatch, these moments
# are estimates based on the data seen so far.
"start_time", # The first (lowest) time in the series, a scalar
# integer. If this object was created by
# InputStatisticsFromMiniBatch, this is the lowest time seen
# so far rather than the lowest time that will ever be seen
# (guaranteed to be at least as low as the lowest time
# presented in the current minibatch).
"total_observation_count", # Count of data points, a scalar integer. If
# this object was created by
# InputStatisticsFromMiniBatch, this is an
# estimate of the total number of observations
# in the whole dataset computed based on the
# density of the series and the minimum and
# maximum times seen.
])
# TODO(allenl): It would be nice to do something with full series statistics
# when the user provides that.
class InputStatisticsFromMiniBatch(object):
"""Generate statistics from mini-batch input."""
def __init__(self, num_features, dtype, starting_variance_window_size=16):
"""Configure the input statistics object.
Args:
num_features: Number of features for the time series
dtype: The floating point data type to use.
starting_variance_window_size: The number of datapoints to use when
computing the mean and variance at the start of the series.
"""
self._starting_variance_window_size = starting_variance_window_size
self._num_features = num_features
self._dtype = dtype
def initialize_graph(self, features, update_statistics=True):
"""Create any ops needed to provide input statistics.
Should be called before statistics are requested.
Args:
features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
update_statistics: Whether `features` should be used to update adaptive
statistics. Typically True for training and false for evaluation.
Returns:
An InputStatistics object composed of Variables, which will be updated
based on mini-batches of data if requested.
"""
if (TrainEvalFeatures.TIMES in features
and TrainEvalFeatures.VALUES in features):
times = features[TrainEvalFeatures.TIMES]
values = features[TrainEvalFeatures.VALUES]
else:
# times and values may not be available, for example during prediction. We
# still need to retrieve our variables so that they can be read from, even
# if we're not going to update them.
times = None
values = None
# Create/retrieve variables representing input statistics, initialized
# without data to avoid deadlocking if variables are initialized before
# queue runners are started.
with variable_scope.variable_scope("input_statistics", use_resource=True):
statistics = self._create_variable_statistics_object()
with variable_scope.variable_scope(
"input_statistics_auxiliary", use_resource=True):
# Secondary statistics, necessary for the incremental computation of the
# primary statistics (e.g. counts and sums for computing a mean
# incrementally).
auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
num_features=self._num_features, dtype=self._dtype)
if update_statistics and times is not None and values is not None:
# If we have times and values from mini-batch input, create update ops to
# take the new data into account.
assign_op = self._update_statistics_from_mini_batch(
statistics, auxiliary_variables, times, values)
with ops.control_dependencies([assign_op]):
stat_variables = nest.pack_sequence_as(statistics, [
array_ops.identity(tensor) for tensor in nest.flatten(statistics)
])
# Since start time updates have a race condition, ensure that the
# reported start time is at least as low as the lowest time in this
# mini-batch. The start time should converge on the correct value
# eventually even with the race condition, but for example state space
# models have an assertion which could fail without this
# post-processing.
return stat_variables._replace(start_time=gen_math_ops.minimum(
stat_variables.start_time, math_ops.reduce_min(times)))
else:
return statistics
class _AdaptiveInputAuxiliaryStatistics(collections.namedtuple(
"_AdaptiveInputAuxiliaryStatistics",
["max_time_seen", # The maximum time seen (best effort if updated from
# multiple workers; see notes about race condition
# below).
"chunk_count", # The number of chunks seen.
"inter_observation_duration_sum", # The sum across chunks of their "time
# density" (number of times per
# example).
"example_count", # The number of examples seen (each example has a
# single time associated with it and one or more
# real-valued features).
"overall_feature_sum", # The sum of values for each feature. Shape
# [number of features].
"overall_feature_sum_of_squares", # The sum of squared values for each
# feature. Shape [number of features]
])):
"""Extra statistics used to incrementally update InputStatistics."""
def __new__(cls, num_features, dtype):
return super(
InputStatisticsFromMiniBatch # pylint: disable=protected-access
._AdaptiveInputAuxiliaryStatistics,
cls).__new__(
cls,
max_time_seen=variable_scope.get_variable(
name="max_time_seen",
initializer=dtypes.int64.min,
dtype=dtypes.int64,
trainable=False),
chunk_count=variable_scope.get_variable(
name="chunk_count",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int64,
trainable=False),
inter_observation_duration_sum=variable_scope.get_variable(
name="inter_observation_duration_sum",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtype,
trainable=False),
example_count=variable_scope.get_variable(
name="example_count",
shape=[],
dtype=dtypes.int64,
trainable=False),
overall_feature_sum=variable_scope.get_variable(
name="overall_feature_sum",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
overall_feature_sum_of_squares=variable_scope.get_variable(
name="overall_feature_sum_of_squares",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False))
def _update_statistics_from_mini_batch(
self, statistics, auxiliary_variables, times, values):
"""Given mini-batch input, update `statistics` and `auxiliary_variables`."""
values = math_ops.cast(values, self._dtype)
# The density (measured in times per observation) that we see in each part
# of the mini-batch.
batch_inter_observation_duration = (math_ops.cast(
math_ops.reduce_max(times, axis=1) - math_ops.reduce_min(times, axis=1),
self._dtype) / math_ops.cast(
array_ops.shape(times)[1] - 1, self._dtype))
# Co-locate updates with their variables to minimize race conditions when
# updating statistics.
with ops.colocate_with(auxiliary_variables.max_time_seen):
# There is a race condition if this value is being updated from multiple
# workers. However, it should eventually reach the correct value if the
# last chunk is presented enough times.
max_time_seen_assign = state_ops.assign(
auxiliary_variables.max_time_seen,
gen_math_ops.maximum(auxiliary_variables.max_time_seen,
math_ops.reduce_max(times)))
with ops.colocate_with(auxiliary_variables.chunk_count):
chunk_count_assign = state_ops.assign_add(auxiliary_variables.chunk_count,
array_ops.shape(
times,
out_type=dtypes.int64)[0])
with ops.colocate_with(auxiliary_variables.inter_observation_duration_sum):
inter_observation_duration_assign = state_ops.assign_add(
auxiliary_variables.inter_observation_duration_sum,
math_ops.reduce_sum(batch_inter_observation_duration))
with ops.colocate_with(auxiliary_variables.example_count):
example_count_assign = state_ops.assign_add(
auxiliary_variables.example_count,
array_ops.size(times, out_type=dtypes.int64))
# Note: These mean/variance updates assume that all points are equally
# likely, which is not true if _chunks_ are sampled uniformly from the space
# of all possible contiguous chunks, since points at the start and end of
# the series are then members of fewer chunks. For series which are much
# longer than the chunk size (the usual/expected case), this effect becomes
# irrelevant.
with ops.colocate_with(auxiliary_variables.overall_feature_sum):
overall_feature_sum_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum,
math_ops.reduce_sum(values, axis=[0, 1]))
with ops.colocate_with(auxiliary_variables.overall_feature_sum_of_squares):
overall_feature_sum_of_squares_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum_of_squares,
math_ops.reduce_sum(values**2, axis=[0, 1]))
per_chunk_aux_updates = control_flow_ops.group(
max_time_seen_assign, chunk_count_assign,
inter_observation_duration_assign, example_count_assign,
overall_feature_sum_assign, overall_feature_sum_of_squares_assign)
with ops.control_dependencies([per_chunk_aux_updates]):
example_count_float = math_ops.cast(auxiliary_variables.example_count,
self._dtype)
new_feature_mean = (auxiliary_variables.overall_feature_sum /
example_count_float)
overall_feature_mean_update = state_ops.assign(
statistics.overall_feature_moments.mean, new_feature_mean)
overall_feature_var_update = state_ops.assign(
statistics.overall_feature_moments.variance,
# De-biased n / (n - 1) variance correction
example_count_float / (example_count_float - 1.) *
(auxiliary_variables.overall_feature_sum_of_squares /
example_count_float - new_feature_mean**2))
# TODO(b/35675805): Remove this cast
min_time_batch = math_ops.cast(math_ops.argmin(times[:, 0]), dtypes.int32)
def series_start_updates():
# If this is the lowest-time chunk that we have seen so far, update
# series start moments to reflect that. Note that these statistics are
# "best effort", as there are race conditions in the update (however,
# they should eventually converge if the start of the series is
# presented enough times).
mean, variance = nn.moments(
values[min_time_batch, :self._starting_variance_window_size],
axes=[0])
return control_flow_ops.group(
state_ops.assign(statistics.series_start_moments.mean, mean),
state_ops.assign(statistics.series_start_moments.variance,
variance))
with ops.colocate_with(statistics.start_time):
series_start_update = control_flow_ops.cond(
# Update moments whenever we even match the lowest time seen so far,
# to ensure that series start statistics are eventually updated to
# their correct values, despite race conditions (i.e. eventually
# statistics.start_time will reflect the global lowest time, and
# given that we will eventually update the series start moments to
# their correct values).
math_ops.less_equal(times[min_time_batch, 0],
statistics.start_time),
series_start_updates,
control_flow_ops.no_op)
with ops.control_dependencies([series_start_update]):
# There is a race condition if this update is performed in parallel on
# multiple workers. Since models may be sensitive to being presented
# with times before the putative start time, the value of this
# variable is post-processed above to guarantee that each worker is
# presented with a start time which is at least as low as the lowest
# time in its current mini-batch.
start_time_update = state_ops.assign(statistics.start_time,
gen_math_ops.minimum(
statistics.start_time,
math_ops.reduce_min(times)))
inter_observation_duration_estimate = (
auxiliary_variables.inter_observation_duration_sum / math_ops.cast(
auxiliary_variables.chunk_count, self._dtype))
# Estimate the total number of observations as:
# (end time - start time + 1) * average intra-chunk time density
total_observation_count_update = state_ops.assign(
statistics.total_observation_count,
math_ops.cast(
gen_math_ops.round(
math_ops.cast(auxiliary_variables.max_time_seen -
statistics.start_time + 1, self._dtype) /
inter_observation_duration_estimate), dtypes.int64))
per_chunk_stat_updates = control_flow_ops.group(
overall_feature_mean_update, overall_feature_var_update,
series_start_update, start_time_update,
total_observation_count_update)
return per_chunk_stat_updates
def _create_variable_statistics_object(self):
"""Creates non-trainable variables representing input statistics."""
series_start_moments = Moments(
mean=variable_scope.get_variable(
name="series_start_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="series_start_variance",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
overall_feature_moments = Moments(
mean=variable_scope.get_variable(
name="overall_feature_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="overall_feature_var",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
start_time = variable_scope.get_variable(
name="start_time",
dtype=dtypes.int64,
initializer=dtypes.int64.max,
trainable=False)
total_observation_count = variable_scope.get_variable(
name="total_observation_count",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.ones_initializer(),
trainable=False)
return InputStatistics(
series_start_moments=series_start_moments,
overall_feature_moments=overall_feature_moments,
start_time=start_time,
total_observation_count=total_observation_count)
| apache-2.0 |
TAMU-CPT/galaxy-tools | tools/gff3/gff3_filter.py | 1 | 1553 | #!/usr/bin/env python
import sys
import logging
import argparse
from cpt_gffParser import gffParse, gffWrite
from gff3 import feature_lambda, feature_test_qual_value
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def gff_filter(gff3, id_list=None, id="", attribute_field="ID", subfeatures=True):
attribute_field = attribute_field.split("__cn__")
if id_list:
filter_strings = [line.strip() for line in id_list]
else:
filter_strings = [x.strip() for x in id.split("__cn__")]
for rec in gffParse(gff3):
rec.features = feature_lambda(
rec.features,
feature_test_qual_value,
{"qualifier": attribute_field, "attribute_list": filter_strings},
subfeatures=subfeatures,
)
rec.annotations = {}
gffWrite([rec], sys.stdout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="extract features from a GFF3 file based on ID/qualifiers"
)
parser.add_argument("gff3", type=argparse.FileType("r"), help="GFF3 annotations")
parser.add_argument("--id_list", type=argparse.FileType("r"))
parser.add_argument("--id", type=str)
parser.add_argument(
"--attribute_field",
type=str,
help="Column 9 Field to search against",
default="ID",
)
parser.add_argument(
"--subfeatures",
action="store_true",
help="Retain subfeature tree of matched features",
)
args = parser.parse_args()
gff_filter(**vars(args))
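# Example invocations (file names and IDs below are illustrative):
#
#   python gff3_filter.py annotations.gff3 --id gene_42 > gene_42.gff3
#   python gff3_filter.py annotations.gff3 --id_list keep_ids.txt \
#       --attribute_field Name --subfeatures > filtered.gff3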
| gpl-3.0 |
boyuegame/kbengine | kbe/src/lib/python/Lib/subprocess.py | 67 | 63971 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several older modules and functions:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr is not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument.
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired
will be raised if a timeout was specified and expired.
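As an illustrative sketch only (the command and the timeout value are
arbitrary), both exceptions can be handled like this:
    try:
        output = check_output(["sleep", "10"], timeout=1)
    except TimeoutExpired:
        print("command timed out", file=sys.stderr)
    except CalledProcessError as e:
        print("command failed with code", e.returncode, file=sys.stderr)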
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
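For example, the following is safe even though the argument contains shell
metacharacters, because no shell interprets it (illustrative only; the child
simply prints the argument literally):
    retcode = call(["echo", "$HOME; rm -rf /"])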
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import time
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class TimeoutExpired(SubprocessError):
"""This exception is raised when the timeout expires while waiting for a
child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
else:
import _posixsubprocess
import select
import selectors
try:
import threading
except ImportError:
import dummy_threading as threading
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
# poll/select have the advantage of not requiring any extra file
# descriptor, contrarily to epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
_PopenSelector = selectors.PollSelector
else:
_PopenSelector = selectors.SelectSelector
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
class Handle(int):
closed = False
def Close(self, CloseHandle=_winapi.CloseHandle):
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "Handle(%d)" % int(self)
__del__ = Close
__str__ = __repr__
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except InterruptedError:
continue
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
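    # Illustrative, assumed behaviour: when run under "python -B -OO -W error"
    # this returns something like ['-B', '-OO', '-Werror'].  The relative order
    # of the flag options is not guaranteed, since it follows dict iteration
    # over flag_opt_map below; the -W options are always appended last.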
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
'hash_randomization': 'R',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
if flag == 'hash_randomization':
v = 1 # Handle specification of an exact seed
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
def call(*popenargs, timeout=None, **kwargs):
"""Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except:
p.kill()
p.wait()
raise
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the call function. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, timeout=None, **kwargs):
r"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it too will be used internally. Example:
>>> check_output(["sed", "-e", "s/foo/bar/"],
... input=b"when in the course of fooman events\n")
b'when in the course of barman events\n'
If universal_newlines=True is passed, the return value will be a
string rather than bytes.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
inputdata = kwargs['input']
del kwargs['input']
kwargs['stdin'] = PIPE
else:
inputdata = None
with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
try:
output, unused_err = process.communicate(inputdata, timeout=timeout)
except TimeoutExpired:
process.kill()
output, unused_err = process.communicate()
raise TimeoutExpired(process.args, timeout, output=output)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
# Various tools for executing commands and looking at their output and status.
#
def getstatusoutput(cmd):
""" Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
    meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
try:
data = check_output(cmd, shell=True, universal_newlines=True, stderr=STDOUT)
status = 0
except CalledProcessError as ex:
data = ex.output
status = ex.returncode
if data[-1:] == '\n':
data = data[:-1]
return status, data
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
_child_created = False # Set here since __del__ checks it
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
# Held while anything is calling waitpid before returncode has been
# updated to prevent clobbering returncode if wait() or poll() are
# called from multiple threads at once. After acquiring the lock,
# code must re-check self.returncode to see if another thread just
# finished a waitpid() call.
self._waitpid_lock = threading.Lock()
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except OSError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except OSError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be
bytes to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except OSError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except OSError as e:
if e.errno == errno.EPIPE:
# communicate() should ignore pipe full error
pass
elif (e.errno == errno.EINVAL
and self.poll() is not None):
# Issue #19612: stdin.write() fails with EINVAL
# if the process already exited before the write
pass
else:
raise
self.stdin.close()
# Wait for the reader threads, or time out. If we time out, the
# threads remain reading and the fds left open in case the user
# calls communicate again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = os.pipe()
# errpipe_write must not be in the standard io 0, 1, or 2 fd range.
low_fds_to_close = []
while errpipe_write < 3:
low_fds_to_close.append(errpipe_write)
errpipe_write = os.dup(errpipe_write)
for low_fd in low_fds_to_close:
os.close(low_fd)
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'SubprocessError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
SubprocessError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
"""All callers to this function MUST hold self._waitpid_lock."""
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise SubprocessError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
if not self._waitpid_lock.acquire(False):
# Something else is busy calling waitpid. Don't allow two
# at once. We know nothing yet.
return None
try:
if self.returncode is not None:
return self.returncode # Another thread waited.
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except OSError as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
finally:
self._waitpid_lock.release()
return self.returncode
def _try_wait(self, wait_flags):
"""All callers to this function MUST hold self._waitpid_lock."""
try:
(pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
if self._waitpid_lock.acquire(False):
try:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
finally:
self._waitpid_lock.release()
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
with self._waitpid_lock:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to
# return 0 even without WNOHANG in odd situations.
# http://bugs.python.org/issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
stdout = None
stderr = None
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fileobj2output = {}
if self.stdout:
self._fileobj2output[self.stdout] = []
if self.stderr:
self._fileobj2output[self.stderr] = []
if self.stdout:
stdout = self._fileobj2output[self.stdout]
if self.stderr:
stderr = self._fileobj2output[self.stderr]
self._save_input(input)
if self._input:
input_view = memoryview(self._input)
with _PopenSelector() as selector:
if self.stdin and input:
selector.register(self.stdin, selectors.EVENT_WRITE)
if self.stdout:
selector.register(self.stdout, selectors.EVENT_READ)
if self.stderr:
selector.register(self.stderr, selectors.EVENT_READ)
while selector.get_map():
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
ready = selector.select(timeout)
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the file
# objects; they are no longer using C stdio!
for key, events in ready:
if key.fileobj is self.stdin:
chunk = input_view[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(key.fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
selector.unregister(key.fileobj)
key.fileobj.close()
else:
raise
else:
if self._input_offset >= len(self._input):
selector.unregister(key.fileobj)
key.fileobj.close()
elif key.fileobj in (self.stdout, self.stderr):
data = os.read(key.fd, 32768)
if not data:
selector.unregister(key.fileobj)
key.fileobj.close()
self._fileobj2output[key.fileobj].append(data)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
| lgpl-3.0 |
dekom/threepress-bookworm-read-only | bookworm/gdata/src/gdata/tlslite/__init__.py | 409 | 1129 | """
TLS Lite is a free python library that implements SSL v3, TLS v1, and
TLS v1.1. TLS Lite supports non-traditional authentication methods
such as SRP, shared keys, and cryptoIDs, in addition to X.509
certificates. TLS Lite is pure python, however it can access OpenSSL,
cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite
integrates with httplib, xmlrpclib, poplib, imaplib, smtplib,
SocketServer, asyncore, and Twisted.
To use, do::
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket,
or use one of the integration classes in L{tlslite.integration}.
@version: 0.3.8
"""
__version__ = "0.3.8"
__all__ = ["api",
"BaseDB",
"Checker",
"constants",
"errors",
"FileObject",
"HandshakeSettings",
"mathtls",
"messages",
"Session",
"SessionCache",
"SharedKeyDB",
"TLSConnection",
"TLSRecordLayer",
"VerifierDB",
"X509",
"X509CertChain",
"integration",
"utils"]
| bsd-3-clause |
greggian/TapdIn | django/contrib/localflavor/us/models.py | 1 | 1132 | from django.conf import settings
from django.db.models.fields import Field
class USStateField(Field):
def get_internal_type(self):
return "USStateField"
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
return 'CHAR(2)'
else:
return 'varchar(2)'
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USStateSelect
defaults = {'widget': USStateSelect}
defaults.update(kwargs)
return super(USStateField, self).formfield(**defaults)
class PhoneNumberField(Field):
def get_internal_type(self):
return "PhoneNumberField"
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
return 'VARCHAR2(20)'
else:
return 'varchar(20)'
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USPhoneNumberField
defaults = {'form_class': USPhoneNumberField}
defaults.update(kwargs)
return super(PhoneNumberField, self).formfield(**defaults)
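# Illustrative usage in a model definition (the Office model below is
# hypothetical; both fields accept the usual django Field options):
#   from django.db import models
#   from django.contrib.localflavor.us.models import USStateField, PhoneNumberField
#   class Office(models.Model):
#       state = USStateField()
#       phone = PhoneNumberField(blank=True)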
| apache-2.0 |
seraphlnWu/in_trip | in_trip/scripts/change_data_from_hbase_to_pg.py | 1 | 1620 | #coding=utf-8
import time
import cPickle
from in_trip.store_data.views import pg_db,conn
import logging
logger = logging.getLogger('parser')
def creat_table():
sql_str = '''
create table "tmp_hbase_to_pg"(
data text,
timestamp float(24)
)
'''
pg_db.execute(sql_str)
conn.commit()
def insert_data(o_dict, default_value):
    data = cPickle.dumps({
'o_dict' : o_dict,
'default_value' : default_value
})
sql_str = '''
insert into tmp_hbase_to_pg
(data,timestamp)
values
(%s,%s);
'''
try:
pg_db.execute(sql_str,(data,time.time()))
conn.commit()
except Exception as e:
conn.rollback()
logger.error('insert to pg error: %s', e)
def get_data_all():
sql_str = '''
select * from tmp_hbase_to_pg;
'''
pg_db.execute(sql_str)
print pg_db.fetchall()
def get_data(offset,limit=1000):
sql_str = '''
select * from tmp_hbase_to_pg limit(%s) offset(%s);
'''
pg_db.execute(sql_str,(limit,offset))
return pg_db.fetchall()
def insert_into_hbase():
from in_trip.store_data.hbase.run import insert_data as hbase_insert
offset = 0
limit = 1000
while True:
res_list = get_data(offset,limit)
if not res_list:
break
offset = offset + limit
for item in res_list:
tmp_data = cPickle.loads(item[0])
hbase_insert(tmp_data['o_dict'],tmp_data['default_value'])
return True
if __name__ == "__main__":
creat_table()
print "success!"
| mit |
QianBIG/odoo | addons/l10n_ro/res_partner.py | 309 | 2255 | # -*- encoding: utf-8 -*-
##############################################################################
#
# @author - Fekete Mihai <[email protected]>
# Copyright (C) 2011 TOTAL PC SYSTEMS (http://www.www.erpsystems.ro).
# Copyright (C) 2009 (<http://www.filsystem.ro>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_name = "res.partner"
_inherit = "res.partner"
_columns = {
'nrc' : fields.char('NRC', help='Registration number at the Registry of Commerce'),
}
def _auto_init(self, cr, context=None):
result = super(res_partner, self)._auto_init(cr, context=context)
        # Remove constraints for vat, nrc on "commercial entities" because they are not mandatory by legislation.
        # Even though VAT numbers are unique, the NRC field is not unique, and certain entities do not have an
        # NRC number at all; the NRC formatting has also changed a few times, so there is no simple rule for
        # checking whether a number is valid and issued by the Ministry of Finance other than checking online on their website.
cr.execute("""
DROP INDEX IF EXISTS res_partner_vat_uniq_for_companies;
DROP INDEX IF EXISTS res_partner_nrc_uniq_for_companies;
""")
return result
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['nrc']
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ngpestelos/ansible | test/units/parsing/vault/test_vault_editor.py | 142 | 6762 | # (c) 2014, James Tanner <[email protected]>
# (c) 2014, James Cammarata, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#!/usr/bin/env python
import sys
import getpass
import os
import shutil
import time
import tempfile
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
from ansible.parsing.vault import VaultEditor
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
v10_data = """$ANSIBLE_VAULT;1.0;AES
53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
83c62ffb04c2512995e815de4b4d29ed"""
v11_data = """$ANSIBLE_VAULT;1.1;AES256
62303130653266653331306264616235333735323636616539316433666463323964623162386137
3961616263373033353631316333623566303532663065310a393036623466376263393961326530
64336561613965383835646464623865663966323464653236343638373165343863623638316664
3631633031323837340a396530313963373030343933616133393566366137363761373930663833
3739"""
class TestVaultEditor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_methods_exist(self):
v = VaultEditor(None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
@patch.object(VaultEditor, '_editor_shell_command')
def test_create_file(self, mock_editor_shell_command):
def sc_side_effect(filename):
return ['touch', filename]
mock_editor_shell_command.side_effect = sc_side_effect
tmp_file = tempfile.NamedTemporaryFile()
os.unlink(tmp_file.name)
ve = VaultEditor("ansible")
ve.create_file(tmp_file.name)
self.assertTrue(os.path.exists(tmp_file.name))
def test_decrypt_1_0(self):
"""
Skip testing decrypting 1.0 files if we don't have access to AES, KDF or
Counter, or we are running on python3 since VaultAES hasn't been backported.
"""
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3':
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v10_file.name)
except errors.AnsibleError as e:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = to_unicode(f.read())
f.close()
os.unlink(v10_file.name)
assert error_hit == False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v11_file = tempfile.NamedTemporaryFile(delete=False)
with v11_file as f:
f.write(to_bytes(v11_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v11_file.name)
except errors.AnsibleError as e:
error_hit = True
# verify decrypted content
f = open(v11_file.name, "rb")
fdata = to_unicode(f.read())
f.close()
os.unlink(v11_file.name)
        assert error_hit == False, "error decrypting 1.1 file"
        assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_rekey_migration(self):
"""
Skip testing rekeying files if we don't have access to AES, KDF or
Counter, or we are running on python3 since VaultAES hasn't been backported.
"""
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3':
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file(v10_file.name, 'ansible2')
except errors.AnsibleError as e:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = f.read()
f.close()
assert error_hit == False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError as e:
error_hit = True
os.unlink(v10_file.name)
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit == False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
| gpl-3.0 |
Eksmo/calibre | src/odf/odf2xhtml.py | 10 | 68488 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#import pdb
#pdb.set_trace()
from xml.sax import handler
from xml.sax.saxutils import escape, quoteattr
from xml.dom import Node
from opendocument import load
from namespaces import ANIMNS, CHARTNS, CONFIGNS, DCNS, DR3DNS, DRAWNS, FONS, \
FORMNS, MATHNS, METANS, NUMBERNS, OFFICENS, PRESENTATIONNS, SCRIPTNS, \
SMILNS, STYLENS, SVGNS, TABLENS, TEXTNS, XLINKNS
if False: # Added by Kovid
DR3DNS, MATHNS, CHARTNS, CONFIGNS, ANIMNS, FORMNS, SMILNS, SCRIPTNS
# Handling of styles
#
# First there are font face declarations. These set up a font style that will be
# referenced from a text-property. The declaration describes the font making
# it possible for the application to find a similar font should the system not
# have that particular one. The StyleToCSS stores these attributes to be used
# for the CSS2 font declaration.
#
# Then there are default-styles. These set defaults for various style types:
# "text", "paragraph", "section", "ruby", "table", "table-column", "table-row",
# "table-cell", "graphic", "presentation", "drawing-page", "chart".
# Since CSS2 can't refer to another style, ODF2XHTML add these to all
# styles unless overridden.
#
# The real styles are declared in the <style:style> element. They have a
# family referring to the default-styles, and may have a parent style.
#
# Styles have scope. The same name can be used for both paragraph and
# character etc. styles. Since CSS2 has no scope we use a prefix. (Not elegant)
# In ODF a style can have a parent, these parents can be chained.
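#
# A minimal illustrative sketch (not part of the converter; the style name and
# attribute values are invented) of how an ODF style ends up as a CSS class:
#
#   <style:style style:name="P1" style:family="paragraph">
#     <style:paragraph-properties fo:text-align="end"/>
#   </style:style>
#
# is collected as styledict[".P-P1"] = {(FONS, u'text-align'): u'end'} and is
# later written out by generate_stylesheet() roughly as
#
#   .P-P1 { text-align: right; }
#
# since StyleToCSS.c_text_align below maps the ODF value "end" to "right".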
class StyleToCSS:
""" The purpose of the StyleToCSS class is to contain the rules to convert
ODF styles to CSS2. Since it needs the generic fonts, it would probably
make sense to also contain the Styles in a dict as well..
"""
def __init__(self):
# Font declarations
self.fontdict = {}
# Fill-images from presentations for backgrounds
self.fillimages = {}
self.ruleconversions = {
(DRAWNS,u'fill-image-name'): self.c_drawfillimage,
(FONS,u"background-color"): self.c_fo,
(FONS,u"border"): self.c_fo,
(FONS,u"border-bottom"): self.c_fo,
(FONS,u"border-left"): self.c_fo,
(FONS,u"border-right"): self.c_fo,
(FONS,u"border-top"): self.c_fo,
(FONS,u"break-after"): self.c_break, # Added by Kovid
(FONS,u"break-before"): self.c_break,# Added by Kovid
(FONS,u"color"): self.c_fo,
(FONS,u"font-family"): self.c_fo,
(FONS,u"font-size"): self.c_fo,
(FONS,u"font-style"): self.c_fo,
(FONS,u"font-variant"): self.c_fo,
(FONS,u"font-weight"): self.c_fo,
(FONS,u"line-height"): self.c_fo,
(FONS,u"margin"): self.c_fo,
(FONS,u"margin-bottom"): self.c_fo,
(FONS,u"margin-left"): self.c_fo,
(FONS,u"margin-right"): self.c_fo,
(FONS,u"margin-top"): self.c_fo,
(FONS,u"min-height"): self.c_fo,
(FONS,u"padding"): self.c_fo,
(FONS,u"padding-bottom"): self.c_fo,
(FONS,u"padding-left"): self.c_fo,
(FONS,u"padding-right"): self.c_fo,
(FONS,u"padding-top"): self.c_fo,
(FONS,u"page-width"): self.c_page_width,
(FONS,u"page-height"): self.c_page_height,
(FONS,u"text-align"): self.c_text_align,
(FONS,u"text-indent") :self.c_fo,
(TABLENS,u'border-model') :self.c_border_model,
(STYLENS,u'column-width') : self.c_width,
(STYLENS,u"font-name"): self.c_fn,
(STYLENS,u'horizontal-pos'): self.c_hp,
(STYLENS,u'text-position'): self.c_text_position,
(STYLENS,u'text-line-through-style'): self.c_text_line_through_style,
(STYLENS,u'text-underline-style'): self.c_text_underline_style,
(STYLENS,u'width') : self.c_width,
# FIXME Should do style:vertical-pos here
}
def save_font(self, name, family, generic):
""" It is possible that the HTML browser doesn't know how to
show a particular font. Fortunately ODF provides generic fallbacks.
Unfortunately they are not the same as CSS2.
CSS2: serif, sans-serif, cursive, fantasy, monospace
ODF: roman, swiss, modern, decorative, script, system
        This method puts the font and fallback into a dictionary
"""
htmlgeneric = "sans-serif"
if generic == "roman": htmlgeneric = "serif"
elif generic == "swiss": htmlgeneric = "sans-serif"
elif generic == "modern": htmlgeneric = "monospace"
elif generic == "decorative": htmlgeneric = "sans-serif"
elif generic == "script": htmlgeneric = "monospace"
elif generic == "system": htmlgeneric = "serif"
self.fontdict[name] = (family, htmlgeneric)
def c_drawfillimage(self, ruleset, sdict, rule, val):
""" Fill a figure with an image. Since CSS doesn't let you resize images
this should really be implemented as an absolutely position <img>
with a width and a height
"""
sdict['background-image'] = "url('%s')" % self.fillimages[val]
def c_fo(self, ruleset, sdict, rule, val):
""" XSL formatting attributes """
selector = rule[1]
sdict[selector] = val
def c_break(self, ruleset, sdict, rule, val): # Added by Kovid
property = 'page-' + rule[1]
values = {'auto': 'auto', 'column': 'always', 'page': 'always',
'even-page': 'left', 'odd-page': 'right',
'inherit': 'inherit'}
sdict[property] = values.get(val, 'auto')
def c_border_model(self, ruleset, sdict, rule, val):
""" Convert to CSS2 border model """
if val == 'collapsing':
sdict['border-collapse'] ='collapse'
else:
sdict['border-collapse'] ='separate'
def c_width(self, ruleset, sdict, rule, val):
""" Set width of box """
sdict['width'] = val
def c_text_align(self, ruleset, sdict, rule, align):
""" Text align """
if align == "start": align = "left"
if align == "end": align = "right"
sdict['text-align'] = align
def c_fn(self, ruleset, sdict, rule, fontstyle):
""" Generate the CSS font family
A generic font can be found in two ways. In a <style:font-face>
element or as a font-family-generic attribute in text-properties.
"""
generic = ruleset.get((STYLENS,'font-family-generic') )
if generic is not None:
self.save_font(fontstyle, fontstyle, generic)
family, htmlgeneric = self.fontdict.get(fontstyle, (fontstyle, 'serif'))
sdict['font-family'] = '%s, %s' % (family, htmlgeneric)
def c_text_position(self, ruleset, sdict, rule, tp):
""" Text position. This is used e.g. to make superscript and subscript
This attribute can have one or two values.
The first value must be present and specifies the vertical
text position as a percentage that relates to the current font
height or it takes one of the values sub or super. Negative
percentages or the sub value place the text below the
baseline. Positive percentages or the super value place
the text above the baseline. If sub or super is specified,
the application can choose an appropriate text position.
The second value is optional and specifies the font height
as a percentage that relates to the current font-height. If
this value is not specified, an appropriate font height is
used. Although this value may change the font height that
is displayed, it never changes the current font height that
is used for additional calculations.
"""
textpos = tp.split(' ')
if len(textpos) == 2 and textpos[0] != "0%":
# Bug in OpenOffice. If vertical-align is 0% - ignore the text size.
sdict['font-size'] = textpos[1]
if textpos[0] == "super":
sdict['vertical-align'] = "33%"
elif textpos[0] == "sub":
sdict['vertical-align'] = "-33%"
else:
sdict['vertical-align'] = textpos[0]
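    # Illustrative example for c_text_position (input value invented):
    # style:text-position="super 58%" yields sdict['font-size'] = '58%' and
    # sdict['vertical-align'] = '33%', while a bare "sub" yields only
    # sdict['vertical-align'] = '-33%'.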
def c_hp(self, ruleset, sdict, rule, hpos):
#FIXME: Frames wrap-style defaults to 'parallel', graphics to 'none'.
# It is properly set in the parent-styles, but the program doesn't
# collect the information.
wrap = ruleset.get((STYLENS,'wrap'),'parallel')
# Can have: from-left, left, center, right, from-inside, inside, outside
if hpos == "center":
sdict['margin-left'] = "auto"
sdict['margin-right'] = "auto"
# else:
# # force it to be *something* then delete it
# sdict['margin-left'] = sdict['margin-right'] = ''
# del sdict['margin-left'], sdict['margin-right']
if hpos in ("right","outside"):
if wrap in ( "left", "parallel","dynamic"):
sdict['float'] = "right"
elif wrap == "run-through":
sdict['position'] = "absolute" # Simulate run-through
sdict['top'] = "0"
sdict['right'] = "0";
else: # No wrapping
sdict['margin-left'] = "auto"
sdict['margin-right'] = "0px"
elif hpos in ("left", "inside"):
if wrap in ( "right", "parallel","dynamic"):
sdict['float'] = "left"
elif wrap == "run-through":
sdict['position'] = "absolute" # Simulate run-through
sdict['top'] = "0"
sdict['left'] = "0"
else: # No wrapping
sdict['margin-left'] = "0px"
sdict['margin-right'] = "auto"
elif hpos in ("from-left", "from-inside"):
if wrap in ( "right", "parallel"):
sdict['float'] = "left"
else:
sdict['position'] = "relative" # No wrapping
if ruleset.has_key( (SVGNS,'x') ):
sdict['left'] = ruleset[(SVGNS,'x')]
def c_page_width(self, ruleset, sdict, rule, val):
""" Set width of box
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
sdict['width'] = val
def c_text_underline_style(self, ruleset, sdict, rule, val):
""" Set underline decoration
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
if val and val != "none":
sdict['text-decoration'] = "underline"
def c_text_line_through_style(self, ruleset, sdict, rule, val):
""" Set underline decoration
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
if val and val != "none":
sdict['text-decoration'] = "line-through"
def c_page_height(self, ruleset, sdict, rule, val):
""" Set height of box """
sdict['height'] = val
def convert_styles(self, ruleset):
""" Rule is a tuple of (namespace, name). If the namespace is '' then
it is already CSS2
"""
sdict = {}
for rule,val in ruleset.items():
if rule[0] == '':
sdict[rule[1]] = val
continue
method = self.ruleconversions.get(rule, None )
if method:
method(ruleset, sdict, rule, val)
return sdict
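# Hedged usage sketch for StyleToCSS.convert_styles(); the ruleset below is a
# hand-made example rather than data produced by this module:
#
#   cs = StyleToCSS()
#   ruleset = {(FONS, u'color'): u'#ff0000', (FONS, u'text-align'): u'start'}
#   cs.convert_styles(ruleset)
#   # -> {'color': '#ff0000', 'text-align': 'left'}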
class TagStack:
def __init__(self):
self.stack = []
def push(self, tag, attrs):
self.stack.append( (tag, attrs) )
def pop(self):
item = self.stack.pop()
return item
def stackparent(self):
item = self.stack[-1]
return item[1]
def rfindattr(self, attr):
""" Find a tag with the given attribute """
for tag, attrs in self.stack:
if attrs.has_key(attr):
return attrs[attr]
return None
def count_tags(self, tag):
c = 0
for ttag, tattrs in self.stack:
if ttag == tag: c = c + 1
return c
special_styles = {
'S-Emphasis':'em',
'S-Citation':'cite',
'S-Strong_20_Emphasis':'strong',
'S-Variable':'var',
'S-Definition':'dfn',
'S-Teletype':'tt',
'P-Heading_20_1':'h1',
'P-Heading_20_2':'h2',
'P-Heading_20_3':'h3',
'P-Heading_20_4':'h4',
'P-Heading_20_5':'h5',
'P-Heading_20_6':'h6',
# 'P-Caption':'caption',
'P-Addressee':'address',
# 'P-List_20_Heading':'dt',
# 'P-List_20_Contents':'dd',
'P-Preformatted_20_Text':'pre',
# 'P-Table_20_Heading':'th',
# 'P-Table_20_Contents':'td',
# 'P-Text_20_body':'p'
}
#-----------------------------------------------------------------------------
#
# ODFCONTENTHANDLER
#
#-----------------------------------------------------------------------------
class ODF2XHTML(handler.ContentHandler):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
def __init__(self, generate_css=True, embedable=False):
# Tags
self.generate_css = generate_css
self.elements = {
(DCNS, 'title'): (self.s_processcont, self.e_dc_title),
(DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
(DCNS, 'creator'): (self.s_processcont, self.e_dc_creator),
(DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
(DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
(DRAWNS, 'custom-shape'): (self.s_custom_shape, self.e_custom_shape),
(DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
(DRAWNS, 'image'): (self.s_draw_image, None),
(DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
(DRAWNS, "layer-set"):(self.s_ignorexml, None),
(DRAWNS, 'object'): (self.s_draw_object, None),
(DRAWNS, 'object-ole'): (self.s_draw_object_ole, None),
(DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
(DRAWNS, 'text-box'): (self.s_draw_textbox, self.e_draw_textbox),
(METANS, 'creation-date'):(self.s_processcont, self.e_dc_metatag),
(METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
(METANS, 'initial-creator'): (self.s_processcont, self.e_dc_metatag),
(METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
(NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
(NUMBERNS, "currency-style"):(self.s_ignorexml, None),
(NUMBERNS, "date-style"):(self.s_ignorexml, None),
(NUMBERNS, "number-style"):(self.s_ignorexml, None),
(NUMBERNS, "text-style"):(self.s_ignorexml, None),
(OFFICENS, "annotation"):(self.s_ignorexml, None),
(OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
(OFFICENS, "document"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "forms"):(self.s_ignorexml, None),
(OFFICENS, "master-styles"):(self.s_office_master_styles, None),
(OFFICENS, "meta"):(self.s_ignorecont, None),
(OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
(OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
(OFFICENS, "styles"):(self.s_office_styles, None),
(OFFICENS, "text"):(self.s_office_text, self.e_office_text),
(OFFICENS, "scripts"):(self.s_ignorexml, None),
(OFFICENS, "settings"):(self.s_ignorexml, None),
(PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
# (STYLENS, "default-page-layout"):(self.s_style_default_page_layout, self.e_style_page_layout),
(STYLENS, "default-page-layout"):(self.s_ignorexml, None),
(STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
(STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
(STYLENS, "font-face"):(self.s_style_font_face, None),
# (STYLENS, "footer"):(self.s_style_footer, self.e_style_footer),
# (STYLENS, "footer-style"):(self.s_style_footer_style, None),
(STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
(STYLENS, "handout-master"):(self.s_ignorexml, None),
# (STYLENS, "header"):(self.s_style_header, self.e_style_header),
# (STYLENS, "header-footer-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "header-style"):(self.s_style_header_style, None),
(STYLENS, "master-page"):(self.s_style_master_page, None),
(STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
(STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
# (STYLENS, "page-layout"):(self.s_ignorexml, None),
(STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
(STYLENS, "style"):(self.s_style_style, self.e_style_style),
(STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
(STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
(STYLENS, "table-properties"):(self.s_style_handle_properties, None),
(STYLENS, "text-properties"):(self.s_style_handle_properties, None),
(SVGNS, 'desc'): (self.s_ignorexml, None),
(TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
(TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
(TABLENS, 'table-column'): (self.s_table_table_column, None),
(TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
(TABLENS, 'table'): (self.s_table_table, self.e_table_table),
(TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
(TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
(TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'bookmark'): (self.s_text_bookmark, None),
(TEXTNS, 'bookmark-start'): (self.s_text_bookmark, None),
(TEXTNS, 'bookmark-ref'): (self.s_text_bookmark_ref, self.e_text_a),
(TEXTNS, 'bookmark-ref-start'): (self.s_text_bookmark_ref, None),
(TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
(TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'line-break'):(self.s_text_line_break, None),
(TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
(TEXTNS, "list"):(self.s_text_list, self.e_text_list),
(TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
(TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
(TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
(TEXTNS, "list-style"):(None, None),
(TEXTNS, "note"):(self.s_text_note, None),
(TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
(TEXTNS, "note-citation"):(None, self.e_text_note_citation),
(TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
(TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
(TEXTNS, 's'): (self.s_text_s, None),
(TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
(TEXTNS, 'tab'): (self.s_text_tab, None),
(TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
}
if embedable:
self.make_embedable()
self._resetobject()
def set_plain(self):
""" Tell the parser to not generate CSS """
self.generate_css = False
def set_embedable(self):
""" Tells the converter to only output the parts inside the <body>"""
self.elements[(OFFICENS, u"text")] = (None,None)
self.elements[(OFFICENS, u"spreadsheet")] = (None,None)
self.elements[(OFFICENS, u"presentation")] = (None,None)
self.elements[(OFFICENS, u"document-content")] = (None,None)
def add_style_file(self, stylefilename, media=None):
""" Add a link to an external style file.
            Also turns off the embedding of styles in the HTML
"""
self.use_internal_css = False
self.stylefilename = stylefilename
if media:
self.metatags.append('<link rel="stylesheet" type="text/css" href="%s" media="%s"/>\n' % (stylefilename,media))
else:
self.metatags.append('<link rel="stylesheet" type="text/css" href="%s"/>\n' % (stylefilename))
def _resetfootnotes(self):
# Footnotes and endnotes
self.notedict = {}
self.currentnote = 0
self.notebody = ''
def _resetobject(self):
self.lines = []
self._wfunc = self._wlines
self.xmlfile = ''
self.title = ''
self.language = ''
self.creator = ''
self.data = []
self.tagstack = TagStack()
self.htmlstack = []
self.pstack = []
self.processelem = True
self.processcont = True
self.listtypes = {}
self.headinglevels = [0, 0,0,0,0,0, 0,0,0,0,0] # level 0 to 10
self.use_internal_css = True
self.cs = StyleToCSS()
self.anchors = {}
# Style declarations
self.stylestack = []
self.styledict = {}
self.currentstyle = None
self._resetfootnotes()
# Tags from meta.xml
self.metatags = []
def writeout(self, s):
if s != '':
self._wfunc(s)
def writedata(self):
d = ''.join(self.data)
if d != '':
self.writeout(escape(d))
def opentag(self, tag, attrs={}, block=False):
""" Create an open HTML tag """
self.htmlstack.append((tag,attrs,block))
a = []
for key,val in attrs.items():
a.append('''%s=%s''' % (key, quoteattr(val)))
if len(a) == 0:
self.writeout("<%s>" % tag)
else:
self.writeout("<%s %s>" % (tag, " ".join(a)))
if block == True:
self.writeout("\n")
def closetag(self, tag, block=True):
""" Close an open HTML tag """
self.htmlstack.pop()
self.writeout("</%s>" % tag)
if block == True:
self.writeout("\n")
def emptytag(self, tag, attrs={}):
a = []
for key,val in attrs.items():
a.append('''%s=%s''' % (key, quoteattr(val)))
self.writeout("<%s %s/>\n" % (tag, " ".join(a)))
#--------------------------------------------------
# Interface to parser
#--------------------------------------------------
def characters(self, data):
if self.processelem and self.processcont:
self.data.append(data)
def startElementNS(self, tag, qname, attrs):
self.pstack.append( (self.processelem, self.processcont) )
if self.processelem:
method = self.elements.get(tag, (None, None) )[0]
if method:
self.handle_starttag(tag, method, attrs)
else:
self.unknown_starttag(tag,attrs)
self.tagstack.push( tag, attrs )
def endElementNS(self, tag, qname):
stag, attrs = self.tagstack.pop()
if self.processelem:
method = self.elements.get(tag, (None, None) )[1]
if method:
self.handle_endtag(tag, attrs, method)
else:
self.unknown_endtag(tag, attrs)
self.processelem, self.processcont = self.pstack.pop()
#--------------------------------------------------
def handle_starttag(self, tag, method, attrs):
method(tag,attrs)
def handle_endtag(self, tag, attrs, method):
method(tag, attrs)
def unknown_starttag(self, tag, attrs):
pass
def unknown_endtag(self, tag, attrs):
pass
def s_ignorexml(self, tag, attrs):
""" Ignore this xml element and all children of it
It will automatically stop ignoring
"""
self.processelem = False
def s_ignorecont(self, tag, attrs):
""" Stop processing the text nodes """
self.processcont = False
def s_processcont(self, tag, attrs):
""" Start processing the text nodes """
self.processcont = True
def classname(self, attrs):
""" Generate a class name from a style name """
c = attrs.get((TEXTNS,'style-name'),'')
c = c.replace(".","_")
return c
def get_anchor(self, name):
""" Create a unique anchor id for a href name """
if not self.anchors.has_key(name):
# Changed by Kovid
self.anchors[name] = "anchor%d" % (len(self.anchors) + 1)
return self.anchors.get(name)
#--------------------------------------------------
def purgedata(self):
self.data = []
#-----------------------------------------------------------------------------
#
# Handle meta data
#
#-----------------------------------------------------------------------------
def e_dc_title(self, tag, attrs):
""" Get the title from the meta data and create a HTML <title>
"""
self.title = ''.join(self.data)
#self.metatags.append('<title>%s</title>\n' % escape(self.title))
self.data = []
def e_dc_metatag(self, tag, attrs):
""" Any other meta data is added as a <meta> element
"""
self.metatags.append('<meta name="%s" content=%s/>\n' % (tag[1], quoteattr(''.join(self.data))))
self.data = []
def e_dc_contentlanguage(self, tag, attrs):
""" Set the content language. Identifies the targeted audience
"""
self.language = ''.join(self.data)
self.metatags.append('<meta http-equiv="content-language" content="%s"/>\n' % escape(self.language))
self.data = []
def e_dc_creator(self, tag, attrs):
""" Set the content creator. Identifies the targeted audience
"""
self.creator = ''.join(self.data)
self.metatags.append('<meta http-equiv="creator" content="%s"/>\n' % escape(self.creator))
self.data = []
def s_custom_shape(self, tag, attrs):
""" A <draw:custom-shape> is made into a <div> in HTML which is then styled
"""
anchor_type = attrs.get((TEXTNS,'anchor-type'),'notfound')
htmltag = 'div'
name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
if name == 'G-':
name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
name = name.replace(".","_")
if anchor_type == "paragraph":
style = 'position:absolute;'
elif anchor_type == 'char':
style = "position:absolute;"
elif anchor_type == 'as-char':
htmltag = 'div'
style = ''
else:
style = "position: absolute;"
if attrs.has_key( (SVGNS,"width") ):
style = style + "width:" + attrs[(SVGNS,"width")] + ";"
if attrs.has_key( (SVGNS,"height") ):
style = style + "height:" + attrs[(SVGNS,"height")] + ";"
if attrs.has_key( (SVGNS,"x") ):
style = style + "left:" + attrs[(SVGNS,"x")] + ";"
if attrs.has_key( (SVGNS,"y") ):
style = style + "top:" + attrs[(SVGNS,"y")] + ";"
if self.generate_css:
self.opentag(htmltag, {'class': name, 'style': style})
else:
self.opentag(htmltag)
def e_custom_shape(self, tag, attrs):
""" End the <draw:frame>
"""
self.closetag('div')
def s_draw_frame(self, tag, attrs):
""" A <draw:frame> is made into a <div> in HTML which is then styled
"""
anchor_type = attrs.get((TEXTNS,'anchor-type'),'notfound')
htmltag = 'div'
name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
if name == 'G-':
name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
name = name.replace(".","_")
if anchor_type == "paragraph":
style = 'position:relative;'
elif anchor_type == 'char':
style = "position:relative;"
elif anchor_type == 'as-char':
htmltag = 'div'
style = ''
else:
style = "position:absolute;"
if attrs.has_key( (SVGNS,"width") ):
style = style + "width:" + attrs[(SVGNS,"width")] + ";"
if attrs.has_key( (SVGNS,"height") ):
style = style + "height:" + attrs[(SVGNS,"height")] + ";"
if attrs.has_key( (SVGNS,"x") ):
style = style + "left:" + attrs[(SVGNS,"x")] + ";"
if attrs.has_key( (SVGNS,"y") ):
style = style + "top:" + attrs[(SVGNS,"y")] + ";"
if self.generate_css:
self.opentag(htmltag, {'class': name, 'style': style})
else:
self.opentag(htmltag)
def e_draw_frame(self, tag, attrs):
""" End the <draw:frame>
"""
self.closetag('div')
def s_draw_fill_image(self, tag, attrs):
name = attrs.get( (DRAWNS,'name'), "NoName")
imghref = attrs[(XLINKNS,"href")]
imghref = self.rewritelink(imghref)
self.cs.fillimages[name] = imghref
def rewritelink(self, imghref):
""" Intended to be overloaded if you don't store your pictures
in a Pictures subfolder
"""
return imghref
def s_draw_image(self, tag, attrs):
""" A <draw:image> becomes an <img/> element
"""
parent = self.tagstack.stackparent()
anchor_type = parent.get((TEXTNS,'anchor-type'))
imghref = attrs[(XLINKNS,"href")]
imghref = self.rewritelink(imghref)
htmlattrs = {'alt':"", 'src':imghref }
if self.generate_css:
if anchor_type != "char":
htmlattrs['style'] = "display: block;"
self.emptytag('img', htmlattrs)
def s_draw_object(self, tag, attrs):
""" A <draw:object> is embedded object in the document (e.g. spreadsheet in presentation).
"""
return # Added by Kovid
objhref = attrs[(XLINKNS,"href")]
# Remove leading "./": from "./Object 1" to "Object 1"
# objhref = objhref [2:]
# Not using os.path.join since it fails to find the file on Windows.
# objcontentpath = '/'.join([objhref, 'content.xml'])
for c in self.document.childnodes:
if c.folder == objhref:
self._walknode(c.topnode)
def s_draw_object_ole(self, tag, attrs):
""" A <draw:object-ole> is embedded OLE object in the document (e.g. MS Graph).
"""
try:
class_id = attrs[(DRAWNS,"class-id")]
except KeyError: # Added by Kovid to ignore <draw> without the right
return # attributes
if class_id and class_id.lower() == "00020803-0000-0000-c000-000000000046": ## Microsoft Graph 97 Chart
tagattrs = { 'name':'object_ole_graph', 'class':'ole-graph' }
self.opentag('a', tagattrs)
self.closetag('a', tagattrs)
def s_draw_page(self, tag, attrs):
""" A <draw:page> is a slide in a presentation. We use a <fieldset> element in HTML.
            Therefore if you convert an ODP file, you get a series of <fieldset>s.
Override this for your own purpose.
"""
name = attrs.get( (DRAWNS,'name'), "NoName")
stylename = attrs.get( (DRAWNS,'style-name'), "")
stylename = stylename.replace(".","_")
masterpage = attrs.get( (DRAWNS,'master-page-name'),"")
masterpage = masterpage.replace(".","_")
if self.generate_css:
self.opentag('fieldset', {'class':"DP-%s MP-%s" % (stylename, masterpage) })
else:
self.opentag('fieldset')
self.opentag('legend')
self.writeout(escape(name))
self.closetag('legend')
def e_draw_page(self, tag, attrs):
self.closetag('fieldset')
def s_draw_textbox(self, tag, attrs):
style = ''
if attrs.has_key( (FONS,"min-height") ):
style = style + "min-height:" + attrs[(FONS,"min-height")] + ";"
self.opentag('div')
# self.opentag('div', {'style': style})
def e_draw_textbox(self, tag, attrs):
""" End the <draw:text-box>
"""
self.closetag('div')
def html_body(self, tag, attrs):
self.writedata()
if self.generate_css and self.use_internal_css:
self.opentag('style', {'type':"text/css"}, True)
self.writeout('/*<![CDATA[*/\n')
self.generate_stylesheet()
self.writeout('/*]]>*/\n')
self.closetag('style')
self.purgedata()
self.closetag('head')
self.opentag('body', block=True)
# background-color: white removed by Kovid for #9118
# Specifying an explicit bg color prevents ebook readers
# from successfully inverting colors
default_styles = """
img { width: 100%; height: 100%; }
* { padding: 0; margin: 0; }
body { margin: 0 1em; }
ol, ul { padding-left: 2em; }
"""
def generate_stylesheet(self):
for name in self.stylestack:
styles = self.styledict.get(name)
# Preload with the family's default style
if styles.has_key('__style-family') and self.styledict.has_key(styles['__style-family']):
familystyle = self.styledict[styles['__style-family']].copy()
del styles['__style-family']
for style, val in styles.items():
familystyle[style] = val
styles = familystyle
# Resolve the remaining parent styles
while styles.has_key('__parent-style-name') and self.styledict.has_key(styles['__parent-style-name']):
parentstyle = self.styledict[styles['__parent-style-name']].copy()
del styles['__parent-style-name']
for style, val in styles.items():
parentstyle[style] = val
styles = parentstyle
self.styledict[name] = styles
# Write the styles to HTML
self.writeout(self.default_styles)
# Changed by Kovid to not write out endless copies of the same style
css_styles = {}
for name in self.stylestack:
styles = self.styledict.get(name)
css2 = tuple(self.cs.convert_styles(styles).iteritems())
if css2 in css_styles:
css_styles[css2].append(name)
else:
css_styles[css2] = [name]
def filter_margins(css2):
names = { k for k, v in css2 }
ignore = set()
if {'margin-left', 'margin-right', 'margin-top',
'margin-bottom'}.issubset(names):
# These come from XML and we cannot preserve XML attribute
# order so we assume that margin is to be overridden See
# https://bugs.launchpad.net/calibre/+bug/941134 and
# https://bugs.launchpad.net/calibre/+bug/1002702
ignore.add('margin')
css2 = sorted(css2, key=lambda x:{'margin':0}.get(x[0], 1))
for k, v in css2:
if k not in ignore:
yield k, v
for css2, names in css_styles.iteritems():
self.writeout("%s {\n" % ', '.join(names))
for style, val in filter_margins(css2):
self.writeout("\t%s: %s;\n" % (style, val) )
self.writeout("}\n")
def generate_footnotes(self):
if self.currentnote == 0:
return
if self.generate_css:
self.opentag('ol', {'style':'border-top: 1px solid black'}, True)
else:
self.opentag('ol')
for key in range(1,self.currentnote+1):
note = self.notedict[key]
# for key,note in self.notedict.items():
self.opentag('li', { 'id':"footnote-%d" % key })
# self.opentag('sup')
# self.writeout(escape(note['citation']))
# self.closetag('sup', False)
self.writeout(note['body'])
self.closetag('li')
self.closetag('ol')
def s_office_automatic_styles(self, tag, attrs):
if self.xmlfile == 'styles.xml':
self.autoprefix = "A"
else:
self.autoprefix = ""
def s_office_document_content(self, tag, attrs):
""" First tag in the content.xml file"""
self.writeout('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" ')
self.writeout('"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
self.opentag('html', {'xmlns':"http://www.w3.org/1999/xhtml"}, True)
self.opentag('head', block=True)
self.emptytag('meta', { 'http-equiv':"Content-Type", 'content':"text/html;charset=UTF-8"})
for metaline in self.metatags:
self.writeout(metaline)
self.writeout('<title>%s</title>\n' % escape(self.title))
def e_office_document_content(self, tag, attrs):
""" Last tag """
self.closetag('html')
def s_office_master_styles(self, tag, attrs):
""" """
def s_office_presentation(self, tag, attrs):
""" For some odd reason, OpenOffice Impress doesn't define a default-style
for the 'paragraph'. We therefore force a standard when we see
it is a presentation
"""
self.styledict['p'] = {(FONS,u'font-size'): u"24pt" }
self.styledict['presentation'] = {(FONS,u'font-size'): u"24pt" }
self.html_body(tag, attrs)
def e_office_presentation(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_spreadsheet(self, tag, attrs):
self.html_body(tag, attrs)
def e_office_spreadsheet(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_styles(self, tag, attrs):
self.autoprefix = ""
def s_office_text(self, tag, attrs):
""" OpenDocument text """
self.styledict['frame'] = { (STYLENS,'wrap'): u'parallel'}
self.html_body(tag, attrs)
def e_office_text(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_style_handle_properties(self, tag, attrs):
""" Copy all attributes to a struct.
We will later convert them to CSS2
"""
if self.currentstyle is None: # Added by Kovid
return
for key,attr in attrs.items():
self.styledict[self.currentstyle][key] = attr
familymap = {'frame':'frame', 'paragraph':'p', 'presentation':'presentation',
'text':'span','section':'div',
'table':'table','table-cell':'td','table-column':'col',
'table-row':'tr','graphic':'graphic' }
def s_style_default_style(self, tag, attrs):
""" A default style is like a style on an HTML tag
"""
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
self.currentstyle = htmlfamily
# self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def e_style_default_style(self, tag, attrs):
self.currentstyle = None
def s_style_font_face(self, tag, attrs):
""" It is possible that the HTML browser doesn't know how to
show a particular font. Luckily ODF provides generic fallbacks
Unfortunately they are not the same as CSS2.
CSS2: serif, sans-serif, cursive, fantasy, monospace
ODF: roman, swiss, modern, decorative, script, system
"""
name = attrs[(STYLENS,"name")]
family = attrs[(SVGNS,"font-family")]
generic = attrs.get( (STYLENS,'font-family-generic'),"" )
self.cs.save_font(name, family, generic)
def s_style_footer(self, tag, attrs):
self.opentag('div', { 'id':"footer" })
self.purgedata()
def e_style_footer(self, tag, attrs):
self.writedata()
self.closetag('div')
self.purgedata()
def s_style_footer_style(self, tag, attrs):
self.currentstyle = "@print #footer"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_header(self, tag, attrs):
self.opentag('div', { 'id':"header" })
self.purgedata()
def e_style_header(self, tag, attrs):
self.writedata()
self.closetag('div')
self.purgedata()
def s_style_header_style(self, tag, attrs):
self.currentstyle = "@print #header"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_default_page_layout(self, tag, attrs):
""" Collect the formatting for the default page layout style.
"""
self.currentstyle = "@page"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_page_layout(self, tag, attrs):
""" Collect the formatting for the page layout style.
This won't work in CSS 2.1, as page identifiers are not allowed.
It is legal in CSS3, but the rest of the application doesn't specify when to use what page layout
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".PL-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def e_style_page_layout(self, tag, attrs):
""" End this style
"""
self.currentstyle = None
def s_style_master_page(self, tag, attrs):
""" Collect the formatting for the page layout style.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".MP-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {('','position'):'relative'}
# Then load the pagelayout style if we find it
pagelayout = attrs.get( (STYLENS,'page-layout-name'), None)
if pagelayout:
pagelayout = ".PL-" + pagelayout
if self.styledict.has_key( pagelayout ):
styles = self.styledict[pagelayout]
for style, val in styles.items():
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = pagelayout
self.s_ignorexml(tag, attrs)
# Short prefixes for class selectors
_familyshort = {'drawing-page':'DP', 'paragraph':'P', 'presentation':'PR',
'text':'S', 'section':'D',
'table':'T', 'table-cell':'TD', 'table-column':'TC',
'table-row':'TR', 'graphic':'G' }
def s_style_style(self, tag, attrs):
""" Collect the formatting for the style.
Styles have scope. The same name can be used for both paragraph and
        character styles. Since CSS has no scope we use a prefix. (Not elegant)
In ODF a style can have a parent, these parents can be chained.
We may not have encountered the parent yet, but if we have, we resolve it.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
sfamily = self._familyshort.get(family,'X')
name = "%s%s-%s" % (self.autoprefix, sfamily, name)
parent = attrs.get( (STYLENS,'parent-style-name') )
self.currentstyle = special_styles.get(name,"."+name)
self.stylestack.append(self.currentstyle)
if not self.styledict.has_key(self.currentstyle):
self.styledict[self.currentstyle] = {}
self.styledict[self.currentstyle]['__style-family'] = htmlfamily
# Then load the parent style if we find it
if parent:
parent = "%s-%s" % (sfamily, parent)
parent = special_styles.get(parent, "."+parent)
if self.styledict.has_key( parent ):
styles = self.styledict[parent]
for style, val in styles.items():
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = parent
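    # Illustrative example of the scoping described above (style names invented):
    # a paragraph style "Text_20_body" with parent "Standard" becomes the CSS
    # selector ".P-Text_20_body"; if ".P-Standard" has already been seen its
    # rules are copied in immediately, otherwise '__parent-style-name' is set
    # and the chain is resolved later in generate_stylesheet().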
def e_style_style(self, tag, attrs):
""" End this style
"""
self.currentstyle = None
def s_table_table(self, tag, attrs):
""" Start a table
"""
c = attrs.get( (TABLENS,'style-name'), None)
if c and self.generate_css:
c = c.replace(".","_")
self.opentag('table',{ 'class': "T-%s" % c })
else:
self.opentag('table')
self.purgedata()
def e_table_table(self, tag, attrs):
""" End a table
"""
self.writedata()
self.closetag('table')
self.purgedata()
def s_table_table_cell(self, tag, attrs):
""" Start a table cell """
#FIXME: number-columns-repeated § 8.1.3
#repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
rowspan = attrs.get( (TABLENS,'number-rows-spanned') )
if rowspan:
htmlattrs['rowspan'] = rowspan
colspan = attrs.get( (TABLENS,'number-columns-spanned') )
if colspan:
htmlattrs['colspan'] = colspan
c = attrs.get( (TABLENS,'style-name') )
if c:
htmlattrs['class'] = 'TD-%s' % c.replace(".","_")
self.opentag('td', htmlattrs)
self.purgedata()
def e_table_table_cell(self, tag, attrs):
""" End a table cell """
self.writedata()
self.closetag('td')
self.purgedata()
def s_table_table_column(self, tag, attrs):
""" Start a table column """
c = attrs.get( (TABLENS,'style-name'), None)
repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
if c:
htmlattrs['class'] = "TC-%s" % c.replace(".","_")
for x in xrange(repeated):
self.emptytag('col', htmlattrs)
self.purgedata()
def s_table_table_row(self, tag, attrs):
""" Start a table row """
#FIXME: table:number-rows-repeated
c = attrs.get( (TABLENS,'style-name'), None)
htmlattrs = {}
if c:
htmlattrs['class'] = "TR-%s" % c.replace(".","_")
self.opentag('tr', htmlattrs)
self.purgedata()
def e_table_table_row(self, tag, attrs):
""" End a table row """
self.writedata()
self.closetag('tr')
self.purgedata()
def s_text_a(self, tag, attrs):
""" Anchors start """
self.writedata()
href = attrs[(XLINKNS,"href")].split("|")[0]
if href[:1] == "#": # Changed by Kovid
href = "#" + self.get_anchor(href[1:])
self.opentag('a', {'href':href})
self.purgedata()
def e_text_a(self, tag, attrs):
""" End an anchor or bookmark reference """
self.writedata()
self.closetag('a', False)
self.purgedata()
def s_text_bookmark(self, tag, attrs):
""" Bookmark definition """
name = attrs[(TEXTNS,'name')]
html_id = self.get_anchor(name)
self.writedata()
self.opentag('span', {'id':html_id})
self.closetag('span', False)
self.purgedata()
def s_text_bookmark_ref(self, tag, attrs):
""" Bookmark reference """
name = attrs[(TEXTNS,'ref-name')]
html_id = "#" + self.get_anchor(name)
self.writedata()
self.opentag('a', {'href':html_id})
self.purgedata()
def s_text_h(self, tag, attrs):
""" Headings start """
level = int(attrs[(TEXTNS,'outline-level')])
if level > 6: level = 6 # Heading levels go only to 6 in XHTML
if level < 1: level = 1
self.headinglevels[level] = self.headinglevels[level] + 1
name = self.classname(attrs)
for x in range(level + 1,10):
self.headinglevels[x] = 0
special = special_styles.get("P-"+name)
if special or not self.generate_css:
self.opentag('h%s' % level)
else:
self.opentag('h%s' % level, {'class':"P-%s" % name })
self.purgedata()
def e_text_h(self, tag, attrs):
""" Headings end
Side-effect: If there is no title in the metadata, then it is taken
from the first heading of any level.
"""
self.writedata()
level = int(attrs[(TEXTNS,'outline-level')])
if level > 6: level = 6 # Heading levels go only to 6 in XHTML
if level < 1: level = 1
lev = self.headinglevels[1:level+1]
outline = '.'.join(map(str,lev) )
heading = ''.join(self.data)
if self.title == '': self.title = heading
# Changed by Kovid
tail = ''.join(self.data)
anchor = self.get_anchor("%s.%s" % ( outline, tail))
anchor2 = self.get_anchor(tail) # Added by kovid to fix #7506
self.opentag('a', {'id': anchor} )
self.closetag('a', False)
self.opentag('a', {'id': anchor2} )
self.closetag('a', False)
self.closetag('h%s' % level)
self.purgedata()
def s_text_line_break(self, tag, attrs):
""" Force a line break (<br/>) """
self.writedata()
self.emptytag('br')
self.purgedata()
def s_text_list(self, tag, attrs):
""" Start a list (<ul> or <ol>)
To know which level we're at, we have to count the number
of <text:list> elements on the tagstack.
"""
name = attrs.get( (TEXTNS,'style-name') )
level = self.tagstack.count_tags(tag) + 1
if name:
name = name.replace(".","_")
else:
# FIXME: If a list is contained in a table cell or text box,
# the list level must return to 1, even though the table or
# textbox itself may be nested within another list.
name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
list_class = "%s_%d" % (name, level)
if self.generate_css:
self.opentag('%s' % self.listtypes.get(list_class,'ul'), {'class': list_class })
else:
self.opentag('%s' % self.listtypes.get(list_class,'ul'))
self.purgedata()
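    # Illustrative example (style name invented): a <text:list> with
    # text:style-name="L1" nested two levels deep yields list_class "L1_2",
    # and the tag emitted is whichever of ul/ol was registered for "L1_2"
    # in self.listtypes by the list-level-style handlers below.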
def e_text_list(self, tag, attrs):
""" End a list """
self.writedata()
name = attrs.get( (TEXTNS,'style-name') )
level = self.tagstack.count_tags(tag) + 1
if name:
name = name.replace(".","_")
else:
# FIXME: If a list is contained in a table cell or text box,
# the list level must return to 1, even though the table or
# textbox itself may be nested within another list.
name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
list_class = "%s_%d" % (name, level)
self.closetag(self.listtypes.get(list_class,'ul'))
self.purgedata()
def s_text_list_item(self, tag, attrs):
""" Start list item """
self.opentag('li')
self.purgedata()
def e_text_list_item(self, tag, attrs):
""" End list item """
self.writedata()
self.closetag('li')
self.purgedata()
def s_text_list_level_style_bullet(self, tag, attrs):
""" CSS doesn't have the ability to set the glyph
to a particular character, so we just go through
the available glyphs
"""
name = self.tagstack.rfindattr( (STYLENS,'name') )
level = attrs[(TEXTNS,'level')]
self.prevstyle = self.currentstyle
list_class = "%s_%s" % (name, level)
self.listtypes[list_class] = 'ul'
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
level = int(level)
listtype = ("square", "disc", "circle")[level % 3]
self.styledict[self.currentstyle][('','list-style-type')] = listtype
def e_text_list_level_style_bullet(self, tag, attrs):
self.currentstyle = self.prevstyle
del self.prevstyle
def s_text_list_level_style_number(self, tag, attrs):
name = self.tagstack.stackparent()[(STYLENS,'name')]
level = attrs[(TEXTNS,'level')]
        num_format = attrs.get( (STYLENS,'num-format'),"1")
list_class = "%s_%s" % (name, level)
self.prevstyle = self.currentstyle
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.listtypes[list_class] = 'ol'
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
if num_format == "1": listtype = "decimal"
elif num_format == "I": listtype = "upper-roman"
elif num_format == "i": listtype = "lower-roman"
elif num_format == "A": listtype = "upper-alpha"
elif num_format == "a": listtype = "lower-alpha"
else: listtype = "decimal"
self.styledict[self.currentstyle][('','list-style-type')] = listtype
def e_text_list_level_style_number(self, tag, attrs):
self.currentstyle = self.prevstyle
del self.prevstyle
def s_text_note(self, tag, attrs):
self.writedata()
self.purgedata()
self.currentnote = self.currentnote + 1
self.notedict[self.currentnote] = {}
self.notebody = []
def e_text_note(self, tag, attrs):
pass
def collectnote(self,s):
if s != '':
self.notebody.append(s)
def s_text_note_body(self, tag, attrs):
self._orgwfunc = self._wfunc
self._wfunc = self.collectnote
def e_text_note_body(self, tag, attrs):
self._wfunc = self._orgwfunc
self.notedict[self.currentnote]['body'] = ''.join(self.notebody)
self.notebody = ''
del self._orgwfunc
def e_text_note_citation(self, tag, attrs):
mark = ''.join(self.data)
self.notedict[self.currentnote]['citation'] = mark
self.opentag('a',{ 'href': "#footnote-%s" % self.currentnote })
self.opentag('sup')
# self.writeout( escape(mark) )
# Since HTML only knows about endnotes, there is too much risk that the
# marker is reused in the source. Therefore we force numeric markers
self.writeout(unicode(self.currentnote))
self.closetag('sup')
self.closetag('a')
def s_text_p(self, tag, attrs):
""" Paragraph
"""
htmlattrs = {}
specialtag = "p"
c = attrs.get( (TEXTNS,'style-name'), None)
if c:
c = c.replace(".","_")
specialtag = special_styles.get("P-"+c)
if specialtag is None:
specialtag = 'p'
if self.generate_css:
htmlattrs['class'] = "P-%s" % c
self.opentag(specialtag, htmlattrs)
self.purgedata()
def e_text_p(self, tag, attrs):
""" End Paragraph
"""
specialtag = "p"
c = attrs.get( (TEXTNS,'style-name'), None)
if c:
c = c.replace(".","_")
specialtag = special_styles.get("P-"+c)
if specialtag is None:
specialtag = 'p'
self.writedata()
if not self.data: # Added by Kovid
# Give substance to empty paragraphs, as rendered by OOo
self.writeout(' ')
self.closetag(specialtag)
self.purgedata()
def s_text_s(self, tag, attrs):
# Changed by Kovid to fix non breaking spaces being prepended to
# element instead of being part of the text flow.
# We don't use an entity for the nbsp as the contents of self.data will
# be escaped on writeout.
""" Generate a number of spaces. We use the non breaking space for
the text:s ODF element.
"""
try:
c = int(attrs.get((TEXTNS, 'c'), 1))
except:
c = 0
if c > 0:
self.data.append(u'\u00a0'*c)
def s_text_span(self, tag, attrs):
""" The <text:span> element matches the <span> element in HTML. It is
            typically used to apply properties to the text.
"""
self.writedata()
c = attrs.get( (TEXTNS,'style-name'), None)
htmlattrs = {}
# Changed by Kovid to handle inline special styles defined on <text:span> tags.
# Apparently LibreOffice does this.
special = 'span'
if c:
c = c.replace(".","_")
special = special_styles.get("S-"+c)
if special is None:
special = 'span'
if self.generate_css:
htmlattrs['class'] = "S-%s" % c
self.opentag(special, htmlattrs)
self.purgedata()
def e_text_span(self, tag, attrs):
""" End the <text:span> """
self.writedata()
c = attrs.get( (TEXTNS,'style-name'), None)
# Changed by Kovid to handle inline special styles defined on <text:span> tags.
# Apparently LibreOffice does this.
special = 'span'
if c:
c = c.replace(".","_")
special = special_styles.get("S-"+c)
if special is None:
special = 'span'
self.closetag(special, False)
self.purgedata()
def s_text_tab(self, tag, attrs):
""" Move to the next tabstop. We ignore this in HTML
"""
self.writedata()
self.writeout(' ')
self.purgedata()
def s_text_x_source(self, tag, attrs):
""" Various indexes and tables of contents. We ignore those.
"""
self.writedata()
self.purgedata()
self.s_ignorexml(tag, attrs)
def e_text_x_source(self, tag, attrs):
""" Various indexes and tables of contents. We ignore those.
"""
self.writedata()
self.purgedata()
#-----------------------------------------------------------------------------
#
# Reading the file
#
#-----------------------------------------------------------------------------
def load(self, odffile):
""" Loads a document into the parser and parses it.
The argument can either be a filename or a document in memory.
"""
self.lines = []
self._wfunc = self._wlines
if isinstance(odffile, basestring) \
or hasattr(odffile, 'read'): # Added by Kovid
self.document = load(odffile)
else:
self.document = odffile
self._walknode(self.document.topnode)
def _walknode(self, node):
if node.nodeType == Node.ELEMENT_NODE:
self.startElementNS(node.qname, node.tagName, node.attributes)
for c in node.childNodes:
self._walknode(c)
self.endElementNS(node.qname, node.tagName)
if node.nodeType == Node.TEXT_NODE or node.nodeType == Node.CDATA_SECTION_NODE:
self.characters(unicode(node))
def odf2xhtml(self, odffile):
""" Load a file and return the XHTML
"""
self.load(odffile)
return self.xhtml()
def _wlines(self,s):
if s != '': self.lines.append(s)
def xhtml(self):
""" Returns the xhtml
"""
return ''.join(self.lines)
def _writecss(self, s):
if s != '': self._csslines.append(s)
def _writenothing(self, s):
pass
def css(self):
""" Returns the CSS content """
self._csslines = []
self._wfunc = self._writecss
self.generate_stylesheet()
res = ''.join(self._csslines)
self._wfunc = self._wlines
del self._csslines
return res
def save(self, outputfile, addsuffix=False):
""" Save the HTML under the filename.
If the filename is '-' then save to stdout
We have the last style filename in self.stylefilename
"""
if outputfile == '-':
import sys # Added by Kovid
outputfp = sys.stdout
else:
if addsuffix:
outputfile = outputfile + ".html"
outputfp = file(outputfile, "w")
outputfp.write(self.xhtml().encode('us-ascii','xmlcharrefreplace'))
outputfp.close()
class ODF2XHTMLembedded(ODF2XHTML):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
def __init__(self, lines, generate_css=True, embedable=False):
self._resetobject()
self.lines = lines
# Tags
self.generate_css = generate_css
self.elements = {
# (DCNS, 'title'): (self.s_processcont, self.e_dc_title),
# (DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
# (DCNS, 'creator'): (self.s_processcont, self.e_dc_metatag),
# (DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
# (DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
(DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
(DRAWNS, 'image'): (self.s_draw_image, None),
(DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
(DRAWNS, "layer-set"):(self.s_ignorexml, None),
(DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
(DRAWNS, 'object'): (self.s_draw_object, None),
(DRAWNS, 'object-ole'): (self.s_draw_object_ole, None),
(DRAWNS, 'text-box'): (self.s_draw_textbox, self.e_draw_textbox),
# (METANS, 'creation-date'):(self.s_processcont, self.e_dc_metatag),
# (METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
# (METANS, 'initial-creator'): (self.s_processcont, self.e_dc_metatag),
# (METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
(NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
(NUMBERNS, "currency-style"):(self.s_ignorexml, None),
(NUMBERNS, "date-style"):(self.s_ignorexml, None),
(NUMBERNS, "number-style"):(self.s_ignorexml, None),
(NUMBERNS, "text-style"):(self.s_ignorexml, None),
# (OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
# (OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "forms"):(self.s_ignorexml, None),
# (OFFICENS, "master-styles"):(self.s_office_master_styles, None),
(OFFICENS, "meta"):(self.s_ignorecont, None),
# (OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
# (OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
# (OFFICENS, "styles"):(self.s_office_styles, None),
# (OFFICENS, "text"):(self.s_office_text, self.e_office_text),
(OFFICENS, "scripts"):(self.s_ignorexml, None),
(PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
## (STYLENS, "default-page-layout"):(self.s_style_default_page_layout, self.e_style_page_layout),
# (STYLENS, "default-page-layout"):(self.s_ignorexml, None),
# (STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
# (STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "font-face"):(self.s_style_font_face, None),
## (STYLENS, "footer"):(self.s_style_footer, self.e_style_footer),
## (STYLENS, "footer-style"):(self.s_style_footer_style, None),
# (STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "handout-master"):(self.s_ignorexml, None),
## (STYLENS, "header"):(self.s_style_header, self.e_style_header),
## (STYLENS, "header-footer-properties"):(self.s_style_handle_properties, None),
## (STYLENS, "header-style"):(self.s_style_header_style, None),
# (STYLENS, "master-page"):(self.s_style_master_page, None),
# (STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
## (STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
# (STYLENS, "page-layout"):(self.s_ignorexml, None),
# (STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "style"):(self.s_style_style, self.e_style_style),
# (STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "table-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "text-properties"):(self.s_style_handle_properties, None),
(SVGNS, 'desc'): (self.s_ignorexml, None),
(TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
(TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
(TABLENS, 'table-column'): (self.s_table_table_column, None),
(TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
(TABLENS, 'table'): (self.s_table_table, self.e_table_table),
(TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
(TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
(TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
(TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'line-break'):(self.s_text_line_break, None),
(TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
(TEXTNS, "list"):(self.s_text_list, self.e_text_list),
(TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
(TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
(TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
(TEXTNS, "list-style"):(None, None),
(TEXTNS, "note"):(self.s_text_note, None),
(TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
(TEXTNS, "note-citation"):(None, self.e_text_note_citation),
(TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
(TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
(TEXTNS, 's'): (self.s_text_s, None),
(TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
(TEXTNS, 'tab'): (self.s_text_tab, None),
(TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "page-number"):(None, None),
}
| gpl-3.0 |
fallen/artiq | artiq/frontend/artiq_run.py | 1 | 4103 | #!/usr/bin/env python3
# Copyright (C) 2014, 2015 M-Labs Limited
# Copyright (C) 2014, 2015 Robert Jordens <[email protected]>
import argparse
import sys
import time
from operator import itemgetter
from itertools import chain
import logging
import h5py
from artiq.language.environment import EnvExperiment
from artiq.protocols.file_db import FlatFileDB
from artiq.master.worker_db import DeviceManager, ResultDB
from artiq.tools import *
logger = logging.getLogger(__name__)
class ELFRunner(EnvExperiment):
def build(self):
self.attr_device("core")
self.attr_argument("file")
def run(self):
with open(self.file, "rb") as f:
self.core.comm.load(f.read())
self.core.comm.run("run")
self.core.comm.serve(dict(), dict())
class SimpleParamLogger:
def set(self, timestamp, name, value):
logger.info("Parameter change: {} = {}".format(name, value))
class DummyScheduler:
def __init__(self):
self.next_rid = 0
self.pipeline_name = "main"
self.priority = 0
self.expid = None
def submit(self, pipeline_name, expid, priority, due_date, flush):
rid = self.next_rid
self.next_rid += 1
logger.info("Submitting: %s, RID=%s", expid, rid)
return rid
def delete(self, rid):
logger.info("Deleting RID %s", rid)
def pause(self):
pass
def get_argparser(with_file=True):
parser = argparse.ArgumentParser(
description="Local experiment running tool")
verbosity_args(parser)
parser.add_argument("-d", "--ddb", default="ddb.pyon",
help="device database file")
parser.add_argument("-p", "--pdb", default="pdb.pyon",
help="parameter database file")
parser.add_argument("-e", "--experiment", default=None,
help="experiment to run")
parser.add_argument("-o", "--hdf5", default=None,
help="write results to specified HDF5 file"
" (default: print them)")
if with_file:
parser.add_argument("file",
help="file containing the experiment to run")
parser.add_argument("arguments", nargs="*",
help="run arguments")
return parser
def _build_experiment(dmgr, pdb, rdb, args):
if hasattr(args, "file"):
if args.file.endswith(".elf"):
if args.arguments:
raise ValueError("arguments not supported for ELF kernels")
if args.experiment:
raise ValueError("experiment-by-name not supported "
"for ELF kernels")
return ELFRunner(dmgr, pdb, rdb, file=args.file)
else:
module = file_import(args.file)
file = args.file
else:
module = sys.modules["__main__"]
file = getattr(module, "__file__")
exp = get_experiment(module, args.experiment)
arguments = parse_arguments(args.arguments)
expid = {
"file": file,
"experiment": args.experiment,
"arguments": arguments
}
dmgr.virtual_devices["scheduler"].expid = expid
return exp(dmgr, pdb, rdb, **arguments)
def run(with_file=False):
args = get_argparser(with_file).parse_args()
init_logger(args)
dmgr = DeviceManager(FlatFileDB(args.ddb),
virtual_devices={"scheduler": DummyScheduler()})
pdb = FlatFileDB(args.pdb)
pdb.hooks.append(SimpleParamLogger())
rdb = ResultDB()
try:
exp_inst = _build_experiment(dmgr, pdb, rdb, args)
exp_inst.prepare()
exp_inst.run()
exp_inst.analyze()
finally:
dmgr.close_devices()
if args.hdf5 is not None:
with h5py.File(args.hdf5, "w") as f:
rdb.write_hdf5(f)
elif rdb.rt.read or rdb.nrt:
r = chain(rdb.rt.read.items(), rdb.nrt.items())
for k, v in sorted(r, key=itemgetter(0)):
print("{}: {}".format(k, v))
def main():
return run(with_file=True)
if __name__ == "__main__":
main()
| gpl-3.0 |
blueskycoco/rt-thread | bsp/nuvoton/numaker-iot-m487/rtconfig.py | 12 | 3486 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Program Files (x86)\GNU Tools ARM Embedded\6 2017-q1-update\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:\Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:\Program Files (x86)\IAR Systems\Embedded Workbench 8.2'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = ''
# BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'g++'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -ffunction-sections -fdata-sections -Wuninitialized'
if BUILD == 'debug':
DEVICE = DEVICE + ' -DDEBUG'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map -T ./linking_scripts/m480_link.ld '
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu=cortex-m4.fp'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter ./linking_scripts/m480_flash.sct'
CFLAGS += ' --c99'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' '
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu None'
LFLAGS = ' --config ./linking_scripts/m480_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = ''
| gpl-2.0 |
cernops/neutron | neutron/db/external_net_db.py | 17 | 6456 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron import manager
from neutron.plugins.common import constants as service_constants
DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
class ExternalNetwork(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# Add a relationship to the Network model in order to instruct
# SQLAlchemy to eagerly load this association
network = orm.relationship(
models_v2.Network,
backref=orm.backref("external", lazy='joined',
uselist=False, cascade='delete'))
class External_net_db_mixin(object):
"""Mixin class to add external network methods to db_base_plugin_v2."""
def _network_model_hook(self, context, original_model, query):
query = query.outerjoin(ExternalNetwork,
(original_model.id ==
ExternalNetwork.network_id))
return query
def _network_filter_hook(self, context, original_model, conditions):
if conditions is not None and not hasattr(conditions, '__iter__'):
conditions = (conditions, )
# Apply the external network filter only in non-admin context
if not context.is_admin and hasattr(original_model, 'tenant_id'):
conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
*conditions)
return conditions
def _network_result_filter_hook(self, query, filters):
vals = filters and filters.get(external_net.EXTERNAL, [])
if not vals:
return query
if vals[0]:
return query.filter((ExternalNetwork.network_id != expr.null()))
return query.filter((ExternalNetwork.network_id == expr.null()))
# TODO(salvatore-orlando): Perform this operation without explicitly
# referring to db_base_plugin_v2, as plugins that do not extend from it
# might exist in the future
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Network,
"external_net",
'_network_model_hook',
'_network_filter_hook',
'_network_result_filter_hook')
def _network_is_external(self, context, net_id):
try:
context.session.query(ExternalNetwork).filter_by(
network_id=net_id).one()
return True
except exc.NoResultFound:
return False
def _extend_network_dict_l3(self, network_res, network_db):
# Comparing with None for converting uuid into bool
network_res[external_net.EXTERNAL] = network_db.external is not None
return network_res
# Register dict extend functions for networks
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_extend_network_dict_l3'])
def _process_l3_create(self, context, net_data, req_data):
external = req_data.get(external_net.EXTERNAL)
external_set = attributes.is_attr_set(external)
if not external_set:
return
if external:
# expects to be called within a plugin's session
context.session.add(ExternalNetwork(network_id=net_data['id']))
net_data[external_net.EXTERNAL] = external
def _process_l3_update(self, context, net_data, req_data):
new_value = req_data.get(external_net.EXTERNAL)
net_id = net_data['id']
if not attributes.is_attr_set(new_value):
return
if net_data.get(external_net.EXTERNAL) == new_value:
return
if new_value:
context.session.add(ExternalNetwork(network_id=net_id))
net_data[external_net.EXTERNAL] = True
else:
            # must make sure we do not have any external gateway ports
            # (and thus, possible floating IPs) on this network before
            # allowing it to be updated to external=False
port = context.session.query(models_v2.Port).filter_by(
device_owner=DEVICE_OWNER_ROUTER_GW,
network_id=net_data['id']).first()
if port:
raise external_net.ExternalNetworkInUse(net_id=net_id)
context.session.query(ExternalNetwork).filter_by(
network_id=net_id).delete()
net_data[external_net.EXTERNAL] = False
def _process_l3_delete(self, context, network_id):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin:
l3plugin.delete_disassociated_floatingips(context, network_id)
def _filter_nets_l3(self, context, nets, filters):
vals = filters and filters.get(external_net.EXTERNAL, [])
if not vals:
return nets
ext_nets = set(en['network_id']
for en in context.session.query(ExternalNetwork))
if vals[0]:
return [n for n in nets if n['id'] in ext_nets]
else:
return [n for n in nets if n['id'] not in ext_nets]
def get_external_network_id(self, context):
nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
if len(nets) > 1:
raise n_exc.TooManyExternalNetworks()
else:
return nets[0]['id'] if nets else None
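# Illustrative usage sketch (editor's addition, not part of the original
# module): a core plugin typically gains the external-net extension by mixing
# this class into its bases and calling the _process_l3_* helpers from its
# network CRUD methods. The class name below is invented for the example.
#
#   class ExampleCorePlugin(db_base_plugin_v2.NeutronDbPluginV2,
#                           External_net_db_mixin):
#       supported_extension_aliases = ["external-net"]
#
#       def create_network(self, context, network):
#           net_data = network['network']
#           with context.session.begin(subtransactions=True):
#               net = super(ExampleCorePlugin, self).create_network(context,
#                                                                   network)
#               self._process_l3_create(context, net, net_data)
#           return net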
| apache-2.0 |
coryb/aminator | aminator/plugins/finalizer/tagging_s3.py | 2 | 9211 | # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.finalizer.tagging_s3
======================================
s3 tagging image finalizer
"""
import logging
from shutil import rmtree
from os.path import isdir
from os import makedirs, system
from os import environ
from aminator.config import conf_action
from aminator.plugins.finalizer.tagging_base import TaggingBaseFinalizerPlugin
from aminator.util import randword
from aminator.util.linux import sanitize_metadata, monitor_command
from aminator.util.metrics import cmdsucceeds, cmdfails, timer
__all__ = ('TaggingS3FinalizerPlugin',)
log = logging.getLogger(__name__)
class TaggingS3FinalizerPlugin(TaggingBaseFinalizerPlugin):
_name = 'tagging_s3'
def add_plugin_args(self):
tagging = super(TaggingS3FinalizerPlugin, self).add_plugin_args()
context = self._config.context
        tagging.add_argument('-n', '--name', dest='name', action=conf_action(context.ami), help='name of resultant AMI (default: package_name-version-release-arch-yyyymmddHHMM-s3)')
tagging.add_argument('--cert', dest='cert', action=conf_action(context.ami), help='The path to the PEM encoded RSA public key certificate file for ec2-bundle-volume')
tagging.add_argument('--privatekey', dest='privatekey', action=conf_action(context.ami), help='The path to the PEM encoded RSA private key file for ec2-bundle-vol')
tagging.add_argument('--ec2-user', dest='ec2_user', action=conf_action(context.ami), help='ec2 user id for ec2-bundle-vol')
tagging.add_argument('--tmpdir', dest='tmpdir', action=conf_action(context.ami), help='temp directory used by ec2-bundle-vol')
tagging.add_argument('--bucket', dest='bucket', action=conf_action(context.ami), help='the S3 bucket to use for ec2-upload-bundle')
tagging.add_argument('--break-copy-volume', dest='break_copy_volume', action=conf_action(context.ami, action='store_true'), help='break into shell after copying the volume, for debugging')
def _set_metadata(self):
super(TaggingS3FinalizerPlugin, self)._set_metadata()
context = self._config.context
config = self._config.plugins[self.full_name]
metadata = context.package.attributes
ami_name = context.ami.get('name', None)
if not ami_name:
ami_name = config.name_format.format(**metadata)
context.ami.name = sanitize_metadata('{0}-s3'.format(ami_name))
def tmpdir(self):
config = self._config.plugins[self.full_name]
ami = self._config.context.ami
return "{0}/{1}".format(ami.get("tmpdir", config.get("default_tmpdir", "/tmp")), ami.name)
# pylint: disable=access-member-before-definition
def unique_name(self):
context = self._config.context
if hasattr(self, "_unique_name"):
return self._unique_name
self._unique_name = "{0}-{1}".format(context.ami.name, randword(6))
return self._unique_name
def image_location(self):
return "{0}/{1}".format(self.tmpdir(), self.unique_name())
@cmdsucceeds("aminator.finalizer.tagging_s3.copy_volume.count")
@cmdfails("aminator.finalizer.tagging_s3.copy_volume.error")
@timer("aminator.finalizer.tagging_s3.copy_volume.duration")
def _copy_volume(self):
context = self._config.context
tmpdir = self.tmpdir()
if not isdir(tmpdir):
makedirs(tmpdir)
return monitor_command(["dd", "bs=65536", "if={0}".format(context.volume.dev), "of={0}".format(self.image_location())])
@cmdsucceeds("aminator.finalizer.tagging_s3.bundle_image.count")
@cmdfails("aminator.finalizer.tagging_s3.bundle_image.error")
@timer("aminator.finalizer.tagging_s3.bundle_image.duration")
def _bundle_image(self):
context = self._config.context
config = self._config.plugins[self.full_name]
block_device_map = config.default_block_device_map
root_device = config.default_root_device
bdm = "root={0}".format(root_device)
for bd in block_device_map:
bdm += ",{0}={1}".format(bd[1], bd[0])
bdm += ",ami={0}".format(root_device)
cmd = ['ec2-bundle-image']
cmd.extend(['-c', context.ami.get("cert", config.default_cert)])
cmd.extend(['-k', context.ami.get("privatekey", config.default_privatekey)])
cmd.extend(['-u', context.ami.get("ec2_user", str(config.default_ec2_user))])
cmd.extend(['-i', self.image_location()])
cmd.extend(['-d', self.tmpdir()])
if context.base_ami.architecture:
cmd.extend(['-r', context.base_ami.architecture])
vm_type = context.ami.get("vm_type", "paravirtual")
if vm_type == "paravirtual":
if context.base_ami.kernel_id:
cmd.extend(['--kernel', context.base_ami.kernel_id])
if context.base_ami.ramdisk_id:
cmd.extend(['--ramdisk', context.base_ami.ramdisk_id])
cmd.extend(['-B', bdm])
return monitor_command(cmd)
@cmdsucceeds("aminator.finalizer.tagging_s3.upload_bundle.count")
@cmdfails("aminator.finalizer.tagging_s3.upload_bundle.error")
@timer("aminator.finalizer.tagging_s3.upload_bundle.duration")
def _upload_bundle(self):
context = self._config.context
provider = self._cloud._connection.provider
ak = provider.get_access_key()
sk = provider.get_secret_key()
tk = provider.get_security_token()
cmd = ['ec2-upload-bundle']
cmd.extend(['-b', context.ami.bucket])
cmd.extend(['-a', ak])
cmd.extend(['-s', sk])
if tk:
cmd.extend(['-t', tk])
cmd.extend(['-m', "{0}.manifest.xml".format(self.image_location())])
cmd.extend(['--retry'])
return monitor_command(cmd)
def _register_image(self):
context = self._config.context
log.info('Registering image')
if not self._cloud.register_image(manifest="{0}/{1}.manifest.xml".format(context.ami.bucket, self.unique_name())):
return False
log.info('Registration success')
return True
def finalize(self):
log.info('Finalizing image')
context = self._config.context
self._set_metadata()
ret = self._copy_volume()
if not ret.success:
log.debug('Error copying volume, failure:{0.command} :{0.std_err}'.format(ret.result))
return False
if context.ami.get('break_copy_volume', False):
system("bash")
ret = self._bundle_image()
if not ret.success:
log.debug('Error bundling image, failure:{0.command} :{0.std_err}'.format(ret.result))
return False
ret = self._upload_bundle()
if not ret.success:
log.debug('Error uploading bundled volume, failure:{0.command} :{0.std_err}'.format(ret.result))
return False
if not self._register_image():
log.critical('Error registering image')
return False
if not self._add_tags(['ami']):
log.critical('Error adding tags')
return False
log.info('Image registered and tagged')
self._log_ami_metadata()
return True
def __enter__(self):
context = self._config.context
environ["AMINATOR_STORE_TYPE"] = "s3"
if context.ami.get("name", None):
environ["AMINATOR_AMI_NAME"] = context.ami.name
if context.ami.get("cert", None):
environ["AMINATOR_CERT"] = context.ami.cert
if context.ami.get("privatekey", None):
environ["AMINATOR_PRIVATEKEY"] = context.ami.privatekey
if context.ami.get("ec2_user", None):
environ["AMINATOR_EC2_USER"] = context.ami.ec2_user
if context.ami.get("tmpdir", None):
environ["AMINATOR_TMPDIR"] = context.ami.tmpdir
if context.ami.get("bucket", None):
environ["AMINATOR_BUCKET"] = context.ami.bucket
return super(TaggingS3FinalizerPlugin, self).__enter__()
def __exit__(self, exc_type, exc_value, trace):
if exc_type:
log.debug('Exception encountered in tagging s3 finalizer context manager',
exc_info=(exc_type, exc_value, trace))
# delete tmpdir used by ec2-bundle-vol
try:
td = self.tmpdir()
if isdir(td):
rmtree(td)
except Exception:
log.debug('Exception encountered attempting to clean s3 bundle tmpdir',
exc_info=True)
return False
| apache-2.0 |
template01/editthispost | node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py | 1825 | 17014 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if config.has_key('include_dirs'):
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path):
"""Calculate the defines for a project.
Returns:
    A dict that includes explicit defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.iterkeys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
GenerateCdtSettingsFile(target_list,
target_dicts,
data,
params,
config_name,
os.path.join(toplevel_build,
'eclipse-cdt-settings.xml'),
options,
shared_intermediate_dirs)
GenerateClasspathFile(target_list,
target_dicts,
options.toplevel_dir,
toplevel_build,
os.path.join(toplevel_build,
'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
config_name, out_name, options,
shared_intermediate_dirs):
gyp.common.EnsureDirExists(out_name)
with open(out_name, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
compiler_path = GetCompilerPath(target_list, data, options)
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs,
config_name, params, compiler_path)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name,
params, compiler_path)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
toplevel_build, out_name):
'''Generates a classpath file suitable for symbol navigation and code
completion of Java code (such as in Android projects) by finding all
.java and .jar files used as action inputs.'''
gyp.common.EnsureDirExists(out_name)
result = ET.Element('classpath')
def AddElements(kind, paths):
# First, we need to normalize the paths so they are all relative to the
# toplevel dir.
rel_paths = set()
for path in paths:
if os.path.isabs(path):
rel_paths.add(os.path.relpath(path, toplevel_dir))
else:
rel_paths.add(path)
for path in sorted(rel_paths):
entry_element = ET.SubElement(result, 'classpathentry')
entry_element.set('kind', kind)
entry_element.set('path', path)
AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
# Include the standard JRE container and a dummy out folder
AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
# Include a dummy out folder so that Eclipse doesn't use the default /bin
# folder in the root of the project.
AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all .jars used as inputs.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
if os.path.isabs(input_):
yield input_
else:
yield os.path.join(os.path.dirname(target_name), input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all likely java package root directories.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if (os.path.splitext(input_)[1] == '.java' and
not input_.startswith('$')):
dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
input_))
# If there is a parent 'src' or 'java' folder, navigate up to it -
# these are canonical package root names in Chromium. This will
# break if 'src' or 'java' exists in the package structure. This
# could be further improved by inspecting the java file for the
# package name if this proves to be too fragile in practice.
parent_search = dir_
while os.path.basename(parent_search) not in ['src', 'java']:
parent_search, _ = os.path.split(parent_search)
if not parent_search or parent_search == toplevel_dir:
# Didn't find a known root, just return the original path
yield dir_
break
else:
yield parent_search
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError("--generator_output not implemented for eclipse")
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit |
zederson/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| lgpl-2.1 |
akrzos/cfme_tests | cfme/tests/integration/test_aws_iam_auth_and_roles.py | 2 | 1835 | import pytest
from cfme.configure.access_control import simple_user
from cfme.login import login
from cfme.web_ui import menu
from utils.conf import credentials
from utils.testgen import auth_groups, generate
from utils import version
from utils.providers import setup_a_provider
pytest_generate_tests = generate(auth_groups, auth_mode='aws_iam')
@pytest.fixture(scope="module")
def setup_first_provider():
setup_a_provider(validate=True, check_existing=True)
@pytest.mark.tier(2)
def test_group_roles(configure_aws_iam_auth_mode, group_name, group_data, setup_first_provider):
"""Basic default AWS_IAM group role RBAC test
Validates expected menu and submenu names are present for default
AWS IAM groups
"""
# This should be removed but currently these roles are subject to a bug
if version.current_version() >= '5.4' and group_name in ['evmgroup-administrator',
'evmgroup-approver',
'evmgroup-auditor',
'evmgroup-operator',
'evmgroup-security',
'evmgroup-support',
'evmgroup-user']:
pytest.skip("This role currently fails this test")
try:
iam_group_name = group_name + '_aws_iam'
username = credentials[iam_group_name]['username']
password = credentials[iam_group_name]['password']
except KeyError:
pytest.fail('No match in credentials file for group "{}"'.format(iam_group_name))
login(simple_user(username, password))
assert set(menu.visible_pages()) == set(group_data)
| gpl-2.0 |
vntarasov/openpilot | selfdrive/debug/get_fingerprint.py | 1 | 1030 | #!/usr/bin/env python3
# simple script to get a vehicle fingerprint.
# Instructions:
# - connect to a Panda
# - run selfdrive/boardd/boardd
# - launch this script
# - turn on the car in STOCK MODE (set giraffe switches properly).
# Note: it's very important that the car is in stock mode, in order to collect a complete fingerprint
# - since some messages are published at low frequency, keep this script running for at least 30s,
# until all messages are received at least once
import cereal.messaging as messaging
logcan = messaging.sub_sock('can')
msgs = {}
while True:
lc = messaging.recv_sock(logcan, True)
if lc is None:
continue
for c in lc.can:
    # also read msgs sent by EON on CAN bus 0x80 and filter out
    # addrs with more than 11 bits
if c.src in [0, 2] and c.address < 0x800:
msgs[c.address] = len(c.dat)
  fingerprint = ', '.join("%d: %d" % v for v in sorted(msgs.items()))
  print("number of messages {0}:".format(len(msgs)))
  print("fingerprint {0}".format(fingerprint))
| mit |
rubenacevedo3/cpp-RoboDogVoiceController | vendor/googletest/googletest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
    Duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
mcr/ietfdb | django/core/management/base.py | 45 | 16447 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
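# Illustrative example (editor's addition, not part of the original Django
# module): a minimal subclass showing the handle()/CommandError flow described
# in the BaseCommand docstring above. The command's name and output are
# invented for the sketch.
class _ExampleGreetCommand(BaseCommand):
    help = 'Prints a greeting for each name given on the command line.'
    args = '<name name ...>'
    requires_model_validation = False
    def handle(self, *args, **options):
        if not args:
            raise CommandError('Enter at least one name.')
        # The returned string is written to stdout by execute().
        return '\n'.join('Hello, %s!' % name for name in args)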
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
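# --- Illustrative sketch (editor addition, not part of Django itself) -------
# A minimal ``NoArgsCommand`` subclass showing the subclassing pattern described
# in the docstrings above; in a real project the class would be named
# ``Command`` and live in ``yourapp/management/commands/<name>.py``.
class ExampleGreetingCommand(NoArgsCommand):
    help = "Prints a short greeting (illustration only)."

    def handle_noargs(self, **options):
        self.stdout.write("Hello from a NoArgsCommand subclass\n")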
def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except OSError, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for subdir in subdirs[:]:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
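# Illustrative call sketch (editor addition): creating a new project layout in
# the current directory would look roughly like this (``color_style`` comes
# from ``django.core.management.color``):
#
#     from django.core.management.color import color_style
#     copy_helper(color_style(), 'project', 'mysite', '.')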
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| bsd-3-clause |
vcoin-project/v | qa/rpc-tests/test_framework/bignum.py | 1 | 1991 | #
#
# bignum.py
#
# This file is copied from python-vcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# vcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
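# Illustrative round-trip check (editor addition, not from the upstream file):
# the MPI and vch encodings defined above should be lossless for signed ints.
if __name__ == '__main__':
    for _v in (0, 1, 127, 128, 255, -1, -128, 2 ** 64 + 5):
        assert mpi2bn(bn2mpi(_v)) == _v
        assert vch2bn(bn2vch(_v)) == _v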
| mit |
CatsAndDogsbvba/odoo | addons/hr_timesheet_invoice/report/__init__.py | 433 | 1136 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_profit
import report_analytic
import hr_timesheet_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.5/site-packages/pip/__init__.py | 328 | 11348 | #!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
import re
# 2016-06-17 [email protected]: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
from pip._vendor.requests.packages.urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "9.0.1"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
# setlocale can apparently crash if locale are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
sys.exit(main())
| mit |
jherico/ctypesgen | ctypesgencore/options.py | 13 | 1164 | #!/usr/bin/env python
"""
All of the components of ctypesgencore require an argument called "options".
In command-line usage, this would be an optparse.Values object. However, if
ctypesgencore is used as a standard Python module, constructing this object
would be a pain. So this module exists to provide a "default" options object
for convenience.
"""
import optparse, copy
default_values={
"other_headers": [],
"modules": [],
"include_search_paths": [],
"compile_libdirs": [],
"runtime_libdirs": [],
"cpp": "gcc -E",
"save_preprocessed_headers": None,
"all_headers": False,
"builtin_symbols": False,
"include_symbols": None,
"exclude_symbols": None,
"show_all_errors": False,
"show_long_errors": False,
"show_macro_warnings": True,
"header_template": None,
"inserted_files": [],
"other_known_names": [],
"include_macros": True,
"libraries": [],
"strip_build_path": None,
"output_language": "python",
"no_stddef_types": False,
"no_gnu_types": False,
"no_python_types": False,
}
def get_default_options():
return optparse.Values(copy.deepcopy(default_values))
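# Illustrative usage sketch (editor addition): grab a fresh defaults object and
# override a few fields before handing it to the other ctypesgencore components.
if __name__ == "__main__":
    opts = get_default_options()
    opts.all_headers = True
    opts.libraries = ["m"]
    print(opts.cpp)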
| bsd-3-clause |
translate/pootle | pytest_pootle/env.py | 3 | 23700 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from translate.storage.factory import getclass
class PootleTestEnv(object):
methods = (
"redis", "case_sensitive_schema", "formats", "site_root",
"languages", "suggestion_states", "site_matrix", "system_users",
"permissions", "site_permissions", "tps", "templates",
"disabled_project", "subdirs", "submissions", "announcements",
"terminology", "fs", "vfolders", "complex_po")
def setup(self, **kwargs):
for method in self.methods:
should_setup = (
method not in kwargs
or kwargs[method])
if should_setup:
getattr(self, "setup_%s" % method)()
def setup_formats(self):
from pootle.core.delegate import formats
formats.get().initialize()
def setup_complex_po(self):
import pytest_pootle
from pytest_pootle.factories import StoreDBFactory
from pootle_translationproject.models import TranslationProject
po_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "complex.po"))
with open(po_file) as f:
ttk = getclass(f)(f.read())
tp = TranslationProject.objects.get(
project__code="project0",
language__code="language0")
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="complex.po")
store.update(ttk)
def setup_suggestion_states(self):
from pootle_store.models import SuggestionState
for state in ["pending", "accepted", "rejected"]:
SuggestionState.objects.get_or_create(name=state)
def setup_announcements(self):
from pytest_pootle.factories import AnnouncementFactory
from pootle_project.models import Project
from pootle_language.models import Language
from pootle_translationproject.models import TranslationProject
for language in Language.objects.all():
AnnouncementFactory(
title="Language announcement for: %s" % language,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/languages/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/%s" % language.code)
for project in Project.objects.all():
AnnouncementFactory(
title="Project announcement for: %s" % project,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/projects/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/projects/%s" % project.code)
for tp in TranslationProject.objects.all():
AnnouncementFactory(
title="TP announcement for: %s" % tp,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/tps/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/%s/%s"
% (tp.language.code, tp.project.code))
def setup_case_sensitive_schema(self):
from django.db import connection
from django.apps import apps
from pootle.core.utils.db import set_mysql_collation_for_column
cursor = connection.cursor()
# Language
set_mysql_collation_for_column(
apps,
cursor,
"pootle_language.Language",
"code",
"utf8_general_ci",
"varchar(50)")
# Project
set_mysql_collation_for_column(
apps,
cursor,
"pootle_project.Project",
"code",
"utf8_bin",
"varchar(255)")
# Directory
set_mysql_collation_for_column(
apps,
cursor,
"pootle_app.Directory",
"pootle_path",
"utf8_bin",
"varchar(255)")
set_mysql_collation_for_column(
apps,
cursor,
"pootle_app.Directory",
"name",
"utf8_bin",
"varchar(255)")
# Store
set_mysql_collation_for_column(
apps,
cursor,
"pootle_store.Store",
"pootle_path",
"utf8_bin",
"varchar(255)")
set_mysql_collation_for_column(
apps,
cursor,
"pootle_store.Store",
"name",
"utf8_bin",
"varchar(255)")
# Word.Stem
set_mysql_collation_for_column(
apps,
cursor,
"pootle_word.Stem",
"root",
"utf8_bin",
"varchar(255)")
def setup_permissions(self):
from django.contrib.contenttypes.models import ContentType
from .fixtures.models.permission import _require_permission
args = {
'app_label': 'pootle_app',
'model': 'directory'}
pootle_content_type = ContentType.objects.get(**args)
_require_permission(
'view',
'Can access a project',
pootle_content_type)
_require_permission(
'hide',
'Cannot access a project',
pootle_content_type)
_require_permission(
'suggest',
'Can make a suggestion',
pootle_content_type)
_require_permission(
'translate',
'Can submit translations',
pootle_content_type)
_require_permission(
'review',
'Can review translations',
pootle_content_type)
_require_permission(
'administrate',
'Can administrate a TP',
pootle_content_type)
def _setup_project_fs(self, project):
from pootle_fs.utils import FSPlugin
from pytest_pootle.utils import add_store_fs
project.config["pootle_fs.fs_type"] = "localfs"
project.config["pootle_fs.translation_mappings"] = {
"default": "/<language_code>/<dir_path>/<filename>.<ext>"}
project.config["pootle_fs.fs_url"] = "/tmp/path/for/setup"
plugin = FSPlugin(project)
for store in plugin.resources.stores:
add_store_fs(
store=store,
fs_path=plugin.get_fs_path(store.pootle_path),
synced=True)
def setup_fs(self):
from pootle_project.models import Project
for i in range(0, 2):
project = Project.objects.get(code="project%s" % i)
self._setup_project_fs(project)
self._setup_project_fs(
Project.objects.get(code="terminology"))
self._setup_project_fs(
Project.objects.get(code="disabled_project0"))
def setup_languages(self):
from .fixtures.models.language import _require_language
_require_language('en', 'English')
def setup_redis(self):
from pootle.core.models import Revision
Revision.initialize(force=True)
def setup_system_users(self):
from django.contrib.auth import get_user_model
from .fixtures.models.user import TEST_USERS, _require_user
users = {
user.username: user
for user
in get_user_model().objects.all()}
for username, user_params in TEST_USERS.items():
user_params["email"] = "%[email protected]" % username
TEST_USERS[username]["user"] = (
users.get(username)
or _require_user(username=username, **user_params))
def setup_site_permissions(self):
from django.contrib.auth import get_user_model
from pootle_app.models import Directory, PermissionSet
User = get_user_model()
nobody = User.objects.get_nobody_user()
default = User.objects.get_default_user()
from django.contrib.auth.models import Permission
view = Permission.objects.get(codename="view")
suggest = Permission.objects.get(codename="suggest")
translate = Permission.objects.get(codename="translate")
criteria = {
'user': nobody,
'directory': Directory.objects.root}
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions.set([view, suggest])
permission_set.save()
criteria['user'] = default
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions.set([view, suggest, translate])
permission_set.save()
def setup_site_root(self):
from pytest_pootle.factories import DirectoryFactory
DirectoryFactory(
name="projects",
parent=DirectoryFactory(parent=None, name=""))
def setup_site_matrix(self):
from pytest_pootle.factories import ProjectDBFactory, LanguageDBFactory
from pootle_format.models import Format
from pootle_language.models import Language
# add 2 languages
for i_ in range(0, 2):
LanguageDBFactory()
source_language = Language.objects.get(code="en")
po = Format.objects.get(name="po")
for i_ in range(0, 2):
# add 2 projects
project = ProjectDBFactory(
source_language=source_language)
project.filetypes.add(po)
def setup_terminology(self):
import pytest_pootle
from pytest_pootle.factories import (
ProjectDBFactory, StoreDBFactory, TranslationProjectFactory)
from pootle_language.models import Language
source_language = Language.objects.get(code="en")
terminology = ProjectDBFactory(code="terminology",
checkstyle="terminology",
fullname="Terminology",
source_language=source_language)
term_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "terminology.po"))
with open(term_file) as f:
term_ttk = getclass(f)(f.read())
for language in Language.objects.all():
tp = TranslationProjectFactory(
project=terminology, language=language)
if language.code not in ["language0", "language1"]:
continue
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="terminology.po")
store.update(term_ttk)
def setup_disabled_project(self):
from pytest_pootle.factories import (DirectoryFactory,
ProjectDBFactory,
TranslationProjectFactory)
from pootle.core.contextmanagers import keep_data
from pootle_format.models import Format
from pootle_language.models import Language
with keep_data():
source_language = Language.objects.get(code="en")
project = ProjectDBFactory(code="disabled_project0",
fullname="Disabled Project 0",
source_language=source_language)
project.filetypes.add(Format.objects.get(name="po"))
project.disabled = True
project.save()
language = Language.objects.get(code="language0")
tp = TranslationProjectFactory(project=project, language=language)
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_stores(tp, n=(1, 1))
subdir0 = DirectoryFactory(name="subdir0", parent=tp.directory, tp=tp)
self._add_stores(tp, n=(1, 1), parent=subdir0)
def setup_subdirs(self):
from pytest_pootle.factories import DirectoryFactory
from pootle.core.contextmanagers import keep_data
from pootle_translationproject.models import TranslationProject
with keep_data():
for tp in TranslationProject.objects.all():
subdir0 = DirectoryFactory(
name="subdir0", parent=tp.directory, tp=tp)
subdir1 = DirectoryFactory(
name="subdir1", parent=subdir0, tp=tp)
self._add_stores(tp, n=(2, 1), parent=subdir0)
self._add_stores(tp, n=(1, 1), parent=subdir1)
def setup_submissions(self):
from django.contrib.auth import get_user_model
from django.utils import timezone
from pootle.core.contextmanagers import bulk_operations
from pootle_data.models import TPChecksData, TPData
from pootle_score.models import UserTPScore
from pootle_statistics.models import SubmissionTypes
from pootle_store.constants import UNTRANSLATED
from pootle_store.models import Unit, UnitChange
from pootle_translationproject.contextmanagers import update_tp_after
from pootle_translationproject.models import TranslationProject
year_ago = timezone.now() - relativedelta(years=1)
units = Unit.objects.all()
units.update(creation_time=year_ago)
User = get_user_model()
admin = User.objects.get(username="admin")
member = User.objects.get(username="member")
member2 = User.objects.get(username="member2")
UnitChange.objects.bulk_create(
UnitChange(unit_id=unit_id, changed_with=SubmissionTypes.SYSTEM)
for unit_id
in units.filter(state__gt=UNTRANSLATED).values_list("id", flat=True))
tps = TranslationProject.objects.exclude(
language__code="templates").select_related(
"language", "project__source_language").all()
bulk_pootle = bulk_operations(
models=(
get_user_model(),
UserTPScore,
TPData,
TPChecksData))
with bulk_pootle:
for tp in tps:
with update_tp_after(tp):
self._add_subs_to_stores(
tp.stores, admin, member, member2)
def _add_subs_to_stores(self, stores, admin, member, member2):
for store in stores.select_related("data", "parent"):
self._add_subs_to_store(store, admin, member, member2)
def _add_subs_to_store(self, store, admin, member, member2):
from django.utils import timezone
# from pootle_store.contextmanagers import update_store_after
year_ago = timezone.now() - relativedelta(years=1)
units = store.unit_set.select_related("change").all()
for unit in units:
self._add_submissions(
unit, year_ago, admin, member, member2)
def setup_templates(self):
from pootle.core.contextmanagers import keep_data
from pootle.core.signals import update_data
from pootle_project.models import Project
from pootle_translationproject.contextmanagers import update_tp_after
from pytest_pootle.factories import (
LanguageDBFactory, TranslationProjectFactory)
tps = []
with keep_data():
templates = LanguageDBFactory(code="templates")
for project in Project.objects.all():
# add a TP to the project for each language
tp = TranslationProjectFactory(project=project, language=templates)
# As there are no files on the FS we have to currently unobsolete
# the directory
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_template_stores(tp)
tps.append(tp)
for tp in tps:
with update_tp_after(tp):
for store in tp.stores.all():
update_data.send(
store.__class__,
instance=store)
def setup_tps(self):
from pootle.core.contextmanagers import keep_data
from pootle_project.models import Project
from pootle_language.models import Language
from pytest_pootle.factories import TranslationProjectFactory
with keep_data():
for project in Project.objects.select_related("source_language").all():
for language in Language.objects.exclude(code="en"):
# add a TP to the project for each language
tp = TranslationProjectFactory(
project=project, language=language)
# As there are no files on the FS we have to currently
# unobsolete the directory
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_stores(tp)
def _add_template_stores(self, tp, n=(3, 2), parent=None):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
for i_ in range(0, n[0]):
# add 3 stores
store = StoreDBFactory(translation_project=tp)
store.filetype = tp.project.filetype_tool.choose_filetype(store.name)
store.save()
            # add 4 empty units to each store
for i_ in range(0, 4):
UnitDBFactory(store=store, target="")
def _add_stores(self, tp, n=(3, 2), parent=None):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
from pootle_store.constants import UNTRANSLATED, TRANSLATED, FUZZY, OBSOLETE
for i_ in range(0, n[0]):
# add 3 stores
if parent is None:
store = StoreDBFactory(translation_project=tp)
else:
store = StoreDBFactory(translation_project=tp, parent=parent)
store.filetype = tp.project.filetype_tool.choose_filetype(store.name)
store.save()
# add 8 units to each store
for state in [UNTRANSLATED, TRANSLATED, FUZZY, OBSOLETE]:
for i_ in range(0, n[1]):
UnitDBFactory(store=store, state=state)
def _update_submission_times(self, unit, update_time, last_update=None):
submissions = unit.submission_set.all()
if last_update:
submissions = submissions.exclude(
creation_time__lte=last_update)
submissions.update(creation_time=update_time)
def _add_submissions(self, unit, created, admin, member, member2):
from pootle.core.delegate import review
from pootle_store.constants import UNTRANSLATED, FUZZY, OBSOLETE
from pootle_store.models import Suggestion, Unit, UnitChange
from django.utils import timezone
original_state = unit.state
unit.created = created
first_modified = created + relativedelta(months=unit.index, days=10)
# add suggestion at first_modified
suggestion_review = review.get(Suggestion)
suggestion, created_ = suggestion_review().add(
unit,
"Suggestion for %s" % (unit.target or unit.source),
user=member)
self._update_submission_times(unit, first_modified, created)
# accept the suggestion 7 days later if not untranslated
next_time = first_modified + timedelta(days=7)
if original_state == UNTRANSLATED:
suggestion_review([suggestion], reviewer=admin).reject()
else:
Unit.objects.filter(pk=unit.pk).update(mtime=next_time)
UnitChange.objects.filter(
unit_id=unit.pk).update(
reviewed_on=next_time,
reviewed_by=admin,
submitted_on=next_time)
suggestion_review([suggestion], reviewer=admin).accept()
self._update_submission_times(
unit, next_time, first_modified)
# add another suggestion as different user 7 days later
suggestion2_, created_ = suggestion_review().add(
unit,
"Suggestion 2 for %s" % (unit.target or unit.source),
user=member2)
self._update_submission_times(
unit,
first_modified + timedelta(days=14),
next_time)
# mark FUZZY
if original_state == FUZZY:
unit.markfuzzy()
# mark OBSOLETE
elif original_state == OBSOLETE:
unit.makeobsolete()
elif unit.target:
# Re-edit units with translations, adding some submissions
# of SubmissionTypes.EDIT_TYPES
old_target = unit.target
current_time = timezone.now() - timedelta(days=14)
unit.__class__.objects.filter(id=unit.id).update(
target_f="Updated %s" % old_target,
mtime=current_time)
unit.change.save()
def setup_vfolders(self):
from pytest_pootle.factories import VirtualFolderDBFactory
from django.db import connection
from django.apps import apps
from pootle.core.utils.db import set_mysql_collation_for_column
from pootle_language.models import Language
from pootle_project.models import Project
cursor = connection.cursor()
# VirtualFolder
set_mysql_collation_for_column(
apps,
cursor,
"virtualfolder.VirtualFolder",
"name",
"utf8_bin",
"varchar(70)")
project0 = Project.objects.get(code="project0")
language0 = Language.objects.get(code="language0")
VirtualFolderDBFactory(filter_rules="store0.po")
VirtualFolderDBFactory(filter_rules="store1.po")
vf = VirtualFolderDBFactory(
all_languages=True,
is_public=False,
filter_rules="store0.po")
vf.projects.add(project0)
vf.save()
vf = VirtualFolderDBFactory(
all_languages=True,
is_public=False,
filter_rules="store1.po")
vf.projects.add(project0)
vf.save()
vf = VirtualFolderDBFactory(
filter_rules="subdir0/store4.po")
vf.languages.add(language0)
vf.projects.add(project0)
vf.save()
| gpl-3.0 |
alexandrul-ci/robotframework | utest/writer/test_rowsplitter.py | 7 | 5821 | import unittest
from robot.writer.rowsplitter import RowSplitter
from robot.utils.asserts import assert_equal
class TestRowSplitter(unittest.TestCase):
def _test(self, data, expected, cols=3, table_type='settings'):
splitter = RowSplitter(cols=cols)
actual = list(splitter.split(data, table_type))
assert_equal(actual, expected)
def test_escaping_empty_cells_at_eol(self):
self._test(['First', 'second', ''],
[['First', 'second', '${EMPTY}']])
self._test(['First', 'second', '', 'next line'],
[['First', 'second', '${EMPTY}'],
['...', 'next line']])
self._test(['1.1', '1.2', '1.3', '', '2.1', '2.2', '', '3.1', '', ''],
[['1.1', '1.2', '1.3', '${EMPTY}'],
['...', '2.1', '2.2', '${EMPTY}'],
['...', '3.1', '', '${EMPTY}']], cols=4)
def test_splitting_inside_comment(self):
self._test(['Kw', 'Arg', '#Comment in', 'many cells'],
[['Kw', 'Arg', '#Comment in'],
['...', '# many cells']])
self._test(['Kw', 'Arg', '# Comment', 'in', 'very', 'many', 'cells', '!'],
[['Kw', 'Arg', '# Comment'],
['...', '# in', 'very'],
['...', '# many', 'cells'],
['...', '# !']])
self._test(['Kw', 'Arg', '# Comment in', 'many cells'],
[['Kw', 'Arg'],
['...', '# Comment in'],
['...', '# many cells']], cols=2)
def test_no_extra_comment_marker(self):
self._test(['1', '2', '3', '# Comment'],
[['1', '2', '3'],
['...', '# Comment']])
self._test(['1', '2', '# C 1', '# C 2'],
[['1', '2', '# C 1'],
['...', '# C 2']])
def test_splitting_whitespace_rows(self):
data = ['', '', '', '', 'foo', '# Comment']
for cols, expected in [(4, [['', '', '', '${EMPTY}'],
['...', 'foo', '# Comment']]),
(3, [['', '', '${EMPTY}'],
['...', '', 'foo'],
['...', '# Comment']]),
(2, [['', '${EMPTY}'],
['...', '${EMPTY}'],
['...', '${EMPTY}'],
['...', 'foo'],
['...', '# Comment']])]:
self._test(data, expected, cols)
def test_min_indent(self):
self._test(['1', '2', '3', '4'],
[['1', '2', '3'], ['...', '4']])
self._test(['1', '2', '3', '4'],
[['1', '2', '3'], ['', '...', '4']], table_type='keyword')
self._test(['1', '2', '3', '4'],
[['1', '2', '3'], ['', '...', '4']], table_type='test case')
def test_split_else(self):
self._test(['Run Keyword If', 'expression', 'Kw 1', 'ELSE', 'Kw 2'],
[['Run Keyword If', 'expression', 'Kw 1'],
['...', 'ELSE', 'Kw 2']], cols=100)
self._test(['Run Keyword If', 'e1', 'Kw 1', 'ELSE IF', 'e2', 'Kw 2'],
[['Run Keyword If', 'e1', 'Kw 1'],
['...', 'ELSE IF', 'e2', 'Kw 2']], cols=100)
self._test(['1', '2', 'ELSE IF', '3', '4', 'ELSE IF', '5', 'ELSE', '6'],
[['1', '2'],
['...', 'ELSE IF', '3', '4'],
['...', 'ELSE IF', '5'],
['...', 'ELSE', '6']], cols=100)
def test_split_also_and(self):
self._test(['Run Keywords', 'k1', 'AND', 'k2', 'a', 'b', 'AND', 'k3'],
[['Run Keywords', 'k1'],
['...', 'AND', 'k2', 'a', 'b'],
['...', 'AND', 'k3']], cols=100)
self._test(['', '1', 'AND', '2', 'ELSE', '3', 'ELSE IF', '4', 'AND', '5'],
[['', '1'],
['', '...', 'AND', '2'],
['', '...', 'ELSE', '3'],
['', '...', 'ELSE IF', '4'],
['', '...', 'AND', '5']], cols=100)
def test_dont_split_else_or_and_in_first_cell(self):
for data in (['ELSE', '1', '2'],
['ELSE IF', '1', '2'],
['AND', '1', '2']):
for no_split in (data,
[''] + data,
['', '', ''] + data,
['...'] + data,
['', '...'] + data,
['', '', '', '...'] + data):
self._test(no_split, [no_split], cols=100)
def test_split_internal_else_lines(self):
data = ['1', '2', '3', '4', '5', '6', '7', '8']
self._test(data + ['ELSE IF'] + data + ['ELSE'] + data,
[['1', '2', '3', '4'],
['...', '5', '6', '7'],
['...', '8'],
['...', 'ELSE IF', '1', '2'],
['...', '3', '4', '5'],
['...', '6', '7', '8'],
['...', 'ELSE', '1', '2'],
['...', '3', '4', '5'],
['...', '6', '7', '8']],
cols=4)
self._test([''] + data + ['ELSE IF'] + data + ['ELSE'] + data,
[['', '1', '2', '3', '4', '5', '6', '7'],
['', '...', '8'],
['', '...', 'ELSE IF', '1', '2', '3', '4', '5'],
['', '...', '6', '7', '8'],
['', '...', 'ELSE', '1', '2', '3', '4', '5'],
['', '...', '6', '7', '8']],
cols=8)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
DSMan195276/protura-binutils | gdb/syscalls/arm-linux.py | 46 | 1759 | # Copyright (C) 2013-2015 Free Software Foundation, Inc.
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
import sys
import re
import time
infname = sys.argv[1]
inf = file(infname)
print("""\
<?xml version="1.0"?>
<!-- Copyright (C) 2009-%s Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. This file is offered as-is,
without any warranty. -->
<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
<!-- This file was generated using the following file:
%s
The file mentioned above belongs to the Linux Kernel.
Some small hand-edits were made. -->
<syscalls_info>""" % (time.strftime("%Y"), infname))
def record(name, number, comment=None):
#nm = 'name="%s"' % name
#s = ' <syscall %-30s number="%d"/>' % (nm, number)
s = ' <syscall name="%s" number="%d"/>' % (name, number)
if comment:
s += ' <!-- %s -->' % comment
print(s)
for line in inf:
m = re.match(r'^#define __NR_(\w+)\s+\(__NR_SYSCALL_BASE\+\s*(\d+)\)',
line)
if m:
record(m.group(1), int(m.group(2)))
continue
m = re.match(r'^\s+/\* (\d+) was sys_(\w+) \*/$', line)
if m:
record(m.group(2), int(m.group(1)), 'removed')
m = re.match(r'^#define __ARM_NR_(\w+)\s+\(__ARM_NR_BASE\+\s*(\d+)\)',
line)
if m:
record('ARM_'+m.group(1), 0x0f0000+int(m.group(2)))
continue
print('</syscalls_info>')
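# Illustrative sketch (editor addition): a kernel header line such as
#   #define __NR_restart_syscall (__NR_SYSCALL_BASE+  0)
# is emitted by the loop above as
#   <syscall name="restart_syscall" number="0"/>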
| gpl-2.0 |
kose-y/pylearn2 | pylearn2/scripts/papers/dropout/tests/test_dropout.py | 46 | 2057 | """
Unit tests for dropout paper
"""
import os
from pylearn2.scripts.tests.yaml_testing import limited_epoch_train
from pylearn2.testing.skip import skip_if_no_data
from theano import config
from theano.compile import get_default_mode
yaml_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..'))
save_path = os.path.dirname(os.path.realpath(__file__))
def test_mnist_valid():
"""
Tests mnist_valid.yaml by running it for only one epoch
"""
skip_if_no_data()
mode = get_default_mode()
if hasattr(mode, 'check_py_code'):
old_value = mode.check_py_code
mode.check_py_code = False
try:
if config.mode == "DEBUG_MODE":
yaml_file = 'mnist_valid_fast'
else:
yaml_file = 'mnist_valid'
limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'
% yaml_file))
try:
os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))
os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))
except Exception:
pass
finally:
if hasattr(mode, 'check_py_code'):
mode.check_py_code = old_value
def test_mnist():
"""
Tests mnist.yaml by running it for only one epoch
"""
skip_if_no_data()
mode = get_default_mode()
if hasattr(mode, 'check_py_code'):
old_value = mode.check_py_code
mode.check_py_code = False
try:
if config.mode == "DEBUG_MODE":
yaml_file = 'mnist_fast'
else:
yaml_file = 'mnist'
limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'
% yaml_file))
try:
os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))
os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))
except Exception:
pass
finally:
if hasattr(mode, 'check_py_code'):
mode.check_py_code = old_value
| bsd-3-clause |
ultimanet/nifty | rg/powerspectrum.py | 1 | 26583 | ## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
## TODO: cythonize
from __future__ import division
import numpy as np
def draw_vector_nd(axes,dgrid,ps,symtype=0,fourier=False,zerocentered=False,kpack=None):
"""
        Draws an n-dimensional field on a regular grid from a given power
spectrum. The grid parameters need to be specified, together with a
couple of global options explained below. The dimensionality of the
field is determined automatically.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
ps : ndarray
The power spectrum as a function of Fourier modes.
symtype : int {0,1,2} : *optional*
Whether the output should be real valued (0), complex-hermitian (1)
or complex without symmetry (2). (default=0)
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
zerocentered : bool : *optional*
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
Returns
-------
field : ndarray
The drawn random field.
"""
if(kpack is None):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier))
klength = nklength(kdict)
else:
kdict = kpack[1][np.fft.ifftshift(kpack[0],axes=shiftaxes(zerocentered,st_to_zero_mode=False))]
klength = kpack[1]
#output is in position space
if(not fourier):
#output is real-valued
if(symtype==0):
vector = drawherm(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.real(np.fft.fftshift(np.fft.ifftn(vector),axes=shiftaxes(zerocentered)))
else:
return np.real(np.fft.ifftn(vector))
#output is complex with hermitian symmetry
elif(symtype==1):
vector = drawwild(klength,kdict,ps,real_corr=2)
if(np.any(zerocentered==True)):
return np.fft.fftshift(np.fft.ifftn(np.real(vector)),axes=shiftaxes(zerocentered))
else:
return np.fft.ifftn(np.real(vector))
#output is complex without symmetry
else:
vector = drawwild(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(np.fft.ifftn(vector),axes=shiftaxes(zerocentered))
else:
return np.fft.ifftn(vector)
#output is in fourier space
else:
#output is real-valued
if(symtype==0):
vector = drawwild(klength,kdict,ps,real_corr=2)
if np.any(zerocentered == True):
return np.real(np.fft.fftshift(vector,axes=shiftaxes(zerocentered)))
else:
return np.real(vector)
#output is complex with hermitian symmetry
elif(symtype==1):
vector = drawherm(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(vector,axes=shiftaxes(zerocentered))
else:
return vector
#output is complex without symmetry
else:
vector = drawwild(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(vector,axes=shiftaxes(zerocentered))
else:
return vector
#def calc_ps(field,axes,dgrid,zerocentered=False,fourier=False):
#
# """
# Calculates the power spectrum of a given field assuming that the field
# is statistically homogenous and isotropic.
#
# Parameters
# ----------
# field : ndarray
# The input field from which the power spectrum should be determined.
#
# axes : ndarray
# An array with the length of each axis.
#
# dgrid : ndarray
# An array with the pixel length of each axis.
#
# zerocentered : bool : *optional*
# Whether the output array should be zerocentered, i.e. starting with
# negative Fourier modes going over the zero mode to positive modes,
# or not zerocentered, where zero, positive and negative modes are
# simpy ordered consecutively.
#
# fourier : bool : *optional*
# Whether the output should be in Fourier space or not
# (default=False).
#
# """
#
# ## field absolutes
# if(not fourier):
# foufield = np.fft.fftshift(np.fft.fftn(field))
# elif(np.any(zerocentered==False)):
# foufield = np.fft.fftshift(field, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
# else:
# foufield = field
# fieldabs = np.abs(foufield)**2
#
# kdict = nkdict_fast(axes,dgrid,fourier)
# klength = nklength(kdict)
#
# ## power spectrum
# ps = np.zeros(klength.size)
# rho = np.zeros(klength.size)
# for ii in np.ndindex(kdict.shape):
# position = np.searchsorted(klength,kdict[ii])
# rho[position] += 1
# ps[position] += fieldabs[ii]
# ps = np.divide(ps,rho)
# return ps
def calc_ps_fast(field,axes,dgrid,zerocentered=False,fourier=False,pindex=None,kindex=None,rho=None):
"""
Calculates the power spectrum of a given field faster assuming that the
        field is statistically homogeneous and isotropic.
Parameters
----------
field : ndarray
The input field from which the power spectrum should be determined.
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool : *optional*
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
pindex : ndarray
Index of the Fourier grid points in a numpy.ndarray ordered
following the zerocentered flag (default=None).
kindex : ndarray
Array of all k-vector lengths (default=None).
rho : ndarray
Degeneracy of the Fourier grid, indicating how many k-vectors in
Fourier space have the same length (default=None).
"""
## field absolutes
if(not fourier):
foufield = np.fft.fftshift(np.fft.fftn(field))
elif(np.any(zerocentered==False)):
foufield = np.fft.fftshift(field, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
foufield = field
fieldabs = np.abs(foufield)**2
if(rho is None):
if(pindex is None):
## kdict
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
else:
klength = kindex
## power spectrum
ps = np.zeros(klength.size)
rho = np.zeros(klength.size)
for ii in np.ndindex(kdict.shape):
position = np.searchsorted(klength,kdict[ii])
ps[position] += fieldabs[ii]
rho[position] += 1
else:
## zerocenter pindex
if(np.any(zerocentered==False)):
pindex = np.fft.fftshift(pindex, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
## power spectrum
ps = np.zeros(np.max(pindex)+1)
rho = np.zeros(ps.size)
for ii in np.ndindex(pindex.shape):
ps[pindex[ii]] += fieldabs[ii]
rho[pindex[ii]] += 1
elif(pindex is None):
## kdict
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
else:
klength = kindex
## power spectrum
ps = np.zeros(klength.size)
for ii in np.ndindex(kdict.shape):
position = np.searchsorted(klength,kdict[ii])
ps[position] += fieldabs[ii]
else:
## zerocenter pindex
if(np.any(zerocentered==False)):
pindex = np.fft.fftshift(pindex, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
## power spectrum
ps = np.zeros(rho.size)
for ii in np.ndindex(pindex.shape):
ps[pindex[ii]] += fieldabs[ii]
ps = np.divide(ps,rho)
return ps
def get_power_index(axes,dgrid,zerocentered,irred=False,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
irred : bool : *optional*
If True, the function returns an array of all k-vector lengths and
their degeneracy factors. If False, just the power index array is
returned.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index or {klength, rho} : scalar or list
Returns either an array of all k-vector lengths and
their degeneracy factors or just the power index array
depending on the flag irred.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
if(irred):
rho = np.zeros(klength.shape,dtype=np.int)
for ii in np.ndindex(kdict.shape):
rho[np.searchsorted(klength,kdict[ii])] += 1
return klength,rho
else:
ind = np.empty(axes,dtype=np.int)
for ii in np.ndindex(kdict.shape):
ind[ii] = np.searchsorted(klength,kdict[ii])
return ind
def get_power_indices(axes,dgrid,zerocentered,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
irred : bool : *optional*
If True, the function returns an array of all k-vector lengths and
their degeneracy factors. If False, just the power index array is
returned.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index, klength, rho : ndarrays
Returns the power index array, an array of all k-vector lengths and
their degeneracy factors.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
ind = np.empty(axes,dtype=np.int)
rho = np.zeros(klength.shape,dtype=np.int)
for ii in np.ndindex(kdict.shape):
ind[ii] = np.searchsorted(klength,kdict[ii])
rho[ind[ii]] += 1
return ind,klength,rho
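## Illustrative usage sketch (editor addition), assuming a 64x64 regular grid
## and a white-noise stand-in field:
##
##     axes = np.array([64, 64])
##     dgrid = np.array([1., 1.])
##     zerocentered = np.array([True, True])
##     pindex, kindex, rho = get_power_indices(axes, dgrid, zerocentered)
##     field = np.random.normal(size=axes)
##     ps = calc_ps_fast(field, axes, dgrid, zerocentered=zerocentered,
##                       pindex=pindex, kindex=kindex, rho=rho)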
def get_power_indices2(axes,dgrid,zerocentered,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.
irred : bool : *optional*
If True, the function returns an array of all k-vector lengths and
their degeneracy factors. If False, just the power index array is
returned.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index, klength, rho : ndarrays
Returns the power index array, an array of all k-vector lengths and
their degeneracy factors.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast2(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast2(axes,dgrid,fourier)
klength,rho,ind = nkdict_to_indices(kdict)
return ind,klength,rho
def nkdict_to_indices(kdict):
kindex,pindex = np.unique(kdict,return_inverse=True)
pindex = pindex.reshape(kdict.shape)
rho = pindex.flatten()
rho.sort()
rho = np.unique(rho,return_index=True,return_inverse=False)[1]
rho = np.append(rho[1:]-rho[:-1],[np.prod(pindex.shape)-rho[-1]])
return kindex,rho,pindex
def bin_power_indices(pindex,kindex,rho,log=False,nbin=None,binbounds=None):
"""
Returns the (re)binned power indices associated with the Fourier grid.
Parameters
----------
pindex : ndarray
Index of the Fourier grid points in a numpy.ndarray ordered
following the zerocentered flag (default=None).
kindex : ndarray
Array of all k-vector lengths (default=None).
rho : ndarray
Degeneracy of the Fourier grid, indicating how many k-vectors in
Fourier space have the same length (default=None).
log : bool
Flag specifying if the binning is performed on logarithmic scale
(default: False).
nbin : integer
Number of used bins (default: None).
binbounds : {list, array}
Array-like inner boundaries of the used bins (default: None).
Returns
-------
pindex, kindex, rho : ndarrays
The (re)binned power indices.
"""
## boundaries
if(binbounds is not None):
binbounds = np.sort(binbounds)
## equal binning
else:
if(log is None):
log = False
if(log):
k = np.r_[0,np.log(kindex[1:])]
else:
k = kindex
dk = np.max(k[2:]-k[1:-1]) ## minimal dk
if(nbin is None):
nbin = int((k[-1]-0.5*(k[2]+k[1]))/dk-0.5) ## maximal nbin
else:
nbin = min(int(nbin),int((k[-1]-0.5*(k[2]+k[1]))/dk+2.5))
dk = (k[-1]-0.5*(k[2]+k[1]))/(nbin-2.5)
binbounds = np.r_[0.5*(3*k[1]-k[2]),0.5*(k[1]+k[2])+dk*np.arange(nbin-2)]
if(log):
binbounds = np.exp(binbounds)
## reordering
reorder = np.searchsorted(binbounds,kindex)
rho_ = np.zeros(len(binbounds)+1,dtype=rho.dtype)
kindex_ = np.empty(len(binbounds)+1,dtype=kindex.dtype)
for ii in range(len(reorder)):
if(rho_[reorder[ii]]==0):
kindex_[reorder[ii]] = kindex[ii]
rho_[reorder[ii]] += rho[ii]
else:
kindex_[reorder[ii]] = (kindex_[reorder[ii]]*rho_[reorder[ii]]+kindex[ii]*rho[ii])/(rho_[reorder[ii]]+rho[ii])
rho_[reorder[ii]] += rho[ii]
return reorder[pindex],kindex_,rho_
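# Example (minimal sketch, continuing the hypothetical arrays from above):
# rebinning the raw power indices onto at most 20 logarithmic bins.
#
# pindex_, kindex_, rho_ = bin_power_indices(ind, klength, rho,
#                                            log=True, nbin=20)
# # pindex_ now maps every grid point onto a logarithmic k-bin, kindex_ holds
# # the rho-weighted mean k-length per bin and rho_ the number of modes in it.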
def nhermitianize(field,zerocentered):
"""
Hermitianizes an arbitrary n-dimensional field. Becomes relatively slow
for large n.
Parameters
----------
field : ndarray
The input field that should be hermitianized.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
simply ordered consecutively.
Returns
-------
hermfield : ndarray
The hermitianized field.
"""
## shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field, axes=shiftaxes(zerocentered))
# for index in np.ndenumerate(field):
# negind = tuple(-np.array(index[0]))
# field[negind] = np.conjugate(index[1])
# if(field[negind]==field[index[0]]):
# field[index[0]] = np.abs(index[1])*(np.sign(index[1].real)+(np.sign(index[1].real)==0)*np.sign(index[1].imag)).astype(np.int)
subshape = np.array(field.shape,dtype=np.int) ## == axes
maxindex = subshape//2
subshape[np.argmax(subshape)] = subshape[np.argmax(subshape)]//2+1 ## ~half largest axis
for ii in np.ndindex(tuple(subshape)):
negii = tuple(-np.array(ii))
field[negii] = np.conjugate(field[ii])
for ii in np.ndindex((2,)*maxindex.size):
index = tuple(ii*maxindex)
field[index] = np.abs(field[index])*(np.sign(field[index].real)+(np.sign(field[index].real)==0)*-np.sign(field[index].imag)).astype(np.int) ## minus since overwritten before
## reshift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return field
def nhermitianize_fast(field,zerocentered,special=False):
"""
Hermitianizes an arbitrary n-dimensional field faster.
Still becomes comparably slow for large n.
Parameters
----------
field : ndarray
The input field that should be hermitianized.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
simply ordered consecutively.
special : bool, *optional*
Must be True for random fields drawn from Gaussian or pm1
distributions.
Returns
-------
hermfield : ndarray
The hermitianized field.
"""
## shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field, axes=shiftaxes(zerocentered))
dummy = np.conjugate(field)
## mirror conjugate field
for ii in range(field.ndim):
dummy = np.swapaxes(dummy,0,ii)
dummy = np.flipud(dummy)
dummy = np.roll(dummy,1,axis=0)
dummy = np.swapaxes(dummy,0,ii)
if(special): ## special normalisation for certain random fields
field = np.sqrt(0.5)*(field+dummy)
maxindex = np.array(field.shape,dtype=np.int)//2
for ii in np.ndindex((2,)*maxindex.size):
index = tuple(ii*maxindex)
field[index] *= np.sqrt(0.5)
else: ## regular case
field = 0.5*(field+dummy)
## reshift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return field
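# Example (minimal sketch): a hermitianized field should transform back to a
# numerically real field.
#
# f = np.random.normal(size=(8, 8)) + 1j*np.random.normal(size=(8, 8))
# herm = nhermitianize_fast(f, np.array([False, False]))
# assert np.allclose(np.fft.ifftn(herm).imag, 0., atol=1e-10)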
def random_hermitian_pm1(datatype,zerocentered,shape):
"""
Draws a set of hermitianized random, complex pm1 numbers.
"""
field = np.random.randint(4,high=None,size=np.prod(shape,axis=0,dtype=np.int,out=None)).reshape(shape,order='C')
dummy = np.copy(field)
## mirror field
for ii in range(field.ndim):
dummy = np.swapaxes(dummy,0,ii)
dummy = np.flipud(dummy)
dummy = np.roll(dummy,1,axis=0)
dummy = np.swapaxes(dummy,0,ii)
field = (field+dummy+2*(field>dummy)*((field+dummy)%2))%4 ## wicked magic
x = np.array([1+0j,0+1j,-1+0j,0-1j],dtype=datatype)[field]
## (re)shift zerocentered axes
if(np.any(zerocentered==True)):
x = np.fft.fftshift(x,axes=shiftaxes(zerocentered))
return x
#-----------------------------------------------------------------------------
# Auxiliary functions
#-----------------------------------------------------------------------------
def shiftaxes(zerocentered,st_to_zero_mode=False):
"""
Shifts the axes in a special way needed for some functions
"""
axes = []
for ii in range(len(zerocentered)):
if(st_to_zero_mode==False)and(zerocentered[ii]):
axes += [ii]
if(st_to_zero_mode==True)and(not zerocentered[ii]):
axes += [ii]
return axes
def nkdict(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/axes[i]/dgrid[i] for i in range(len(axes))])
kdict = np.empty(axes)
for ii in np.ndindex(kdict.shape):
kdict[ii] = np.sqrt(np.sum(((ii-axes//2)*dk)**2))
return kdict
def nkdict_fast(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
temp_vecs = np.array(np.where(np.ones(axes)),dtype='float').reshape(np.append(len(axes),axes))
temp_vecs = np.rollaxis(temp_vecs,0,len(temp_vecs.shape))
temp_vecs -= axes//2
temp_vecs *= dk
temp_vecs *= temp_vecs
return np.sqrt(np.sum((temp_vecs),axis=-1))
def nkdict_fast2(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
inds = []
for a in axes:
inds += [slice(0,a)]
cords = np.ogrid[inds]
dists = ((cords[0]-axes[0]//2)*dk[0])**2
for ii in range(1,len(axes)):
dists = dists + ((cords[ii]-axes[ii]//2)*dk[ii])**2
dists = np.sqrt(dists)
return dists
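# Example (minimal sketch): on a 4x4 grid with unit pixel size the zero mode
# sits at axes//2 == (2, 2) and every entry is the distance from it.
#
# k = nkdict_fast2(np.array([4, 4]), np.array([1., 1.]))
# # k[2, 2] == 0.0, k[2, 3] == 1.0, k[3, 3] == sqrt(2), ...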
def nklength(kdict):
return np.sort(list(set(kdict.flatten())))
#def drawherm(vector,klength,kdict,ps): ## vector = np.zeros(kdict.shape,dtype=np.complex)
# for ii in np.ndindex(vector.shape):
# if(vector[ii]==np.complex(0.,0.)):
# vector[ii] = np.sqrt(0.5*ps[np.searchsorted(klength,kdict[ii])])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# negii = tuple(-np.array(ii))
# vector[negii] = np.conjugate(vector[ii])
# if(vector[negii]==vector[ii]):
# vector[ii] = np.float(np.sqrt(ps[klength==kdict[ii]]))*np.random.normal(0.,1.)
# return vector
def drawherm(klength,kdict,ps):
"""
Draws a hermitian random field from a Gaussian distribution.
"""
# vector = np.zeros(kdict.shape,dtype='complex')
# for ii in np.ndindex(vector.shape):
# if(vector[ii]==np.complex(0.,0.)):
# vector[ii] = np.sqrt(0.5*ps[np.searchsorted(klength,kdict[ii])])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# negii = tuple(-np.array(ii))
# vector[negii] = np.conjugate(vector[ii])
# if(vector[negii]==vector[ii]):
# vector[ii] = np.float(np.sqrt(ps[np.searchsorted(klength,kdict[ii])]))*np.random.normal(0.,1.)
# return vector
vec = np.random.normal(loc=0,scale=1,size=kdict.size).reshape(kdict.shape)
vec = np.fft.fftn(vec)/np.sqrt(np.prod(kdict.shape))
for ii in np.ndindex(kdict.shape):
vec[ii] *= np.sqrt(ps[np.searchsorted(klength,kdict[ii])])
return vec
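# Example (minimal sketch, with a hypothetical power-law spectrum): drawing a
# Gaussian random field realisation on a 64x64 grid.
#
# axes = np.array([64, 64]); dgrid = np.array([1., 1.])
# kdict = np.fft.fftshift(nkdict_fast2(axes, dgrid)) # align with fftn ordering
# klength = nklength(kdict)
# ps = (klength + 1.)**-2 # hypothetical spectrum tabulated on klength
# vec = drawherm(klength, kdict, ps) # Fourier-space realisation
# field = np.fft.ifftn(vec).real # hermitian symmetry makes this real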
#def drawwild(vector,klength,kdict,ps,real_corr=1): ## vector = np.zeros(kdict.shape,dtype=np.complex)
# for ii in np.ndindex(vector.shape):
# vector[ii] = np.sqrt(real_corr*0.5*ps[klength==kdict[ii]])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# return vector
def drawwild(klength,kdict,ps,real_corr=1):
"""
Draws a field of arbitrary symmetry from a Gaussian distribution.
"""
vec = np.empty(kdict.size,dtype=np.complex)
vec.real = np.random.normal(loc=0,scale=np.sqrt(real_corr*0.5),size=kdict.size)
vec.imag = np.random.normal(loc=0,scale=np.sqrt(real_corr*0.5),size=kdict.size)
vec = vec.reshape(kdict.shape)
for ii in np.ndindex(kdict.shape):
vec[ii] *= np.sqrt(ps[np.searchsorted(klength,kdict[ii])])
return vec
| gpl-3.0 |
MackZxh/OCA-Choice | hr/hr_contract_multi_jobs/__openerp__.py | 13 | 1494 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Contract Multi Jobs',
'version': '8.0.1.0.0',
'license': 'AGPL-3',
'category': 'Generic Modules/Human Resources',
'author': 'Savoir-faire Linux, '
'Fekete Mihai (Forest and Biomass Services Romania), '
'Odoo Community Association (OCA)',
'website': 'https://www.savoirfairelinux.com/',
'depends': [
'hr_contract'
],
'data': [
'security/ir.model.access.csv',
'views/hr_contract_view.xml',
],
'test': [],
'demo': [],
'installable': True,
}
| lgpl-3.0 |
KublaikhanGeek/scrapy | scrapy/spidermiddlewares/offsite.py | 85 | 2120 | """
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware(object):
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_spider_output(self, response, result, spider):
for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug("Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider):
regex = self.host_regex
# hostname can be None for wrong urls (like javascript links)
host = urlparse_cached(request).hostname or ''
return bool(regex.search(host))
def get_host_regex(self, spider):
"""Override this method to implement a different offsite policy"""
allowed_domains = getattr(spider, 'allowed_domains', None)
if not allowed_domains:
return re.compile('') # allow all by default
regex = r'^(.*\.)?(%s)$' % '|'.join(re.escape(d) for d in allowed_domains if d is not None)
return re.compile(regex)
def spider_opened(self, spider):
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set()
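# Example (minimal sketch, hypothetical spider): only requests whose host
# matches allowed_domains are followed, unless dont_filter=True is set.
#
# class ExampleSpider(scrapy.Spider):
#     name = 'example'
#     allowed_domains = ['example.com']  # example.com and its subdomains pass
#
# A request to http://shop.example.com/... is kept; one to http://other.org/
# is dropped (logged once per offsite domain and reflected in the
# 'offsite/domains' and 'offsite/filtered' stats).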
| bsd-3-clause |
stackforge/tacker | tacker/tests/unit/conductor/conductorrpc/test_vnf_pkgm_rpc.py | 2 | 4155 | # Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tacker.common.rpc import BackingOffClient
from tacker.conductor.conductorrpc import vnf_pkgm_rpc
from tacker.objects import vnf_package
from tacker.tests import base
from tacker.tests.unit.conductor import fakes
class VnfPackageRPCTestCase(base.BaseTestCase):
def setUp(self):
super(VnfPackageRPCTestCase, self).setUp()
self.context = self.fake_admin_context()
self.rpc_api = vnf_pkgm_rpc.VNFPackageRPCAPI()
self.cctxt_mock = mock.MagicMock()
def test_upload_vnf_package_content(self):
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(
self.context, **fakes.VNF_UPLOAD_VNF_PACKAGE_CONTENT)
self.rpc_api.upload_vnf_package_content(self.context,
vnf_package_obj, cast=True)
prepare_mock.assert_called()
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'upload_vnf_package_content',
vnf_package=vnf_package_obj)
_test()
def test_upload_vnf_package_from_uri(self):
fake_addressInformation = "http://test_csar.zip"
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(self.context,
**fakes.VNF_DATA)
self.rpc_api.upload_vnf_package_from_uri(self.context,
vnf_package_obj,
fake_addressInformation,
cast=True)
prepare_mock.assert_called()
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'upload_vnf_package_from_uri',
vnf_package=vnf_package_obj,
address_information=fake_addressInformation,
password=None, user_name=None)
_test()
def test_delete_vnf_package(self):
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(self.context,
**fakes.VNF_DATA)
self.rpc_api.delete_vnf_package(self.context,
vnf_package_obj, cast=True)
prepare_mock.assert_called()
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'delete_vnf_package',
vnf_package=vnf_package_obj)
_test()
def test_get_vnf_package_vnfd(self):
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(self.context,
**fakes.VNF_DATA)
self.rpc_api.get_vnf_package_vnfd(self.context,
vnf_package_obj, cast=False)
prepare_mock.assert_called()
self.cctxt_mock.call.assert_called_once_with(
self.context, 'get_vnf_package_vnfd',
vnf_package=vnf_package_obj)
_test()
| apache-2.0 |
IllusionRom-deprecated/android_platform_external_chromium_org | tools/idl_parser/idl_parser_test.py | 176 | 3689 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import unittest
from idl_lexer import IDLLexer
from idl_parser import IDLParser, ParseFile
from idl_ppapi_lexer import IDLPPAPILexer
from idl_ppapi_parser import IDLPPAPIParser
def ParseCommentTest(comment):
comment = comment.strip()
comments = comment.split(None, 1)
return comments[0], comments[1]
class WebIDLParser(unittest.TestCase):
def setUp(self):
self.parser = IDLParser(IDLLexer(), mute_error=True)
self.filenames = glob.glob('test_parser/*_web.idl')
def _TestNode(self, node):
comments = node.GetListOf('Comment')
for comment in comments:
check, value = ParseCommentTest(comment.GetName())
if check == 'BUILD':
msg = 'Expecting %s, but found %s.\n' % (value, str(node))
self.assertEqual(value, str(node), msg)
if check == 'ERROR':
msg = node.GetLogLine('Expecting\n\t%s\nbut found \n\t%s\n' % (
value, str(node)))
self.assertEqual(value, node.GetName(), msg)
if check == 'PROP':
key, expect = value.split('=')
actual = str(node.GetProperty(key))
msg = 'Mismatched property %s: %s vs %s.\n' % (key, expect, actual)
self.assertEqual(expect, actual, msg)
if check == 'TREE':
quick = '\n'.join(node.Tree())
lineno = node.GetProperty('LINENO')
msg = 'Mismatched tree at line %d:\n%sVS\n%s' % (lineno, value, quick)
self.assertEqual(value, quick, msg)
def testExpectedNodes(self):
for filename in self.filenames:
filenode = ParseFile(self.parser, filename)
children = filenode.GetChildren()
self.assertTrue(len(children) > 2, 'Expecting children in %s.' %
filename)
for node in filenode.GetChildren()[2:]:
self._TestNode(node)
class PepperIDLParser(unittest.TestCase):
def setUp(self):
self.parser = IDLPPAPIParser(IDLPPAPILexer(), mute_error=True)
self.filenames = glob.glob('test_parser/*_ppapi.idl')
def _TestNode(self, filename, node):
comments = node.GetListOf('Comment')
for comment in comments:
check, value = ParseCommentTest(comment.GetName())
if check == 'BUILD':
msg = '%s - Expecting %s, but found %s.\n' % (
filename, value, str(node))
self.assertEqual(value, str(node), msg)
if check == 'ERROR':
msg = node.GetLogLine('%s - Expecting\n\t%s\nbut found \n\t%s\n' % (
filename, value, str(node)))
self.assertEqual(value, node.GetName(), msg)
if check == 'PROP':
key, expect = value.split('=')
actual = str(node.GetProperty(key))
msg = '%s - Mismatched property %s: %s vs %s.\n' % (
filename, key, expect, actual)
self.assertEqual(expect, actual, msg)
if check == 'TREE':
quick = '\n'.join(node.Tree())
lineno = node.GetProperty('LINENO')
msg = '%s - Mismatched tree at line %d:\n%sVS\n%s' % (
filename, lineno, value, quick)
self.assertEqual(value, quick, msg)
def testExpectedNodes(self):
for filename in self.filenames:
filenode = ParseFile(self.parser, filename)
children = filenode.GetChildren()
self.assertTrue(len(children) > 2, 'Expecting children in %s.' %
filename)
for node in filenode.GetChildren()[2:]:
self._TestNode(filename, node)
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause |
fnordahl/nova | nova/exception.py | 1 | 56858 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
import webob.exc
from webob import util as woutil
from nova.i18n import _, _LE
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code, title="", explanation=""):
self.code = code
# There is a strict rule about constructing status line for HTTP:
# '...Status-Line, consisting of the protocol version followed by a
# numeric status code and its associated textual phrase, with each
# element separated by SP characters'
# (http://www.faqs.org/rfcs/rfc2616.html)
# 'code' and 'title' can not be empty because they correspond
# to numeric status code and its associated text
if title:
self.title = title
else:
try:
self.title = woutil.status_reasons[self.code]
except KeyError:
msg = _LE("Improper or unknown HTTP status code used: %d")
LOG.error(msg, code)
self.title = woutil.status_generic_reasons[self.code // 100]
self.explanation = explanation
super(ConvertedException, self).__init__()
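# Example (minimal sketch): when no title is given, the textual phrase is
# looked up from the status code.
#
# exc = ConvertedException(code=404, explanation='No such instance')
# # exc.title == 'Not Found' (taken from webob's status_reasons table)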
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return {k: v for k, v in six.iteritems(original) if "_pass" not in k}
def wrap_exception(notifier=None, get_notifier=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It also optionally sends the exception to the notification
system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier or get_notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, context,
*args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
event_type = f.__name__
(notifier or get_notifier()).error(context,
event_type,
payload)
return functools.wraps(f)(wrapped)
return inner
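# Example (minimal sketch; `my_notifier` and `do_something` are hypothetical):
#
# @wrap_exception(notifier=my_notifier)
# def do_something(self, context, instance_id):
#     ...  # any exception raised here is re-raised after an error
#          # notification carrying the cleansed call arguments is emitted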
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value)) # noqa
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
else:
# at least get the core message out if something happened
message = self.msg_fmt
self.message = message
super(NovaException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
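# Example (minimal sketch, hypothetical subclass): as described in the
# docstring above, subclasses only need to define msg_fmt (and optionally
# a code).
#
# class WidgetNotFound(NovaException):
#     msg_fmt = _("Widget %(widget_id)s could not be found.")
#     code = 404
#
# raise WidgetNotFound(widget_id='abc')  # -> "Widget abc could not be found."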
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class RevokeCertFailure(NovaException):
msg_fmt = _("Failed to revoke certificate for %(project_id)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class VirtualInterfacePlugException(NovaException):
msg_fmt = _("Virtual interface plugin failed")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class Forbidden(NovaException):
ec2_code = 'AuthFailure'
msg_fmt = _("Not authorized.")
code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class VolumeLimitExceeded(Forbidden):
msg_fmt = _("Volume resource quota exceeded")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
ec2_code = 'IncorrectState'
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidBDMVolumeNotBootable(InvalidBDM):
msg_fmt = _("Block Device %(id)s is not bootable.")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts. And its status is %(volume_status)s.")
class VolumeEncryptionNotSupported(Invalid):
msg_fmt = _("Volume encryption is not supported for %(volume_type)s "
"volume %(volume_id)s")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode: %(access_mode)s")
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidAPIVersionString(Invalid):
msg_fmt = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
msg_fmt = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidAggregateActionAdd(InvalidAggregateAction):
msg_fmt = _("Cannot add host to aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionDelete(InvalidAggregateAction):
msg_fmt = _("Cannot remove host from aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdate(InvalidAggregateAction):
msg_fmt = _("Cannot update aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
msg_fmt = _("Cannot update metadata of aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InvalidStrTime(Invalid):
msg_fmt = _("Invalid datetime string: %(reason)s")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class InvalidFixedIpAndMaxCountRequest(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
msg_fmt = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class ServiceTooOld(Invalid):
msg_fmt = _("This service is older (v%(thisver)i) than the minimum "
"(v%(minver)i) version of the rest of the deployment. "
"Unable to continue.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolume.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class BDMNotFound(NotFound):
msg_fmt = _("No Block Device Mapping with id %(id)s.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class VolumeBDMPathNotFound(VolumeBDMNotFound):
msg_fmt = _("No volume Block Device Mapping at path: %(path)s")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshot.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class InstanceMappingNotFound(NotFound):
msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkDhcpReleaseFailed(NovaException):
msg_fmt = _("Failed to release IP %(address)s with MAC %(mac_address)s")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkSetHostFailed(NovaException):
msg_fmt = _("Network set host failed for network %(network_id)s.")
class NetworkNotCreated(Invalid):
msg_fmt = _("%(req)s is required to create a network.")
class LabelTooLong(Invalid):
msg_fmt = _("Maximum allowed length for 'label' is 255.")
class InvalidIntValue(Invalid):
msg_fmt = _("%(key)s must be an integer.")
class InvalidCidr(Invalid):
msg_fmt = _("%(cidr)s is not a valid ip network.")
class InvalidAddress(Invalid):
msg_fmt = _("%(address)s is not a valid ip address.")
class AddressOutOfRange(Invalid):
msg_fmt = _("%(address)s is not within %(cidr)s.")
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
code = 409
class CidrConflict(NovaException):
msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
'with existing cidr (%(other)s)')
code = 409
class NetworkHasProject(NetworkInUse):
msg_fmt = _('Network must be disassociated from project '
'%(project_id)s before it can be deleted.')
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NetworkNotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to.")
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class NetworkMissingPhysicalNetwork(NovaException):
msg_fmt = _("Physical network is missing for network %(network_uuid)s")
class VifDetailsMissingVhostuserSockPath(Invalid):
msg_fmt = _("vhostuser_sock_path not present in vif_details"
" for vif %(vif_id)s")
class VifDetailsMissingMacvtapParameters(Invalid):
msg_fmt = _("Parameters %(missing_params)s not present in"
" vif_details for vif %(vif_id)s. Check your Neutron"
" configuration to validate that the macvtap parameters are"
" correct.")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class PortBindingFailed(Invalid):
msg_fmt = _("Binding failed for port %(port_id)s, please check neutron "
"logs for more information.")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed ip %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAssociateFailed(NovaException):
msg_fmt = _("Fixed IP associate failed for network: %(net)s.")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("No fixed IP addresses available for network: %(net)s")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed ips could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Interface %(interface)s not found.")
class FloatingIpAllocateFailed(NovaException):
msg_fmt = _("Floating IP allocate failed.")
class FloatingIpAssociateFailed(NovaException):
msg_fmt = _("Floating IP %(address)s association has failed.")
class FloatingIpBadRequest(Invalid):
ec2_code = "UnsupportedOperation"
msg_fmt = _("The floating IP request failed with a BadRequest")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Cannot disassociate auto assigned floating ip")
class KeypairNotFound(NotFound):
ec2_code = 'InvalidKeyPair.NotFound'
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class ComputeHostNotCreated(HostNotFound):
msg_fmt = _("Compute host %(name)s needs to be created first"
" before updating.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class InvalidQuotaMethodUsage(Invalid):
msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
ec2_code = 'InvalidGroup.Duplicate'
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
msg_fmt = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class SecurityGroupRuleExists(Invalid):
ec2_code = 'InvalidPermission.Duplicate'
msg_fmt = _("Rule already exists in group: %(rule)s")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
msg_fmt = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class FlavorExtraSpecUpdateCreateFailed(NovaException):
msg_fmt = _("Flavor %(id)d extra spec cannot be updated or created "
"after %(retries)d retries.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class InstanceTagNotFound(NotFound):
msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'")
class RotationRequiredForBackup(NovaException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(NovaException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("The created instance's disk would be too small.")
class FlavorDiskSmallerThanImage(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk "
"is %(flavor_size)i bytes, image is %(image_size)i bytes.")
class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is smaller than the minimum size specified in "
"image metadata. Flavor disk is %(flavor_size)i bytes, "
"minimum size is %(image_min_disk)i bytes.")
class VolumeSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Volume is smaller than the minimum size specified in image "
"metadata. Volume size is %(volume_size)i bytes, minimum "
"size is %(image_min_disk)i bytes.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class MaxRetriesExceeded(NoValidHost):
msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
# NOTE(cyeoh): 413 should only be used for the ec2 API
# The error status code for out of quota for the nova api should be
# 403 Forbidden.
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(NovaException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class InvalidAssociation(NotFound):
ec2_code = 'InvalidAssociationID.NotFound'
msg_fmt = _("Invalid association.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(instance_id)s (expecting \"i-...\")")
ec2_code = 'InvalidInstanceID.Malformed'
class InvalidVolumeIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(volume_id)s (expecting \"i-...\")")
ec2_code = 'InvalidVolumeID.Malformed'
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to "
"%(instance_uuid)s")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from "
"%(instance_uuid)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class InstanceUpdateConflict(NovaException):
msg_fmt = _("Conflict updating instance %(instance_uuid)s. "
"Expected: %(expected)s. Actual: %(actual)s")
class UnknownInstanceUpdateConflict(InstanceUpdateConflict):
msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were "
"unable to determine the cause")
class UnexpectedTaskStateError(InstanceUpdateConflict):
pass
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class ServiceGroupUnavailable(NovaException):
msg_fmt = _("The service from servicegroup driver %(driver)s is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class CoreAPIMissing(NovaException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class InstanceGroupSaveException(NovaException):
msg_fmt = _("%(field)s should not be part of the updates.")
class PluginRetriesExceeded(NovaException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceInvalidAddressField(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI address %(address)s has an invalid %(field)s.")
class PciDeviceInvalidDeviceName(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI whitelist can specify devname or address,"
" but not both")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NotFound):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request (%requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(Invalid):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(NovaException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoLiveMigrationForConfigDriveInLibVirt(NovaException):
msg_fmt = _("Live migration of instances with config drives is not "
"supported in libvirt unless libvirt instance path and "
"drive data is shared across compute nodes.")
class LiveMigrationWithOldNovaNotSafe(NovaException):
msg_fmt = _("Host %(server)s is running an old version of Nova, "
"live migrations involving that version may cause data loss. "
"Upgrade Nova on %(server)s and try again.")
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
class ImageVCPULimitsRangeExceeded(Invalid):
msg_fmt = _("Image vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPUTopologyRangeExceeded(Invalid):
msg_fmt = _("Image vCPU topology %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPULimitsRangeImpossible(Invalid):
msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"are impossible to satisfy for vcpus count %(vcpus)d")
class InvalidArchitectureName(Invalid):
msg_fmt = _("Architecture name '%(arch)s' is not recognised")
class ImageNUMATopologyIncomplete(Invalid):
msg_fmt = _("CPU and memory allocation must be provided for all "
"NUMA nodes")
class ImageNUMATopologyForbidden(Forbidden):
msg_fmt = _("Image property '%(name)s' is not permitted to override "
"NUMA configuration set against the flavor")
class ImageNUMATopologyAsymmetric(Invalid):
msg_fmt = _("Asymmetric NUMA topologies require explicit assignment "
"of CPUs and memory to nodes in image or flavor")
class ImageNUMATopologyCPUOutOfRange(Invalid):
msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
class ImageNUMATopologyCPUDuplicates(Invalid):
msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
class ImageNUMATopologyCPUsUnassigned(Invalid):
msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
class ImageNUMATopologyMemoryOutOfRange(Invalid):
msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
"%(memtotal)d MB")
class InvalidHostname(Invalid):
msg_fmt = _("Invalid characters in hostname '%(hostname)s'")
class NumaTopologyNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology")
class MigrationContextNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a migration "
"context.")
class SocketPortRangeExhaustedException(NovaException):
msg_fmt = _("Not able to acquire a free port for %(host)s")
class SocketPortInUseException(NovaException):
msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s")
class ImageSerialPortNumberInvalid(Invalid):
msg_fmt = _("Number of serial ports '%(num_ports)s' specified in "
"'%(property)s' isn't valid.")
class ImageSerialPortNumberExceedFlavorValue(Invalid):
msg_fmt = _("Forbidden to exceed flavor value of number of serial "
"ports passed in image meta.")
class InvalidImageConfigDrive(Invalid):
msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
class InvalidHypervisorVirtType(Invalid):
msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not "
"recognised")
class InvalidVirtualMachineMode(Invalid):
msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised")
class InvalidToken(Invalid):
msg_fmt = _("The token '%(token)s' is invalid or has expired")
class InvalidConnectionInfo(Invalid):
msg_fmt = _("Invalid Connection Info")
class InstanceQuiesceNotSupported(Invalid):
msg_fmt = _('Quiescing is not supported in instance %(instance_id)s')
class QemuGuestAgentNotEnabled(Invalid):
msg_fmt = _('QEMU guest agent is not enabled')
class SetAdminPasswdNotSupported(Invalid):
msg_fmt = _('Set admin password is not supported')
class MemoryPageSizeInvalid(Invalid):
msg_fmt = _("Invalid memory page size '%(pagesize)s'")
class MemoryPageSizeForbidden(Invalid):
msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'")
class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
class CPUPinningNotSupported(Invalid):
msg_fmt = _("CPU pinning is not supported by the host: "
"%(reason)s")
class CPUPinningInvalid(Invalid):
msg_fmt = _("Cannot pin/unpin cpus %(requested)s from the following "
"pinned set %(pinned)s")
class CPUPinningUnknown(Invalid):
msg_fmt = _("CPU set to pin/unpin %(requested)s must be a subset of "
"known CPU set %(cpuset)s")
class ImageCPUPinningForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
"CPU pinning policy set against the flavor")
class UnsupportedPolicyException(Invalid):
msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
class CellMappingNotFound(NotFound):
msg_fmt = _("Cell %(uuid)s has no mapping.")
class NUMATopologyUnsupported(Invalid):
msg_fmt = _("Host does not support guests with NUMA topology set")
class MemoryPagesUnsupported(Invalid):
msg_fmt = _("Host does not support guests with custom memory page sizes")
class EnumFieldInvalid(Invalid):
msg_fmt = _('%(typename)s in %(fieldname)s is not an instance of Enum')
class EnumFieldUnset(Invalid):
msg_fmt = _('%(fieldname)s missing field type')
class InvalidImageFormat(Invalid):
msg_fmt = _("Invalid image format '%(format)s'")
class UnsupportedImageModel(Invalid):
msg_fmt = _("Image model '%(image)s' is not supported")
class HostMappingNotFound(Invalid):
msg_fmt = _("Host '%(name)s' is not mapped to any cell")
| apache-2.0 |
dpmatthews/rose | metomi/rose/apps/rose_prune.py | 4 | 11898 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (C) 2012-2019 British Crown (Met Office) & Contributors.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""Builtin application: rose_prune: suite housekeeping application."""
import os
from random import shuffle
from metomi.rose.app_run import BuiltinApp, ConfigValueError
from metomi.rose.date import RoseDateTimeOperator
from metomi.rose.env import env_var_process, UnboundEnvironmentVariableError
from metomi.rose.fs_util import FileSystemEvent
from metomi.rose.host_select import HostSelector
from metomi.rose.popen import RosePopenError
import shlex
class RosePruneApp(BuiltinApp):
"""Prune files and directories generated by suite tasks."""
SCHEME = "rose_prune"
SECTION = "prune"
def run(self, app_runner, conf_tree, opts, args, uuid, work_files):
"""Suite housekeeping application.
This application is designed to work under "rose task-run" in a cycling
suite.
"""
suite_name = os.getenv("ROSE_SUITE_NAME")
if not suite_name:
return
# Tar-gzip job logs on suite host
# Prune job logs on remote hosts and suite host
prune_remote_logs_cycles = self._get_conf(
app_runner, conf_tree, "prune-remote-logs-at")
prune_server_logs_cycles = self._get_conf(
app_runner, conf_tree, "prune-server-logs-at")
archive_logs_cycles = self._get_conf(
app_runner, conf_tree, "archive-logs-at")
if (prune_remote_logs_cycles or
prune_server_logs_cycles or
archive_logs_cycles):
tmp_prune_remote_logs_cycles = []
for cycle in prune_remote_logs_cycles:
if cycle not in archive_logs_cycles:
tmp_prune_remote_logs_cycles.append(cycle)
prune_remote_logs_cycles = tmp_prune_remote_logs_cycles
tmp_prune_server_logs_cycles = []
for cycle in prune_server_logs_cycles:
if cycle not in archive_logs_cycles:
tmp_prune_server_logs_cycles.append(cycle)
prune_server_logs_cycles = tmp_prune_server_logs_cycles
if prune_remote_logs_cycles:
app_runner.suite_engine_proc.job_logs_pull_remote(
suite_name, prune_remote_logs_cycles,
prune_remote_mode=True)
if prune_server_logs_cycles:
app_runner.suite_engine_proc.job_logs_remove_on_server(
suite_name, prune_server_logs_cycles)
if archive_logs_cycles:
app_runner.suite_engine_proc.job_logs_archive(
suite_name, archive_logs_cycles)
# Prune other directories
globs, cycle_set = self._get_prune_globs(app_runner, conf_tree)
if not globs:
return
suite_engine_proc = app_runner.suite_engine_proc
hosts = suite_engine_proc.get_suite_jobs_auths(
suite_name, [(cycle, None) for cycle in cycle_set])
# A shuffle here should allow the load for doing "rm -rf" to be shared
# between job hosts who share a file system.
shuffle(hosts)
suite_dir_rel = suite_engine_proc.get_suite_dir_rel(suite_name)
form_dict = {"d": suite_dir_rel, "g": " ".join(globs)}
sh_cmd_head = r"set -e; cd %(d)s; " % form_dict
sh_cmd = (
r"set +e; ls -d %(g)s; " +
r"set -e; rm -fr %(g)s") % form_dict
cwd = os.getcwd()
host_selector = HostSelector(
app_runner.event_handler, app_runner.popen)
for host in hosts + [host_selector.get_local_host()]:
sdir = None
try:
if host_selector.is_local_host(host):
sdir = suite_engine_proc.get_suite_dir(suite_name)
app_runner.fs_util.chdir(sdir)
out = app_runner.popen.run_ok(
"bash", "-O", "extglob", "-c", sh_cmd)[0]
else:
cmd = app_runner.popen.get_cmd(
"ssh", host,
"bash -O extglob -c '" + sh_cmd_head + sh_cmd + "'")
out = app_runner.popen.run_ok(*cmd)[0]
except RosePopenError as exc:
app_runner.handle_event(exc)
else:
if sdir is None:
event = FileSystemEvent(FileSystemEvent.CHDIR,
host + ":" + suite_dir_rel)
app_runner.handle_event(event)
for line in sorted(out.splitlines()):
if not host_selector.is_local_host(host):
line = host + ":" + line
event = FileSystemEvent(
FileSystemEvent.DELETE, line.decode())
app_runner.handle_event(event)
finally:
if sdir:
app_runner.fs_util.chdir(cwd)
return
def _get_conf(self, app_runner, conf_tree, key, max_args=0):
"""Get a list of cycles from a configuration setting.
key -- An option key in self.SECTION to locate the setting.
max_args -- Maximum number of extra arguments for an item in the list.
The value of the setting is expected to be split by shlex.split into a
list of items. If max_args == 0, an item should be a string
        representing a cycle or a cycle offset. If max_args > 0, the cycle
or cycle offset string can, optionally, have arguments. The arguments
are delimited by colons ":".
E.g.:
prune-remote-logs-at=-PT6H -PT12H
prune-server-logs-at=-P7D
prune-datac-at=-PT6H:foo/* -PT12H:'bar/* baz/*' -P1D
prune-work-at=-PT6H:t1*:*.tar -PT12H:t1*: -PT12H:*.gz -P1D
If max_args == 0, return a list of cycles.
If max_args > 0, return a list of (cycle, [arg, ...])
"""
items_str = conf_tree.node.get_value([self.SECTION, key])
if items_str is None:
return []
try:
items_str = env_var_process(items_str)
except UnboundEnvironmentVariableError as exc:
raise ConfigValueError([self.SECTION, key], items_str, exc)
items = []
ref_point_str = os.getenv(
RoseDateTimeOperator.TASK_CYCLE_TIME_ENV)
try:
ref_point = None
ref_fmt = None
for item_str in shlex.split(items_str):
args = item_str.split(":", max_args)
when = args.pop(0)
cycle = when
if ref_point_str is not None:
if self._get_cycling_mode() == "integer":
# Integer cycling
if "P" in when: # "when" is an offset
cycle = str(int(ref_point_str) +
int(when.replace("P", "")))
else: # "when" is a cycle point
cycle = str(when)
else:
# Date-time cycling
if ref_fmt is None:
ref_point, ref_fmt = (
app_runner.date_time_oper.date_parse(
ref_point_str))
try:
time_point = app_runner.date_time_oper.date_parse(
when)[0]
except ValueError:
time_point = app_runner.date_time_oper.date_shift(
ref_point, when)
cycle = app_runner.date_time_oper.date_format(
ref_fmt, time_point)
if max_args:
items.append((cycle, args))
else:
items.append(cycle)
except ValueError as exc:
raise ConfigValueError([self.SECTION, key], items_str, exc)
return items
@classmethod
def _get_cycling_mode(cls):
"""Return task cycling mode."""
return os.getenv("ROSE_CYCLING_MODE")
def _get_prune_globs(self, app_runner, conf_tree):
"""Return (globs, cycles).
where:
* globs is for matching items to prune.
* cycles is a set of relevant cycles.
"""
globs = []
nodes = conf_tree.node.get_value([self.SECTION])
if nodes is None:
return [], set()
cycle_formats = {}
for key, node in nodes.items():
if node.is_ignored():
continue
if key.startswith("cycle-format{") and key.endswith("}"):
fmt = key[len("cycle-format{"):-1]
try:
cycle_formats[fmt] = env_var_process(node.value)
# Check formats are valid
if self._get_cycling_mode() == "integer":
cycle_formats[fmt] % 0
else:
app_runner.date_time_oper.date_format(
cycle_formats[fmt])
except (UnboundEnvironmentVariableError, ValueError) as exc:
raise ConfigValueError(
[self.SECTION, key], node.value, exc)
cycle_set = set()
for key, node in sorted(nodes.items()):
if node.is_ignored():
continue
if key == "prune-datac-at": # backward compat
head = "share/cycle"
elif key == "prune-work-at": # backward compat
head = "work"
elif key.startswith("prune{") and key.endswith("}"):
head = key[len("prune{"):-1].strip() # remove "prune{" and "}"
else:
continue
for cycle, cycle_args in self._get_conf(
app_runner, conf_tree, key, max_args=1):
cycle_set.add(cycle)
if cycle_args:
cycle_strs = {"cycle": cycle}
for cycle_key, cycle_format in cycle_formats.items():
if self._get_cycling_mode() == "integer":
cycle_strs[cycle_key] = cycle_format % int(cycle)
else: # date time cycling
cycle_point = (
app_runner.date_time_oper.date_parse(cycle)[0])
cycle_strs[cycle_key] = (
app_runner.date_time_oper.date_format(
cycle_format, cycle_point))
for tail_glob in shlex.split(cycle_args.pop()):
glob_ = tail_glob % cycle_strs
if glob_ == tail_glob: # no substitution
glob_ = os.path.join(cycle, tail_glob)
globs.append(os.path.join(head, glob_))
else:
globs.append(os.path.join(head, cycle))
return globs, cycle_set
| gpl-3.0 |
aapav01/SM-T530_Opensource_Compare | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
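#
# Round-trip sketch (not part of the original script): because each value is a
# little-endian unsigned 32-bit integer, a small test input can be generated
# with Python's struct module, e.g. (Python 2 style, matching this script):
#
#   python -c "import struct, sys; sys.stdout.write(struct.pack('<I', 57))" \
#       > cxacru-cf.bin
#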
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
followloda/PornGuys | FlaskServer/venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 360 | 18615 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
    Backwards-compatible with httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
        If False, attempts to decode specific content-encodings based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
@property
def connection(self):
return self._connection
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def _init_decoder(self):
"""
        Set up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content:
data += self._flush_decoder()
return data
def _flush_decoder(self):
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
buf = self._decoder.decompress(b'')
return buf + self._decoder.flush()
return b''
@contextmanager
def _error_catcher(self):
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except (HTTPException, SocketError) as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
data = None
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if data:
self._fp_bytes_read += len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
if self._connection:
self._connection.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
with self._error_catcher():
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(chunk, decode_content=decode_content,
flush_decoder=False)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
| gpl-3.0 |
acabey/acabey.github.io | projects/demos/engineering.purdue.edu/scriptingwithobjects/swocode/chap7/Private.py | 1 | 2342 | #!/usr/bin/python
# Private.py
#------------------------------ class X ------------------------------
class X: #(A)
__m = 10 #(B)
n = 20 #(C)
def __init__( self, pp, qq ): #(D)
self.__p = pp #(E)
self.q = qq + self.__p * X.__m + X.n #(F)
#----------------------------- Test Code ------------------------------
xobj = X( 30, 40 ) #(G)
#print xobj.__p # ERROR #(H)
print xobj.q # 360 #(I)
#print X.__m # ERROR #(J)
#print xobj.__m # ERROR #(K)
print X.n # 20 #(L)
print xobj.n # 20 #(M)
print X._X__m # 10 #(N)
print xobj._X__m # 10 #(O)
print xobj._X__p # 30 #(P)
print X.__dict__ # {'__module__': '__main__', #(Q)
# '__doc__': None,
# '_X__m': 10,
# '__init__': <function __init__ at 0x804f664>,
# 'n': 20}
print xobj.__dict__ # {'_X__p': 30, 'q': 360} #(R)
X.__m = 1000 # but this is not the same as __m of line (B) #(S)
print xobj.__m # 1000 #(T)
print X.__dict__ # {'__module__': '__main__', #(U)
# '_X__m': 10,
# '__m': 1000,
# '__doc__': None,
# '__init__': <function __init__ at 0x804f664>,
# 'n': 20}
xobj.__p = 2000 # but this is not the same as __p of line (E) #(V)
print xobj.__p # 2000 #(W)
print xobj.__dict__ # {'_X__p': 30, 'q': 360, '__p': 2000} #(X)
| gpl-3.0 |
alistairlow/tensorflow | tensorflow/python/summary/text_summary.py | 19 | 2872 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements text_summary in TensorFlow, with TensorBoard support.
The text_summary is a wrapper around the generic tensor_summary that takes a
string-type tensor and emits a TensorSummary op with SummaryMetadata that
notes that this summary is textual data for the TensorBoard text plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.summary_ops import tensor_summary
PLUGIN_NAME = "text"
def text_summary(name, tensor, collections=None):
"""Summarizes textual data.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1d and 2d tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2d subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary api, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: a string-type Tensor to summarize.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
if tensor.dtype != dtypes.string:
raise ValueError("Expected tensor %s to have dtype string, got %s" %
(tensor.name, tensor.dtype))
summary_metadata = summary_pb2.SummaryMetadata(
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME))
t_summary = tensor_summary(
name=name,
tensor=tensor,
summary_metadata=summary_metadata,
collections=collections)
return t_summary
| apache-2.0 |
radicalbit/ambari | ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py | 4 | 6865 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.core.system import System
import os
config = Script.get_config()
#RPM versioning support
rpm_version = default("/configurations/cluster-env/rpm_version", None)
#hadoop params
if rpm_version:
mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
hadoop_lib_home = "/usr/bigtop/current/hadoop-client/lib"
hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
hadoop_home = '/usr/bigtop/current/hadoop-client'
else:
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
hadoop_lib_home = "/usr/lib/hadoop/lib"
hadoop_bin = "/usr/lib/hadoop/sbin"
hadoop_home = '/usr'
hadoop_conf_dir = "/etc/hadoop/conf"
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#users and groups
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_zk_host = not len(zk_hosts) == 0
has_ganglia_server = not len(ganglia_server_hosts) == 0
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
#hadoop params
if has_namenode:
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
#db params
server_db_name = config['hostLevelParams']['db_name']
db_driver_filename = config['hostLevelParams']['db_driver_filename']
oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
ambari_server_resources = config['hostLevelParams']['jdk_location']
oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
rca_prefix = ""
else:
rca_prefix = rca_disabled_prefix
#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
# deprecated rhel jsvc_path
jsvc_path = "/usr/libexec/bigtop-utils"
else:
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#log4j.properties
yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
#log4j.properties
if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
log4j_props = config['configurations']['hdfs-log4j']['content']
if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
log4j_props += config['configurations']['yarn-log4j']['content']
else:
log4j_props = None
| apache-2.0 |
google/ion | ion/dev/doxygen_filter.py | 1 | 8299 | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Doxygen pre-filter script for ion.
This filter processes code and adds Doxygen-compatible markup in various places
to enable Doxygen to read the docs more fully. Unlike some other Doxygen
filters, it is designed to work with Doxygen's newer markdown syntax.
In order to ensure proper syntax coloring of indented code blocks, make sure
there is a blank (commented) line both above and below the block. For example:
// Comment comment comment.
//
// int CodeBlock() {
// Goes here;
// }
//
// More comment.
"""
import re
import sys
class DoxygenFormatter(object):
"""Transforms lines of a source file to make them doxygen-friendly."""
ANYWHERE = 'anywhere'
COMMENT = 'comment'
def __init__(self, outfile):
# The file-like object to which we will write lines.
self.out = outfile
# A buffer for storing empty lines which we can use later if we need to
# retroactively insert markup without causing line number offset problems.
self.empty_line_buffer = []
# Whether we are currently inside an indented code block.
self.in_code_block = False
self.CompileExpressions()
def CompileExpressions(self):
"""Pre-compiles frequently used regexps for improved performance.
The regexps are arranged as a list of 3-tuples, where the second value is
the replacement string (which may include backreferences) and the third
value is one of the context constants ANYWHERE or COMMENT. This is a list
of tuples instead of a dictionary because order matters: earlier regexps
will be applied first, and the resulting text (not the original) will be
what is seen by subsequent regexps.
"""
self.comment_regex = re.compile(r'^\s*//')
self.substitutions = [
# Remove copyright lines.
(re.compile(r'^\s*//\s*[Cc]opyright.*Google.*'), r'', self.ANYWHERE),
# Remove any comment lines that consist of only punctuation (banners).
# We only allow a maximum of two spaces before the punctuation so we
# don't accidentally get rid of code examples with bare braces and
# whatnot.
(re.compile(r'(^\s*)//\s{0,2}[-=#/]+$'), r'\1//\n', self.ANYWHERE),
# If we find something that looks like a list item that is indented four
# or more spaces, pull it back to the left so doxygen's Markdown engine
# doesn't treat it like a code block.
(re.compile(r'(^\s*)//\s{4,}([-\d*].*)'), r'\1 \2', self.COMMENT),
(re.compile(r'TODO'), r'@todo ', self.COMMENT),
# Replace leading 'Note:' or 'Note that' in a comment with @note
(re.compile(r'(\/\/\s+)Note(?:\:| that)', re.I), r'\1@note',
self.COMMENT),
# Replace leading 'Warning:' in a comment with @warning
(re.compile(r'(\/\/\s+)Warning:', re.I), r'\1@warning', self.COMMENT),
# Replace leading 'Deprecated' in a comment with @deprecated
(re.compile(r'(\/\/\s+)Deprecated[^\w\s]*', re.I), r'\1@deprecated',
self.COMMENT),
# Replace pipe-delimited parameter names with backtick-delimiters
(re.compile(r'\|(\w+)\|'), r'`\1`', self.COMMENT),
# Convert standalone comment lines to Doxygen style.
(re.compile(r'(^\s*)//(?=[^/])'), r'\1///', self.ANYWHERE),
# Strip trailing comments from preprocessor directives.
(re.compile(r'(^#.*)//.*'), r'\1', self.ANYWHERE),
# Convert remaining trailing comments to doxygen style, unless they are
# documenting the end of a block.
(re.compile(r'([^} ]\s+)//(?=[^/])'), r'\1///<', self.ANYWHERE),
]
def Transform(self, line):
"""Performs the regexp transformations defined by self.substitutions.
Args:
line: The line to transform.
Returns:
The resulting line.
"""
for (regex, repl, where) in self.substitutions:
if where is self.COMMENT and not self.comment_regex.match(line):
return line
line = regex.sub(repl, line)
return line
def AppendToBufferedLine(self, text):
"""Appends text to the last buffered empty line.
Empty lines are buffered rather than being written out directly. This lets
us retroactively rewrite buffered lines to include markup that affects the
following line, while avoiding the line number offset that would result from
inserting a line that wasn't in the original source.
Args:
text: The text to append to the line.
Returns:
True if there was an available empty line to which text could be
appended, and False otherwise.
"""
if self.empty_line_buffer:
last_line = self.empty_line_buffer.pop().rstrip()
last_line += text + '\n'
self.empty_line_buffer.append(last_line)
return True
else:
return False
def ConvertCodeBlock(self, line):
"""Converts any code block that may begin or end on this line.
Doxygen has (at least) two kinds of code blocks. Any block indented at
least four spaces gets formatted as code, but (for some reason) no syntax
highlighting is applied. Any block surrounded by "~~~" on both sides is
also treated as code, but these are syntax highlighted intelligently
depending on the file type. We typically write code blocks in the former
style, but we'd like them to be highlighted, so this function converts them
to the latter style by adding in the ~~~ lines.
To make this a bit more complicated, we would really prefer not to insert
new lines into the file, since that will make the line numbers shown in
doxygen not match the line numbers in the actual source code. For this
reason, we only perform the conversion if at least one "blank" line (empty
comment line) appears before the start of the code block. If we get down to
the bottom of the block and there's no blank line after it, we will be
forced to add a line, since we can't go back and undo what we already did.
Args:
line: The line to process.
Returns:
The converted line.
"""
if not self.in_code_block and re.match(r'\s*///\s{4,}', line):
if self.AppendToBufferedLine(' ~~~'):
# If this fails, we'll just leave it un-highlighted.
self.in_code_block = True
elif self.in_code_block and not re.match(r'\s*///\s{4,}', line):
if not self.AppendToBufferedLine(' ~~~'):
# This is bad. We don't have a buffered line to use to end the code
# block, so we'll have to insert one. This will cause the line
# numbers to stop matching the original source, unfortunately.
line = '/// ~~~\n' + line
self.in_code_block = False
return line
def ProcessLine(self, line):
"""Processes a line.
If the line is an empty line inside a comment, we buffer it for possible
rewriting later on. Otherwise, we transform it using our regexps and
write it (as well as any buffered blank lines) out to the output.
Args:
line: The line to process.
"""
line = self.Transform(line)
if line.strip() == '///':
# We may repurpose this empty line later, so don't write it out yet.
self.empty_line_buffer.append(line)
else:
line = self.ConvertCodeBlock(line)
# Flush the line buffer and write this line as well.
for buffered_line in self.empty_line_buffer:
self.out.write(buffered_line)
self.empty_line_buffer = []
self.out.write(line)
def main(argv):
sourcefile = argv[1]
with open(sourcefile, 'r') as infile:
formatter = DoxygenFormatter(sys.stdout)
for line in infile:
formatter.ProcessLine(line)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
FlannelFox/FlannelFox | tests/flannelfox/torrenttools/test_torrentQueue.py | 1 | 1999 | # -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch
import os
from flannelfox.torrenttools.TorrentQueue import Queue
from flannelfox.torrenttools import Torrents
class TestTorrentQueue(unittest.TestCase):
testDatabaseFile = 'ff.db'
def removeDatabase(self):
try:
os.remove(self.testDatabaseFile)
except Exception:
pass
@patch.object(Queue, 'databaseTorrentBlacklisted')
@patch.object(Queue, 'databaseTorrentExists')
def test_Queue(self, mockDatabaseTorrentExists, mockDatabaseTorrentBlacklisted):
self.removeDatabase()
torrentQueue = Queue()
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = False
# Ensure len returns a valid answer
self.assertEqual(len(torrentQueue), 0)
# Make sure appending an item works
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e01.720p.junk.here'))
self.assertEqual(len(torrentQueue), 1)
# Make sure appending a duplicate item does not work
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e01.720p.junk.here'))
self.assertEqual(len(torrentQueue), 1)
# Add a different item and make sure it works
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here2'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = True
mockDatabaseTorrentExists.return_value = False
# Check if Blacklisted torrent gets blocked
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here3'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = True
# Check if Existing Torrent in Database gets blocked
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here3'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = False
if __name__ == '__main__':
unittest.main()
| mit |
makinacorpus/odoo | addons/l10n_ae/__init__.py | 669 | 1059 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vhosouza/invesalius3 | invesalius/gui/task_exporter.py | 1 | 15556 | #--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: [email protected]
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import os
import pathlib
import sys
import wx
try:
import wx.lib.agw.hyperlink as hl
except ImportError:
import wx.lib.hyperlink as hl
import wx.lib.platebtn as pbtn
from pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.gui.dialogs as dlg
import invesalius.project as proj
import invesalius.session as ses
from invesalius import inv_paths
BTN_MASK = wx.NewId()
BTN_PICTURE = wx.NewId()
BTN_SURFACE = wx.NewId()
BTN_REPORT = wx.NewId()
BTN_REQUEST_RP = wx.NewId()
WILDCARD_SAVE_3D = "Inventor (*.iv)|*.iv|"\
"PLY (*.ply)|*.ply|"\
"Renderman (*.rib)|*.rib|"\
"STL (*.stl)|*.stl|"\
"STL ASCII (*.stl)|*.stl|"\
"VRML (*.vrml)|*.vrml|"\
"VTK PolyData (*.vtp)|*.vtp|"\
"Wavefront (*.obj)|*.obj|"\
"X3D (*.x3d)|*.x3d"
INDEX_TO_TYPE_3D = {0: const.FILETYPE_IV,
1: const.FILETYPE_PLY,
2: const.FILETYPE_RIB,
3: const.FILETYPE_STL,
4: const.FILETYPE_STL_ASCII,
5: const.FILETYPE_VRML,
6: const.FILETYPE_VTP,
7: const.FILETYPE_OBJ,
8: const.FILETYPE_X3D}
INDEX_TO_EXTENSION = {0: "iv",
1: "ply",
2: "rib",
3: "stl",
4: "stl",
5: "vrml",
6: "vtp",
7: "obj",
8: "x3d"}
WILDCARD_SAVE_2D = "BMP (*.bmp)|*.bmp|"\
"JPEG (*.jpg)|*.jpg|"\
"PNG (*.png)|*.png|"\
"PostScript (*.ps)|*.ps|"\
"Povray (*.pov)|*.pov|"\
"TIFF (*.tiff)|*.tiff"
INDEX_TO_TYPE_2D = {0: const.FILETYPE_BMP,
1: const.FILETYPE_JPG,
2: const.FILETYPE_PNG,
3: const.FILETYPE_PS,
4: const.FILETYPE_POV,
5: const.FILETYPE_OBJ}
WILDCARD_SAVE_MASK = "VTK ImageData (*.vti)|*.vti"
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(inner_panel, 1, wx.EXPAND | wx.GROW | wx.BOTTOM | wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
background_colour = wx.Colour(255, 255, 255)
self.SetBackgroundColour(background_colour)
self.SetAutoLayout(1)
# Counter for projects loaded in current GUI
# Fixed hyperlink items
tooltip = wx.ToolTip(_("Export InVesalius screen to an image file"))
link_export_picture = hl.HyperLinkCtrl(self, -1,
_("Export picture..."))
link_export_picture.SetUnderlines(False, False, False)
link_export_picture.SetBold(True)
link_export_picture.SetColours("BLACK", "BLACK", "BLACK")
link_export_picture.SetBackgroundColour(self.GetBackgroundColour())
link_export_picture.SetToolTip(tooltip)
link_export_picture.AutoBrowse(False)
link_export_picture.UpdateLink()
link_export_picture.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportPicture)
tooltip = wx.ToolTip(_("Export 3D surface"))
link_export_surface = hl.HyperLinkCtrl(self, -1,_("Export 3D surface..."))
link_export_surface.SetUnderlines(False, False, False)
link_export_surface.SetBold(True)
link_export_surface.SetColours("BLACK", "BLACK", "BLACK")
link_export_surface.SetBackgroundColour(self.GetBackgroundColour())
link_export_surface.SetToolTip(tooltip)
link_export_surface.AutoBrowse(False)
link_export_surface.UpdateLink()
link_export_surface.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportSurface)
#tooltip = wx.ToolTip(_("Export 3D mask (voxels)"))
#link_export_mask = hl.HyperLinkCtrl(self, -1,_("Export mask..."))
#link_export_mask.SetUnderlines(False, False, False)
#link_export_mask.SetColours("BLACK", "BLACK", "BLACK")
#link_export_mask.SetToolTip(tooltip)
#link_export_mask.AutoBrowse(False)
#link_export_mask.UpdateLink()
#link_export_mask.Bind(hl.EVT_HYPERLINK_LEFT,
# self.OnLinkExportMask)
#tooltip = wx.ToolTip("Request rapid prototyping services")
#link_request_rp = hl.HyperLinkCtrl(self,-1,"Request rapid prototyping...")
#link_request_rp.SetUnderlines(False, False, False)
#link_request_rp.SetColours("BLACK", "BLACK", "BLACK")
#link_request_rp.SetToolTip(tooltip)
#link_request_rp.AutoBrowse(False)
#link_request_rp.UpdateLink()
#link_request_rp.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkRequestRP)
#tooltip = wx.ToolTip("Open report tool...")
#link_report = hl.HyperLinkCtrl(self,-1,"Open report tool...")
#link_report.SetUnderlines(False, False, False)
#link_report.SetColours("BLACK", "BLACK", "BLACK")
#link_report.SetToolTip(tooltip)
#link_report.AutoBrowse(False)
#link_report.UpdateLink()
#link_report.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkReport)
# Image(s) for buttons
if sys.platform == 'darwin':
BMP_EXPORT_SURFACE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "surface_export_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "tool_photo_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask.png",
# wx.BITMAP_TYPE_PNG)
else:
BMP_EXPORT_SURFACE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "surface_export.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "tool_photo.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask_small.png",
# wx.BITMAP_TYPE_PNG)
# Buttons related to hyperlinks
button_style = pbtn.PB_STYLE_SQUARE | pbtn.PB_STYLE_DEFAULT
button_picture = pbtn.PlateButton(self, BTN_PICTURE, "",
BMP_TAKE_PICTURE,
style=button_style)
button_picture.SetBackgroundColour(self.GetBackgroundColour())
self.button_picture = button_picture
button_surface = pbtn.PlateButton(self, BTN_SURFACE, "",
BMP_EXPORT_SURFACE,
style=button_style)
button_surface.SetBackgroundColour(self.GetBackgroundColour())
#button_mask = pbtn.PlateButton(self, BTN_MASK, "",
# BMP_EXPORT_MASK,
# style=button_style)
#button_request_rp = pbtn.PlateButton(self, BTN_REQUEST_RP, "",
# BMP_IMPORT, style=button_style)
#button_report = pbtn.PlateButton(self, BTN_REPORT, "",
# BMP_IMPORT,
# style=button_style)
# When using PlateButton, it is necessary to bind events from the parent window
self.Bind(wx.EVT_BUTTON, self.OnButton)
# Tags and grid sizer for fixed items
flag_link = wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP
flag_button = wx.EXPAND | wx.GROW
fixed_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=2, vgap=0)
fixed_sizer.AddGrowableCol(0, 1)
fixed_sizer.AddMany([ (link_export_picture, 1, flag_link, 3),
(button_picture, 0, flag_button),
(link_export_surface, 1, flag_link, 3),
(button_surface, 0, flag_button),])
#(link_export_mask, 1, flag_link, 3),
#(button_mask, 0, flag_button)])
#(link_report, 0, flag_link, 3),
#(button_report, 0, flag_button),
#(link_request_rp, 1, flag_link, 3),
#(button_request_rp, 0, flag_button)])
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(fixed_sizer, 0, wx.GROW|wx.EXPAND)
# Update main sizer and panel layout
self.SetSizer(main_sizer)
self.Fit()
self.sizer = main_sizer
self.__init_menu()
def __init_menu(self):
menu = wx.Menu()
self.id_to_name = {const.AXIAL:_("Axial slice"),
const.CORONAL:_("Coronal slice"),
const.SAGITAL:_("Sagittal slice"),
const.VOLUME:_("Volume")}
for id in self.id_to_name:
item = wx.MenuItem(menu, id, self.id_to_name[id])
menu.Append(item)
self.menu_picture = menu
menu.Bind(wx.EVT_MENU, self.OnMenuPicture)
def OnMenuPicture(self, evt):
id = evt.GetId()
value = dlg.ExportPicture(self.id_to_name[id])
if value:
filename, filetype = value
Publisher.sendMessage('Export picture to file',
orientation=id, filename=filename, filetype=filetype)
def OnLinkExportPicture(self, evt=None):
self.button_picture.PopupMenu(self.menu_picture)
def OnLinkExportMask(self, evt=None):
project = proj.Project()
if sys.platform == 'win32':
project_name = project.name
else:
project_name = project.name+".vti"
dlg = wx.FileDialog(None,
"Save mask as...", # title
"", # last used directory
project_name, # filename
WILDCARD_SAVE_MASK,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(0) # default is VTI
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
extension = "vti"
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
filetype = const.FILETYPE_IMAGEDATA
Publisher.sendMessage('Export mask to file',
filename=filename,
filetype=filetype)
def OnLinkExportSurface(self, evt=None):
"OnLinkExportSurface"
project = proj.Project()
n_surface = 0
for index in project.surface_dict:
if project.surface_dict[index].is_shown:
n_surface += 1
if n_surface:
if sys.platform == 'win32':
project_name = pathlib.Path(project.name).stem
else:
project_name = pathlib.Path(project.name).stem + ".stl"
session = ses.Session()
last_directory = session.get('paths', 'last_directory_3d_surface', '')
dlg = wx.FileDialog(None,
_("Save 3D surface as..."), # title
last_directory, # last used directory
project_name, # filename
WILDCARD_SAVE_3D,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(3) # default is STL
if dlg.ShowModal() == wx.ID_OK:
filetype_index = dlg.GetFilterIndex()
filetype = INDEX_TO_TYPE_3D[filetype_index]
filename = dlg.GetPath()
extension = INDEX_TO_EXTENSION[filetype_index]
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
if filename:
session['paths']['last_directory_3d_surface'] = os.path.split(filename)[0]
session.WriteSessionFile()
Publisher.sendMessage('Export surface to file',
filename=filename, filetype=filetype)
if not os.path.exists(filename):
dlg = wx.MessageDialog(None,
_("It was not possible to save the surface."),
_("Error saving surface"),
wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
else:
dlg = wx.MessageDialog(None,
_("You need to create a surface and make it ") +
_("visible before exporting it."),
'InVesalius 3',
wx.OK | wx.ICON_INFORMATION)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def OnLinkRequestRP(self, evt=None):
pass
def OnLinkReport(self, evt=None):
pass
def OnButton(self, evt):
id = evt.GetId()
if id == BTN_PICTURE:
self.OnLinkExportPicture()
elif id == BTN_SURFACE:
self.OnLinkExportSurface()
elif id == BTN_REPORT:
self.OnLinkReport()
elif id == BTN_REQUEST_RP:
self.OnLinkRequestRP()
else:# id == BTN_MASK:
self.OnLinkExportMask()
| gpl-2.0 |
benjaminrigaud/django | django/core/checks/model_checks.py | 525 | 2390 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import types
from django.apps import apps
from django.core.checks import Error, Tags, register
@register(Tags.models)
def check_all_models(app_configs=None, **kwargs):
errors = []
for model in apps.get_models():
if app_configs is None or model._meta.app_config in app_configs:
if not inspect.ismethod(model.check):
errors.append(
Error(
"The '%s.check()' class method is "
"currently overridden by %r." % (
model.__name__, model.check),
hint=None,
obj=model,
id='models.E020'
)
)
else:
errors.extend(model.check(**kwargs))
return errors
@register(Tags.models, Tags.signals)
def check_model_signals(app_configs=None, **kwargs):
"""
Ensure lazily referenced model signals senders are installed.
"""
# Avoid circular import
from django.db import models
errors = []
for name in dir(models.signals):
obj = getattr(models.signals, name)
if isinstance(obj, models.signals.ModelSignal):
for reference, receivers in obj.unresolved_references.items():
for receiver, _, _ in receivers:
# The receiver is either a function or an instance of class
# defining a `__call__` method.
if isinstance(receiver, types.FunctionType):
description = "The '%s' function" % receiver.__name__
else:
description = "An instance of the '%s' class" % receiver.__class__.__name__
errors.append(
Error(
"%s was connected to the '%s' signal "
"with a lazy reference to the '%s' sender, "
"which has not been installed." % (
description, name, '.'.join(reference)
),
obj=receiver.__module__,
hint=None,
id='signals.E001'
)
)
return errors
| bsd-3-clause |
bmya/tkobr-addons | tko_web_sessions_management/main.py | 1 | 11671 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp.osv import fields, osv, orm
import pytz
from datetime import date, datetime, time, timedelta
from dateutil.relativedelta import *
from openerp.addons.base.ir.ir_cron import _intervalTypes
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.http import request
from openerp.tools.translate import _
from openerp import http
import werkzeug.contrib.sessions
from openerp.http import Response
# from openerp import pooler
_logger = logging.getLogger(__name__)
class Home_tkobr(openerp.addons.web.controllers.main.Home):
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
openerp.addons.web.controllers.main.ensure_db()
multi_ok = True
calendar_set = 0
calendar_ok = False
calendar_group = ''
unsuccessful_message = ''
now = datetime.now()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = False
if 'login' in request.params and 'password' in request.params:
uid = request.session.authenticate(request.session.db, request.params[
'login'], request.params['password'])
if uid is not False:
user = request.registry.get('res.users').browse(
request.cr, request.uid, uid, request.context)
if uid != SUPERUSER_ID:
# check for multiple sessions block
sessions = request.registry.get('ir.sessions').search(
request.cr, request.uid, [
('user_id', '=', uid), ('logged_in', '=', True)], context=request.context)
if sessions and user.multiple_sessions_block:
multi_ok = False
if multi_ok:
# check calendars
calendar_obj = request.registry.get(
'resource.calendar')
attendance_obj = request.registry.get(
'resource.calendar.attendance')
# GET USER LOCAL TIME
if user.tz:
tz = pytz.timezone(user.tz)
else:
tz = pytz.timezone('GMT')
tzoffset = tz.utcoffset(now)
now = now + tzoffset
if user.login_calendar_id:
calendar_set += 1
# check user calendar
attendances = attendance_obj.search(request.cr,
request.uid, [('calendar_id', '=', user.login_calendar_id.id),
('dayofweek', '=', str(now.weekday())),
('hour_from', '<=', now.hour + now.minute / 60.0),
('hour_to', '>=', now.hour + now.minute / 60.0)],
context=request.context)
if attendances:
calendar_ok = True
else:
unsuccessful_message = "unsuccessful login from '%s', login time outside the allowed calendar defined on the user" % request.params['login']
else:
# check user groups calendar
for group in user.groups_id:
if group.login_calendar_id:
calendar_set += 1
attendances = attendance_obj.search(request.cr,
request.uid, [('calendar_id', '=', group.login_calendar_id.id),
('dayofweek', '=', str(now.weekday())),
('hour_from', '<=', now.hour + now.minute / 60.0),
('hour_to', '>=', now.hour + now.minute / 60.0)],
context=request.context)
if attendances:
calendar_ok = True
else:
calendar_group = group.name
if sessions and group.multiple_sessions_block and multi_ok:
multi_ok = False
unsuccessful_message = "unsuccessful login from '%s', multiple sessions blocked by group '%s'" % (
request.params['login'], group.name)
break
if calendar_set > 0 and calendar_ok == False:
unsuccessful_message = "unsuccessful login from '%s', login time outside the allowed calendar defined in group '%s'" % (
request.params['login'], calendar_group)
else:
unsuccessful_message = "unsuccessful login from '%s', multiple sessions blocked by the user settings" % request.params['login']
else:
unsuccessful_message = "unsuccessful login from '%s', wrong username or password" % request.params[
'login']
if not unsuccessful_message or uid == SUPERUSER_ID:
self.save_session(
request.cr,
uid,
user.tz,
request.httprequest.session.sid,
context=request.context)
return http.redirect_with_hash(redirect)
user = request.registry.get('res.users').browse(
request.cr, SUPERUSER_ID, SUPERUSER_ID, request.context)
self.save_session(
request.cr,
uid,
user.tz,
request.httprequest.session.sid,
unsuccessful_message,
request.context)
_logger.error(unsuccessful_message)
request.uid = old_uid
values['error'] = 'Login failed due to one of the following reasons:'
values['reason1'] = '- Wrong login/password'
values['reason2'] = '- User not allowed to have multiple logins'
values[
'reason3'] = '- User not allowed to login at this specific time or day'
return request.render('web.login', values)
def save_session(
self,
cr,
uid,
tz,
sid,
unsuccessful_message='',
context=None):
now = fields.datetime.now()
session_obj = request.registry.get('ir.sessions')
cr = request.registry.cursor()
# for GeoIP
geo_ip_resolver = None
ip_location = ""
try:
import GeoIP
geo_ip_resolver = GeoIP.open(
'/usr/share/GeoIP/GeoIP.dat',
GeoIP.GEOIP_STANDARD)
except ImportError:
geo_ip_resolver = False
if geo_ip_resolver:
ip_location = (str(geo_ip_resolver.country_name_by_addr(
request.httprequest.remote_addr)) or "")
# autocommit: our single update request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
cr.autocommit(True)
user = request.registry.get('res.users').browse(
cr, request.uid, uid, request.context)
ip = request.httprequest.headers.environ['REMOTE_ADDR']
logged_in = True
if unsuccessful_message:
uid = SUPERUSER_ID
logged_in = False
sessions = False
else:
sessions = session_obj.search(cr, uid, [('session_id', '=', sid),
('ip', '=', ip),
('user_id', '=', uid),
('logged_in', '=', True)],
context=context)
if not sessions:
values = {
'user_id': uid,
'logged_in': logged_in,
'session_id': sid,
'session_seconds': user.session_default_seconds,
'multiple_sessions_block': user.multiple_sessions_block,
'date_login': now,
'expiration_date': datetime.strftime(
(datetime.strptime(
now,
DEFAULT_SERVER_DATETIME_FORMAT) +
relativedelta(
seconds=user.session_default_seconds)),
DEFAULT_SERVER_DATETIME_FORMAT),
'ip': ip,
'ip_location': ip_location,
'remote_tz': tz or 'GMT',
'unsuccessful_message': unsuccessful_message,
}
session_obj.create(cr, uid, values, context=context)
cr.commit()
cr.close()
return True
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True, logout_type='ul')
return werkzeug.utils.redirect(redirect, 303)
| agpl-3.0 |
jlaine/django-coconuts | tests/test_render.py | 1 | 4789 | #
# django-coconuts
# Copyright (c) 2008-2019, Jeremy Lainé
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
from PIL import Image
from tests import BaseTest
class RenderFileTest(BaseTest):
files = ['test.jpg', 'test.mp4', 'test.png', 'test.txt', 'test_portrait.jpg', 'test_portrait.mp4', 'test_rotated.jpg', 'test_rotated.mp4']
fixtures = ['test_users.json']
def assertImage(self, response, content_type, expected_size):
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], content_type)
self.assertTrue('Expires' in response)
self.assertTrue('Last-Modified' in response)
# check size
fp = io.BytesIO(b''.join(response.streaming_content))
img = Image.open(fp)
self.assertEqual(img.size, expected_size)
def test_as_anonymous(self):
"""
Anonymous user cannot render a file.
"""
# no size
response = self.client.get('/images/render/test.jpg')
self.assertEqual(response.status_code, 401)
# bad size
response = self.client.get('/images/render/test.jpg?size=123')
self.assertEqual(response.status_code, 401)
# good size, bad type
response = self.client.get('/images/render/test.txt?size=1024')
self.assertEqual(response.status_code, 401)
# good size, good path
response = self.client.get('/images/render/test.jpg?size=1024')
self.assertEqual(response.status_code, 401)
# good size, good path
response = self.client.get('/images/render/test.png?size=1024')
self.assertEqual(response.status_code, 401)
def test_as_user_bad(self):
"""
Authenticated user can render a file.
"""
self.client.login(username="test_user_1", password="test")
# no size
response = self.client.get('/images/render/test.jpg')
self.assertEqual(response.status_code, 400)
# bad size
response = self.client.get('/images/render/test.jpg?size=123')
self.assertEqual(response.status_code, 400)
# good size, bad path
response = self.client.get('/images/render/notfound.jpg?size=1024')
self.assertEqual(response.status_code, 404)
# good size, bad type
response = self.client.get('/images/render/test.txt?size=1024')
self.assertEqual(response.status_code, 400)
def test_as_user_good(self):
self.client.login(username="test_user_1", password="test")
response = self.client.get('/images/render/test.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (1024, 682))
response = self.client.get('/images/render/test_portrait.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (512, 768))
response = self.client.get('/images/render/test_portrait.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (432, 768))
response = self.client.get('/images/render/test_rotated.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (512, 768))
response = self.client.get('/images/render/test_rotated.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (432, 768))
response = self.client.get('/images/render/test.png?size=1024')
self.assertImage(response, 'image/png', (24, 24))
response = self.client.get('/images/render/test.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (1024, 576))
| bsd-2-clause |
ESS-LLP/erpnext-healthcare | erpnext/config/stock.py | 5 | 6499 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Stock Transactions"),
"items": [
{
"type": "doctype",
"name": "Stock Entry",
},
{
"type": "doctype",
"name": "Delivery Note",
},
{
"type": "doctype",
"name": "Purchase Receipt",
},
{
"type": "doctype",
"name": "Material Request",
},
]
},
{
"label": _("Stock Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Stock Ledger",
"doctype": "Stock Ledger Entry",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Balance",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Projected Qty",
"doctype": "Item",
},
{
"type": "page",
"name": "stock-balance",
"label": _("Stock Summary")
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing",
"doctype": "Item",
},
{
"type": "report",
"is_query_report": True,
"name": "Item Price Stock",
"doctype": "Item",
}
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Item",
},
{
"type": "doctype",
"name": "Item Alternative",
},
{
"type": "doctype",
"name": "Product Bundle",
},
{
"type": "doctype",
"name": "Price List",
},
{
"type": "doctype",
"name": "Item Group",
"icon": "fa fa-sitemap",
"label": _("Item Group"),
"link": "Tree/Item Group",
},
{
"type": "doctype",
"name": "Item Price",
},
{
"type": "doctype",
"name": "Shipping Rule",
},
{
"type": "doctype",
"name": "Pricing Rule",
},
{
"type": "doctype",
"name": "Item Variant Settings",
},
]
},
{
"label": _("Serial No and Batch"),
"items": [
{
"type": "doctype",
"name": "Serial No",
},
{
"type": "doctype",
"name": "Batch",
},
{
"type": "doctype",
"name": "Installation Note",
},
{
"type": "report",
"name": "Serial No Service Contract Expiry",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Status",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Warranty Expiry",
"doctype": "Serial No"
},
]
},
{
"label": _("Fulfilment"),
"items": [
{
"type": "doctype",
"name": "Delivery Trip",
"description": _("Delivery Trip service tours to customers.")
}
]
},
{
"label": _("Tools"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "Stock Reconciliation",
},
{
"type": "doctype",
"name": "Packing Slip",
},
{
"type": "doctype",
"name": "Quality Inspection",
},
{
"type": "doctype",
"name": "Quality Inspection Template",
},
{
"type": "doctype",
"name": "Landed Cost Voucher",
}
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Stock Settings",
},
{
"type": "doctype",
"name": "Warehouse",
},
{
"type": "doctype",
"name": "UOM",
"label": _("Unit of Measure") + " (UOM)",
},
{
"type": "doctype",
"name": "Item Attribute",
},
{
"type": "doctype",
"name": "Brand",
},
{
"type": "doctype",
"name": "Item Variant Settings",
},
]
},
{
"label": _("Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "report",
"is_query_report": False,
"name": "Item-wise Price List Rate",
"doctype": "Item Price",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Analytics",
"doctype": "Stock Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Delivery Note Trends",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Receipt Trends",
"doctype": "Purchase Receipt"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Order Items To Be Received",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"name": "Item Shortage Report",
"route": "Report/Bin/Item Shortage Report",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"is_query_report": True,
"name": "Requested Items To Be Transferred",
"doctype": "Material Request"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch-Wise Balance History",
"doctype": "Batch"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch Item Expiry Status",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Item Prices",
"doctype": "Price List"
},
{
"type": "report",
"is_query_report": True,
"name": "Itemwise Recommended Reorder Level",
"doctype": "Item"
},
{
"type": "report",
"is_query_report": True,
"name": "Item Variant Details",
"doctype": "Item"
}
]
},
{
"label": _("Help"),
"icon": "fa fa-facetime-video",
"items": [
{
"type": "help",
"label": _("Items and Pricing"),
"youtube_id": "qXaEwld4_Ps"
},
{
"type": "help",
"label": _("Item Variants"),
"youtube_id": "OGBETlCzU5o"
},
{
"type": "help",
"label": _("Opening Stock Balance"),
"youtube_id": "0yPgrtfeCTs"
},
{
"type": "help",
"label": _("Making Stock Entries"),
"youtube_id": "Njt107hlY3I"
},
{
"type": "help",
"label": _("Serialized Inventory"),
"youtube_id": "gvOVlEwFDAk"
},
{
"type": "help",
"label": _("Batch Inventory"),
"youtube_id": "J0QKl7ABPKM"
},
{
"type": "help",
"label": _("Managing Subcontracting"),
"youtube_id": "ThiMCC2DtKo"
},
]
}
]
| gpl-3.0 |
stuntman723/rap-analyzer | rap_analyzer/lib/python2.7/site-packages/django/contrib/gis/forms/widgets.py | 422 | 3659 | from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.gis import gdal
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Widget
from django.template import loader
from django.utils import six, translation
logger = logging.getLogger('django.contrib.gis')
class BaseGeometryWidget(Widget):
"""
The base class for rich geometry widgets.
Renders a map using the WKT of the geometry.
"""
geom_type = 'GEOMETRY'
map_srid = 4326
map_width = 600
map_height = 400
display_raw = False
supports_3d = False
template_name = '' # set on subclasses
def __init__(self, attrs=None):
self.attrs = {}
for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'):
self.attrs[key] = getattr(self, key)
if attrs:
self.attrs.update(attrs)
def serialize(self, value):
return value.wkt if value else ''
def deserialize(self, value):
try:
return GEOSGeometry(value, self.map_srid)
except (GEOSException, ValueError) as err:
logger.error(
"Error creating geometry from value '%s' (%s)" % (
value, err)
)
return None
def render(self, name, value, attrs=None):
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if isinstance(value, six.string_types):
value = self.deserialize(value)
if value:
# Check that srid of value and map match
if value.srid != self.map_srid:
try:
ogr = value.ogr
ogr.transform(self.map_srid)
value = ogr
except gdal.GDALException as err:
logger.error(
"Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
value.srid, self.map_srid, err)
)
context = self.build_attrs(
attrs,
name=name,
module='geodjango_%s' % name.replace('-', '_'), # JS-safe
serialized=self.serialize(value),
geom_type=gdal.OGRGeomType(self.attrs['geom_type']),
STATIC_URL=settings.STATIC_URL,
LANGUAGE_BIDI=translation.get_language_bidi(),
)
return loader.render_to_string(self.template_name, context)
class OpenLayersWidget(BaseGeometryWidget):
template_name = 'gis/openlayers.html'
class Media:
js = (
'http://openlayers.org/api/2.13/OpenLayers.js',
'gis/js/OLMapWidget.js',
)
class OSMWidget(BaseGeometryWidget):
"""
An OpenLayers/OpenStreetMap-based widget.
"""
template_name = 'gis/openlayers-osm.html'
default_lon = 5
default_lat = 47
class Media:
js = (
'http://openlayers.org/api/2.13/OpenLayers.js',
'http://www.openstreetmap.org/openlayers/OpenStreetMap.js',
'gis/js/OLMapWidget.js',
)
def __init__(self, attrs=None):
super(OSMWidget, self).__init__()
for key in ('default_lon', 'default_lat'):
self.attrs[key] = getattr(self, key)
if attrs:
self.attrs.update(attrs)
@property
def map_srid(self):
# Use the official spherical mercator projection SRID when GDAL is
# available; otherwise, fallback to 900913.
if gdal.HAS_GDAL:
return 3857
else:
return 900913
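# --- Hedged usage sketch, not part of the original module ---
# A geometry widget is normally attached to a GeoDjango form field; the
# form and field names below are hypothetical.
#
# from django.contrib.gis import forms
#
# class PlaceForm(forms.Form):
#     location = forms.PointField(
#         widget=OSMWidget(attrs={'map_width': 800, 'map_height': 500}))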
| mit |
neurotechuoft/MindType | Code/V1/src/deprecated/pyqtgraph/flowchart/library/Data.py | 35 | 12746 | # -*- coding: utf-8 -*-
from ..Node import Node
from ...Qt import QtGui, QtCore
import numpy as np
from .common import *
from ...SRTTransform import SRTTransform
from ...Point import Point
from ...widgets.TreeWidget import TreeWidget
from ...graphicsItems.LinearRegionItem import LinearRegionItem
from . import functions
class ColumnSelectNode(Node):
"""Select named columns from a record array or MetaArray."""
nodeName = "ColumnSelect"
def __init__(self, name):
Node.__init__(self, name, terminals={'In': {'io': 'in'}})
self.columns = set()
self.columnList = QtGui.QListWidget()
self.axis = 0
self.columnList.itemChanged.connect(self.itemChanged)
def process(self, In, display=True):
if display:
self.updateList(In)
out = {}
if hasattr(In, 'implements') and In.implements('MetaArray'):
for c in self.columns:
out[c] = In[self.axis:c]
elif isinstance(In, np.ndarray) and In.dtype.fields is not None:
for c in self.columns:
out[c] = In[c]
else:
self.In.setValueAcceptable(False)
raise Exception("Input must be MetaArray or ndarray with named fields")
return out
def ctrlWidget(self):
return self.columnList
def updateList(self, data):
if hasattr(data, 'implements') and data.implements('MetaArray'):
cols = data.listColumns()
for ax in cols: ## find first axis with columns
if len(cols[ax]) > 0:
self.axis = ax
cols = set(cols[ax])
break
else:
cols = list(data.dtype.fields.keys())
rem = set()
for c in self.columns:
if c not in cols:
self.removeTerminal(c)
rem.add(c)
self.columns -= rem
self.columnList.blockSignals(True)
self.columnList.clear()
for c in cols:
item = QtGui.QListWidgetItem(c)
item.setFlags(QtCore.Qt.ItemIsEnabled|QtCore.Qt.ItemIsUserCheckable)
if c in self.columns:
item.setCheckState(QtCore.Qt.Checked)
else:
item.setCheckState(QtCore.Qt.Unchecked)
self.columnList.addItem(item)
self.columnList.blockSignals(False)
def itemChanged(self, item):
col = str(item.text())
if item.checkState() == QtCore.Qt.Checked:
if col not in self.columns:
self.columns.add(col)
self.addOutput(col)
else:
if col in self.columns:
self.columns.remove(col)
self.removeTerminal(col)
self.update()
def saveState(self):
state = Node.saveState(self)
state['columns'] = list(self.columns)
return state
def restoreState(self, state):
Node.restoreState(self, state)
self.columns = set(state.get('columns', []))
for c in self.columns:
self.addOutput(c)
class RegionSelectNode(CtrlNode):
"""Returns a slice from a 1-D array. Connect the 'widget' output to a plot to display a region-selection widget."""
nodeName = "RegionSelect"
uiTemplate = [
('start', 'spin', {'value': 0, 'step': 0.1}),
('stop', 'spin', {'value': 0.1, 'step': 0.1}),
('display', 'check', {'value': True}),
('movable', 'check', {'value': True}),
]
def __init__(self, name):
self.items = {}
CtrlNode.__init__(self, name, terminals={
'data': {'io': 'in'},
'selected': {'io': 'out'},
'region': {'io': 'out'},
'widget': {'io': 'out', 'multi': True}
})
self.ctrls['display'].toggled.connect(self.displayToggled)
self.ctrls['movable'].toggled.connect(self.movableToggled)
def displayToggled(self, b):
for item in self.items.values():
item.setVisible(b)
def movableToggled(self, b):
for item in self.items.values():
item.setMovable(b)
def process(self, data=None, display=True):
#print "process.."
s = self.stateGroup.state()
region = [s['start'], s['stop']]
if display:
conn = self['widget'].connections()
for c in conn:
plot = c.node().getPlot()
if plot is None:
continue
if c in self.items:
item = self.items[c]
item.setRegion(region)
#print " set rgn:", c, region
#item.setXVals(events)
else:
item = LinearRegionItem(values=region)
self.items[c] = item
#item.connect(item, QtCore.SIGNAL('regionChanged'), self.rgnChanged)
item.sigRegionChanged.connect(self.rgnChanged)
item.setVisible(s['display'])
item.setMovable(s['movable'])
#print " new rgn:", c, region
#self.items[c].setYRange([0., 0.2], relative=True)
if self['selected'].isConnected():
if data is None:
sliced = None
elif (hasattr(data, 'implements') and data.implements('MetaArray')):
sliced = data[0:s['start']:s['stop']]
else:
mask = (data['time'] >= s['start']) * (data['time'] < s['stop'])
sliced = data[mask]
else:
sliced = None
return {'selected': sliced, 'widget': self.items, 'region': region}
def rgnChanged(self, item):
region = item.getRegion()
self.stateGroup.setState({'start': region[0], 'stop': region[1]})
self.update()
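# --- Hedged usage sketch, not part of the original module ---
# A RegionSelect node is usually created inside a Flowchart and its
# 'widget' output wired to a plot node so the LinearRegionItem appears on
# that plot; the wiring below is a hypothetical sketch.
#
# fc = Flowchart(terminals={'dataIn': {'io': 'in'}, 'dataOut': {'io': 'out'}})
# select = fc.createNode('RegionSelect')
# fc.connectTerminals(fc['dataIn'], select['data'])
# fc.connectTerminals(select['selected'], fc['dataOut'])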
class EvalNode(Node):
"""Return the output of a string evaluated/executed by the python interpreter.
The string may be either an expression or a python script, and inputs are accessed as the name of the terminal.
For expressions, a single value may be evaluated for a single output, or a dict for multiple outputs.
For a script, the text will be executed as the body of a function."""
nodeName = 'PythonEval'
def __init__(self, name):
Node.__init__(self, name,
terminals = {
'input': {'io': 'in', 'renamable': True, 'multiable': True},
'output': {'io': 'out', 'renamable': True, 'multiable': True},
},
allowAddInput=True, allowAddOutput=True)
self.ui = QtGui.QWidget()
self.layout = QtGui.QGridLayout()
#self.addInBtn = QtGui.QPushButton('+Input')
#self.addOutBtn = QtGui.QPushButton('+Output')
self.text = QtGui.QTextEdit()
self.text.setTabStopWidth(30)
self.text.setPlainText("# Access inputs as args['input_name']\nreturn {'output': None} ## one key per output terminal")
#self.layout.addWidget(self.addInBtn, 0, 0)
#self.layout.addWidget(self.addOutBtn, 0, 1)
self.layout.addWidget(self.text, 1, 0, 1, 2)
self.ui.setLayout(self.layout)
#QtCore.QObject.connect(self.addInBtn, QtCore.SIGNAL('clicked()'), self.addInput)
#self.addInBtn.clicked.connect(self.addInput)
#QtCore.QObject.connect(self.addOutBtn, QtCore.SIGNAL('clicked()'), self.addOutput)
#self.addOutBtn.clicked.connect(self.addOutput)
self.text.focusOutEvent = self.focusOutEvent
self.lastText = None
def ctrlWidget(self):
return self.ui
#def addInput(self):
#Node.addInput(self, 'input', renamable=True)
#def addOutput(self):
#Node.addOutput(self, 'output', renamable=True)
def focusOutEvent(self, ev):
text = str(self.text.toPlainText())
if text != self.lastText:
self.lastText = text
self.update()
return QtGui.QTextEdit.focusOutEvent(self.text, ev)
def process(self, display=True, **args):
l = locals()
l.update(args)
## try eval first, then exec
try:
text = str(self.text.toPlainText()).replace('\n', ' ')
output = eval(text, globals(), l)
except SyntaxError:
fn = "def fn(**args):\n"
run = "\noutput=fn(**args)\n"
text = fn + "\n".join([" "+l for l in str(self.text.toPlainText()).split('\n')]) + run
exec(text)
except:
print("Error processing node: %s" % self.name())
raise
return output
def saveState(self):
state = Node.saveState(self)
state['text'] = str(self.text.toPlainText())
#state['terminals'] = self.saveTerminals()
return state
def restoreState(self, state):
Node.restoreState(self, state)
self.text.clear()
self.text.insertPlainText(state['text'])
self.restoreTerminals(state['terminals'])
self.update()
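# --- Hedged usage sketch, not part of the original module ---
# Text entered in a PythonEval node is either a single expression or the
# body of a function; inputs are read from the 'args' dict keyed by
# terminal name. A script-style body returning one key per output
# terminal could look like:
#
#   # access inputs as args['input_name']
#   data = args['input']
#   return {'output': data * 2}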
class ColumnJoinNode(Node):
"""Concatenates record arrays and/or adds new columns"""
nodeName = 'ColumnJoin'
def __init__(self, name):
Node.__init__(self, name, terminals = {
'output': {'io': 'out'},
})
#self.items = []
self.ui = QtGui.QWidget()
self.layout = QtGui.QGridLayout()
self.ui.setLayout(self.layout)
self.tree = TreeWidget()
self.addInBtn = QtGui.QPushButton('+ Input')
self.remInBtn = QtGui.QPushButton('- Input')
self.layout.addWidget(self.tree, 0, 0, 1, 2)
self.layout.addWidget(self.addInBtn, 1, 0)
self.layout.addWidget(self.remInBtn, 1, 1)
self.addInBtn.clicked.connect(self.addInput)
self.remInBtn.clicked.connect(self.remInput)
self.tree.sigItemMoved.connect(self.update)
def ctrlWidget(self):
return self.ui
def addInput(self):
#print "ColumnJoinNode.addInput called."
term = Node.addInput(self, 'input', renamable=True, removable=True, multiable=True)
#print "Node.addInput returned. term:", term
item = QtGui.QTreeWidgetItem([term.name()])
item.term = term
term.joinItem = item
#self.items.append((term, item))
self.tree.addTopLevelItem(item)
def remInput(self):
sel = self.tree.currentItem()
term = sel.term
term.joinItem = None
sel.term = None
self.tree.removeTopLevelItem(sel)
self.removeTerminal(term)
self.update()
def process(self, display=True, **args):
order = self.order()
vals = []
for name in order:
if name not in args:
continue
val = args[name]
if isinstance(val, np.ndarray) and len(val.dtype) > 0:
vals.append(val)
else:
vals.append((name, None, val))
return {'output': functions.concatenateColumns(vals)}
def order(self):
return [str(self.tree.topLevelItem(i).text(0)) for i in range(self.tree.topLevelItemCount())]
def saveState(self):
state = Node.saveState(self)
state['order'] = self.order()
return state
def restoreState(self, state):
Node.restoreState(self, state)
inputs = self.inputs()
## Node.restoreState should have created all of the terminals we need
## However: to maintain support for some older flowchart files, we need
## to manually add any terminals that were not taken care of.
for name in [n for n in state['order'] if n not in inputs]:
Node.addInput(self, name, renamable=True, removable=True, multiable=True)
inputs = self.inputs()
order = [name for name in state['order'] if name in inputs]
for name in inputs:
if name not in order:
order.append(name)
self.tree.clear()
for name in order:
term = self[name]
item = QtGui.QTreeWidgetItem([name])
item.term = term
term.joinItem = item
#self.items.append((term, item))
self.tree.addTopLevelItem(item)
def terminalRenamed(self, term, oldName):
Node.terminalRenamed(self, term, oldName)
item = term.joinItem
item.setText(0, term.name())
self.update()
| agpl-3.0 |
loveshell/volatility | volatility/plugins/mac/compressed_swap.py | 11 | 11167 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Golden G. Richard III
@license: GNU General Public License 2.0
@contact: [email protected]
@organization: Arcane Alloy, LLC
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.mac.common as common
from struct import pack
import WKdm
class mac_compressed_swap(common.AbstractMacCommand):
""" Prints Mac OS X VM compressor stats and dumps all compressed pages """
def __init__(self, config, *args, **kwargs):
common.AbstractMacCommand.__init__(self, config, *args, **kwargs)
if config:
self._config.add_option('SKIP-WRITING', short_option = 't',
help = 'Skip writing decompressed pages, just print stats and test decompression',
action = 'store_true', default = False)
# defined in osfmk/vm/vm_compressor.h; proper decompression relies on these
self.C_SEG_BUFSIZE = (1024 * 256)
self.C_SEG_ALLOCSIZE = (self.C_SEG_BUFSIZE + 4096)
self.C_SEG_SLOT_ARRAYS = 6
self.C_SEG_SLOT_ARRAY_SIZE = 64
# defined in osfmk/vm/vm_compressor_pager.c; proper slot lookup relies on these
self.COMPRESSOR_SLOTS_CHUNK_SIZE = 512
self.COMPRESSOR_SLOTS_PER_CHUNK = 128 # (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t)), compressor_slot_t is a 32-bit int
# WKdm decompression in Python
self.wkdm=WKdm.WKdm()
# buffer for decompression
self.dest = [0] * self.wkdm.PAGE_SIZE_IN_BYTES
def calculate(self):
common.set_plugin_members(self)
com_obj_addr = self.addr_space.profile.get_symbol("_compressor_object_store")
if not com_obj_addr:
debug.error("The given memory sample does not utilize compressed swap.")
# from osfmk/vm/vm_object.h. compressor_object is the high level VM object.
compressor_object = obj.Object("vm_object", offset = com_obj_addr, vm = self.addr_space)
# from osfmk/vm/vm_compressor.c. c_segments is an array of c_segu objects, which track and store compressed pages.
# c_segment_count is current size of c_segments array.
c_segment_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_c_segment_count"),
vm = self.addr_space)
c_segments_ptr = obj.Object("Pointer", offset = self.addr_space.profile.get_symbol("_c_segments"),
vm = self.addr_space)
c_segments = obj.Object("Array", targetType = "c_segu", count = c_segment_count,
offset = c_segments_ptr, vm = self.addr_space)
c_segments_available = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_c_segments_available"),
vm = self.addr_space)
c_segments_busy = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_c_segments_busy"),
vm = self.addr_space)
c_segment_compressed_bytes = obj.Object("long long",
offset = self.addr_space.profile.get_symbol("_c_segment_compressed_bytes"),
vm = self.addr_space)
# This is probably a boring stat. Omit.
#c_segments_limit = obj.Object("unsigned int",
# offset = self.addr_space.profile.get_symbol("_c_segments_limit"),
# vm = self.addr_space)
#yield ("c_segments_limit", c_segments_limit, "")
# from osfmk/vm/vm_compressor.h
compressor_bytes_used = obj.Object("long long",
offset = self.addr_space.profile.get_symbol("_compressor_bytes_used"),
vm = self.addr_space)
yield ("Compressor memory used", compressor_bytes_used, "bytes")
# from osfmk/vm/vm_page.h
vm_page_active_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_active_count"),
vm = self.addr_space)
vm_page_inactive_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_inactive_count"),
vm = self.addr_space)
vm_page_free_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_free_count"),
vm = self.addr_space)
vm_page_speculative_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_speculative_count"),
vm = self.addr_space)
available_uncompressed = vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count
yield ("Available uncompressed memory", available_uncompressed, "pages")
available_memory = available_uncompressed + compressor_object.resident_page_count
yield ("Available memory", available_memory, "pages")
yield ("Segments available", c_segments_available, "segments")
yield ("Segments busy", c_segments_busy, "segments")
yield ("Current segment count", c_segment_count, "segments")
for i in range(c_segment_count):
if not c_segments[i].c_seg.is_valid():
yield("Segment " + str(i) + " is invalid", "SKIPPING", "")
continue
if c_segments[i].c_seg.c_ondisk == 1:
yield("Segment " + str(i) + " is swapped out", "SKIPPING", "")
continue
if c_segments[i].c_seg.c_bytes_used < 1 or c_segments[i].c_seg.c_bytes_used > self.C_SEG_ALLOCSIZE:
yield("Segment " + str(i) + " size is invalid", "SKIPPING", "")
continue
yield ("Segment " + str(i), c_segments[i].c_seg.c_bytes_used, "bytes used")
yield ("Segment " + str(i), c_segments[i].c_seg.c_bytes_unused, "bytes unused")
# walk over the two dimensional slot array (max C_SEG_SLOT_ARRAYS x C_SEG_SLOT_ARRAY SIZE elements)
# At least in 10.9, the OS X kernel zeroes an entire c_segment when it's allocated, but doesn't
# zero the C_SEG_SLOT_ARRAY_SIZE buffer when a new c_slots row is allocated, which means that
# the last valid slot needs to be tracked via the c_nextslot variable. Otherwise, garbage slots
# are encountered, which may look valid because of the limited number of bits allocated to fields
# in a struct c_slot.
j1 = 0
j2 = 0
c_nextslot = c_segments[i].c_seg.c_nextslot
yield ("Last valid slot", str((c_nextslot-1) / self.C_SEG_SLOT_ARRAY_SIZE) + ", " + str((c_nextslot-1) % self.C_SEG_SLOT_ARRAY_SIZE) , "")
while (j1 < self.C_SEG_SLOT_ARRAYS and j1 * self.C_SEG_SLOT_ARRAY_SIZE + j2 < c_nextslot):
cslot_array = c_segments[i].c_seg.c_slots[j1]
if cslot_array.is_valid():
cslots = obj.Object("Array", offset = cslot_array, targetType = "c_slot",
count = self.C_SEG_SLOT_ARRAY_SIZE, vm = self.addr_space)
while (j2 < self.C_SEG_SLOT_ARRAY_SIZE and j1 * self.C_SEG_SLOT_ARRAY_SIZE + j2 < c_nextslot):
cslot=cslots[j2]
(csize, compressed, status) = (4096 / 4, False, "UNCOMPRESSED") if (cslot.c_size == 4095) else (cslot.c_size / 4, True, "COMPRESSED")
if csize > 0:
yield (" Slot " + str(j1) + ", " + str(j2) + " offset", str(cslot.c_offset * 4), "bytes")
yield (" Slot " + str(j1) + ", " + str(j2) + " size", str(csize * 4), "bytes " + status)
cslot_data = obj.Object("Array", offset = c_segments[i].c_seg.c_store.c_buffer+cslot.c_offset * 4, targetType = "int",
count = csize, vm = self.addr_space)
yield (" Processing page at slot "+ str(j1) + ", " + str(j2),"", "")
if compressed:
# Try to decompress slot and optionally write result to file.
# Compressed data is fed to WKdm as an array of 32-bit ints.
decompressed = self.wkdm.WKdm_decompress(cslot_data, self.dest)
if decompressed > 0:
if not self._config.SKIP_WRITING:
f = open(str(i)+"-"+str(j1) + "-" + str(j2) + "-decompressed.out", 'wb')
for k in range(decompressed):
f.write(pack('<i', self.dest[k]))
f.close()
else:
yield (" Decompression failed on slot " + str(j1) + ", " + str(j2),"","SKIPPING")
elif not self._config.SKIP_WRITING:
f = open(str(i)+"-"+str(j1) + "-" + str(j2) + "-uncompressed.out", 'wb')
for k in range(0,csize):
f.write(pack('<i', cslot_data[k]))
f.close()
j2 += 1
j2=0
else:
yield(" Slot array " + str(j1) + " is invalid", "", "SKIPPING")
j1 += 1
def render_text(self, outfd, data):
for k, v1, v2 in data:
outfd.write("{0:<36} : {1:>12} {2}\n".format(k, v1, v2))
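# --- Hedged usage sketch, not part of the original plugin ---
# Typical invocation (profile and image names are hypothetical); each
# recovered page is written to the current working directory as
# <segment>-<slot_array>-<slot>-{decompressed,uncompressed}.out, and the
# -t flag skips writing pages while still printing stats:
#
#   python vol.py --profile=MacMavericks_10_9_x64 -f mem.dmp mac_compressed_swap
#   python vol.py --profile=MacMavericks_10_9_x64 -f mem.dmp mac_compressed_swap -t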
| gpl-2.0 |
limemadness/selenium_training | test_countries_sort.py | 1 | 2050 | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
#def driver(request):
# wd = webdriver.Firefox(firefox_binary="c:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe")
# print(wd.capabilities)
# request.addfinalizer(wd.quit)
# return wd
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(10)
request.addfinalizer(wd.quit)
return wd
def test_countries_sort(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").click()
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").click()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_xpath("//div[2]/button").click()
driver.get("http://localhost/litecart/admin/?app=countries&doc=countries")
#get country data
countries = driver.find_elements_by_css_selector("#content tr.row")
countries_timezone_url = []
country_name = []
#verify alphabetical order of country names
for country in countries:
country_name.append(country.find_element_by_css_selector("td:nth-child(5)").text)
assert sorted(country_name) == country_name
#get countries with multiple timezones
for country in countries:
if int(country.find_element_by_css_selector("td:nth-child(6)").text) > 0:
countries_timezone_url.append(country.find_element_by_css_selector("td:nth-child(5) a").get_attribute("href"))
#verify alphabetical order of timezones
for country_timezone_url in countries_timezone_url:
driver.get(country_timezone_url)
timezone_list = driver.find_elements_by_css_selector("#table-zones td:nth-child(2)")
del timezone_list[-1:]
timezones = []
for timezone in timezone_list:
timezones.append(timezone.text)
print(timezones)
assert sorted(timezones) == timezones
| apache-2.0 |
hanhlh/hadoop-0.20.2_FatBTree | src/contrib/hod/hodlib/Hod/hadoop.py | 167 | 27677 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""define WorkLoad as abstract interface for user job"""
# -*- python -*-
import os, time, sys, shutil, exceptions, re, threading, signal, urllib, pprint, math
from HTMLParser import HTMLParser
import xml.dom.minidom
import xml.dom.pulldom
from xml.dom import getDOMImplementation
from hodlib.Common.util import *
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.miniHTMLParser import miniHTMLParser
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Common.tcp import tcpError, tcpSocket
reCommandDelimeterString = r"(?<!\\);"
reCommandDelimeter = re.compile(reCommandDelimeterString)
class hadoopConfig:
def __create_xml_element(self, doc, name, value, description, final = False):
prop = doc.createElement("property")
nameP = doc.createElement("name")
string = doc.createTextNode(name)
nameP.appendChild(string)
valueP = doc.createElement("value")
string = doc.createTextNode(value)
valueP.appendChild(string)
if final:
finalP = doc.createElement("final")
string = doc.createTextNode("true")
finalP.appendChild(string)
desc = doc.createElement("description")
string = doc.createTextNode(description)
desc.appendChild(string)
prop.appendChild(nameP)
prop.appendChild(valueP)
if final:
prop.appendChild(finalP)
prop.appendChild(desc)
return prop
def gen_site_conf(self, confDir, tempDir, numNodes, hdfsAddr, mrSysDir,\
mapredAddr=None, clientParams=None, serverParams=None,\
finalServerParams=None, clusterFactor=None):
if not mapredAddr:
mapredAddr = "dummy:8181"
implementation = getDOMImplementation()
doc = implementation.createDocument('', 'configuration', None)
comment = doc.createComment(
"This is an auto generated hadoop-site.xml, do not modify")
topElement = doc.documentElement
topElement.appendChild(comment)
description = {}
paramsDict = { 'mapred.job.tracker' : mapredAddr , \
'fs.default.name' : "hdfs://" + hdfsAddr, \
'hadoop.tmp.dir' : tempDir, \
}
paramsDict['mapred.system.dir'] = mrSysDir
# mapred-default.xml is no longer used now.
numred = int(math.floor(clusterFactor * (int(numNodes) - 1)))
paramsDict['mapred.reduce.tasks'] = str(numred)
# end
# for all the above vars generated, set the description
for k, v in paramsDict.iteritems():
description[k] = 'Hod generated parameter'
# finalServerParams
if finalServerParams:
for k, v in finalServerParams.iteritems():
if not description.has_key(k):
description[k] = "final server parameter"
paramsDict[k] = v
# serverParams
if serverParams:
for k, v in serverParams.iteritems():
if not description.has_key(k):
# if no final value for same param is mentioned
description[k] = "server parameter"
paramsDict[k] = v
# clientParams
if clientParams:
for k, v in clientParams.iteritems():
if not description.has_key(k) or description[k] == "server parameter":
# Just add, if no final value for same param is mentioned.
# Replace even if server param is mentioned for same config variable
description[k] = "client-side parameter"
paramsDict[k] = v
# generate the xml elements
for k,v in paramsDict.iteritems():
if ( description[k] == "final server parameter" or \
description[k] == "Hod generated parameter" ):
final = True
else: final = False
prop = self.__create_xml_element(doc, k, v, description[k], final)
topElement.appendChild(prop)
siteName = os.path.join(confDir, "hadoop-site.xml")
sitefile = file(siteName, 'w')
print >> sitefile, topElement.toxml()
sitefile.close()
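# An illustrative fragment of the generated hadoop-site.xml (example values
# only; the real values depend on the allocated cluster):
#   <property>
#     <name>mapred.job.tracker</name>
#     <value>node001.example.com:50030</value>
#     <final>true</final>
#     <description>Hod generated parameter</description>
#   </property>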
class hadoopCluster:
def __init__(self, cfg, log):
self.__cfg = cfg
self.__log = log
self.__changedClusterParams = []
self.__hostname = local_fqdn()
self.__svcrgyClient = None
self.__nodePool = NodePoolUtil.getNodePool(self.__cfg['nodepooldesc'],
self.__cfg, self.__log)
self.__hadoopCfg = hadoopConfig()
self.jobId = None
self.mapredInfo = None
self.hdfsInfo = None
self.ringmasterXRS = None
def __get_svcrgy_client(self):
svcrgyUrl = to_http_url(self.__cfg['hod']['xrs-address'])
return hodXRClient(svcrgyUrl)
def __get_service_status(self):
serviceData = self.__get_service_data()
status = True
hdfs = False
mapred = False
for host in serviceData.keys():
for item in serviceData[host]:
service = item.keys()
if service[0] == 'hdfs.grid' and \
self.__cfg['gridservice-hdfs']['external'] == False:
hdfs = True
elif service[0] == 'mapred.grid':
mapred = True
if not mapred:
status = "mapred"
if not hdfs and self.__cfg['gridservice-hdfs']['external'] == False:
if status != True:
status = "mapred and hdfs"
else:
status = "hdfs"
return status
def __get_service_data(self):
registry = to_http_url(self.__cfg['hod']['xrs-address'])
serviceData = self.__svcrgyClient.getServiceInfo(
self.__cfg['hod']['userid'], self.__setup.np.getNodePoolId())
return serviceData
def __check_job_status(self):
failureCount = 0
status = False
state = 'Q'
userLimitsFirstFlag = True
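# Poll the resource manager until the job leaves the queued ('Q') state,
# retrying a bounded number of times if the status query itself fails.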
while (state=='Q') or (state==False):
if hodInterrupt.isSet():
raise HodInterruptException()
jobInfo = self.__nodePool.getJobInfo()
state = jobInfo['job_state']
self.__log.debug('job state %s' % state)
if state == False:
failureCount += 1
if (failureCount >= self.__cfg['hod']['job-status-query-failure-retries']):
self.__log.debug('Number of retries reached max limit while querying job status')
break
time.sleep(self.__cfg['hod']['job-command-failure-interval'])
elif state!='Q':
break
else:
self.__log.debug('querying for job status after job-status-query-interval')
time.sleep(self.__cfg['hod']['job-status-query-interval'])
if self.__cfg['hod'].has_key('job-feasibility-attr') and \
self.__cfg['hod']['job-feasibility-attr']:
(status, msg) = self.__isJobFeasible()
if status == "Never":
self.__log.critical(TORQUE_USER_LIMITS_EXCEEDED_MSG + msg + \
"This cluster cannot be allocated now.")
return -1
elif status == False:
if userLimitsFirstFlag:
self.__log.critical(TORQUE_USER_LIMITS_EXCEEDED_MSG + msg + \
"This cluster allocation will succeed only after other " + \
"clusters are deallocated.")
userLimitsFirstFlag = False
if state and state != 'C':
status = True
return status
def __isJobFeasible(self):
return self.__nodePool.isJobFeasible()
def __get_ringmaster_client(self):
ringmasterXRS = None
ringList = self.__svcrgyClient.getServiceInfo(
self.__cfg['ringmaster']['userid'], self.__nodePool.getServiceId(),
'ringmaster', 'hod')
if ringList and len(ringList):
if isinstance(ringList, list):
ringmasterXRS = ringList[0]['xrs']
else:
count = 0
waitTime = self.__cfg['hod']['allocate-wait-time']
while count < waitTime:
if hodInterrupt.isSet():
raise HodInterruptException()
ringList = self.__svcrgyClient.getServiceInfo(
self.__cfg['ringmaster']['userid'], self.__nodePool.getServiceId(),
'ringmaster',
'hod')
if ringList and len(ringList):
if isinstance(ringList, list):
ringmasterXRS = ringList[0]['xrs']
if ringmasterXRS is not None:
break
else:
time.sleep(1)
count = count + 1
# check to see if the job exited by any chance in that time:
if (count % self.__cfg['hod']['job-status-query-interval'] == 0):
if not self.__check_job_status():
break
return ringmasterXRS
def __init_hadoop_service(self, serviceName, xmlrpcClient):
status = True
serviceAddress = None
serviceInfo = None
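# Poll the ringmaster (up to 250 attempts, roughly a second apart) until the
# service address has been published.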
for i in range(0, 250):
try:
if hodInterrupt.isSet():
raise HodInterruptException()
serviceAddress = xmlrpcClient.getServiceAddr(serviceName)
if serviceAddress:
if serviceAddress == 'not found':
time.sleep(1)
# check to see if the job exited by any chance in that time:
if ((i+1) % self.__cfg['hod']['job-status-query-interval'] == 0):
if not self.__check_job_status():
break
else:
serviceInfo = xmlrpcClient.getURLs(serviceName)
break
except HodInterruptException,h :
raise h
except:
self.__log.critical("'%s': ringmaster xmlrpc error." % serviceName)
self.__log.debug(get_exception_string())
status = False
break
if serviceAddress == 'not found' or not serviceAddress:
self.__log.critical("Failed to retrieve '%s' service address." %
serviceName)
status = False
elif serviceAddress.startswith("Error: "):
errs = serviceAddress[len("Error: "):]
self.__log.critical("Cluster could not be allocated because of the following errors.\n%s" % \
errs)
status = False
else:
try:
self.__svcrgyClient.registerService(self.__cfg['hodring']['userid'],
self.jobId, self.__hostname,
serviceName, 'grid', serviceInfo)
except HodInterruptException, h:
raise h
except:
self.__log.critical("'%s': registry xmlrpc error." % serviceName)
self.__log.debug(get_exception_string())
status = False
return status, serviceAddress, serviceInfo
def __collect_jobtracker_ui(self, dir):
link = self.mapredInfo + "/jobtracker.jsp"
parser = miniHTMLParser()
parser.setBaseUrl(self.mapredInfo)
node_cache = {}
self.__log.debug("collect_jobtracker_ui seeded with " + link)
def alarm_handler(number, stack):
raise AlarmException("timeout")
signal.signal(signal.SIGALRM, alarm_handler)
input = None
while link:
self.__log.debug("link: %s" % link)
# taskstats.jsp,taskdetails.jsp not included since too many to collect
if re.search(
"jobfailures\.jsp|jobtracker\.jsp|jobdetails\.jsp|jobtasks\.jsp",
link):
for i in range(1,5):
if hodInterrupt.isSet():
raise HodInterruptException()
try:
input = urllib.urlopen(link)
break
except:
self.__log.debug(get_exception_string())
time.sleep(1)
if input:
out = None
self.__log.debug("collecting " + link + "...")
filename = re.sub(self.mapredInfo, "", link)
filename = dir + "/" + filename
filename = re.sub("http://","", filename)
filename = re.sub("[\?\&=:]","_",filename)
filename = filename + ".html"
try:
tempdir, tail = os.path.split(filename)
if not os.path.exists(tempdir):
os.makedirs(tempdir)
except:
self.__log.debug(get_exception_string())
out = open(filename, 'w')
bufSz = 8192
signal.alarm(10)
try:
self.__log.debug("Starting to grab: %s" % link)
buf = input.read(bufSz)
while len(buf) > 0:
# Feed the file into the HTML parser
parser.feed(buf)
# Re-write the hrefs in the file
p = re.compile("\?(.+?)=(.+?)")
buf = p.sub(r"_\1_\2",buf)
p= re.compile("&(.+?)=(.+?)")
buf = p.sub(r"_\1_\2",buf)
p = re.compile("http://(.+?):(\d+)?")
buf = p.sub(r"\1_\2/",buf)
buf = re.sub("href=\"/","href=\"",buf)
p = re.compile("href=\"(.+?)\"")
buf = p.sub(r"href=\1.html",buf)
out.write(buf)
buf = input.read(bufSz)
signal.alarm(0)
input.close()
if out:
out.close()
self.__log.debug("Finished grabbing: %s" % link)
except AlarmException:
if hodInterrupt.isSet():
raise HodInterruptException()
if out: out.close()
if input: input.close()
self.__log.debug("Failed to retrieve: %s" % link)
else:
self.__log.debug("Failed to retrieve: %s" % link)
# Get the next link in level traversal order
link = parser.getNextLink()
parser.close()
def check_cluster(self, clusterInfo):
status = 0
if 'mapred' in clusterInfo:
mapredAddress = clusterInfo['mapred'][7:]
hdfsAddress = clusterInfo['hdfs'][7:]
status = get_cluster_status(hdfsAddress, mapredAddress)
if status == 0:
status = 12
else:
status = 15
return status
def is_cluster_deallocated(self, jobId):
"""Returns True if the JobId that represents this cluster
is in the Completed or exiting state."""
jobInfo = self.__nodePool.getJobInfo(jobId)
state = None
if jobInfo is not None and jobInfo.has_key('job_state'):
state = jobInfo['job_state']
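# Torque/PBS job states: 'C' means completed, 'E' means exiting.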
return ((state == 'C') or (state == 'E'))
def cleanup(self):
if self.__nodePool: self.__nodePool.finalize()
def get_job_id(self):
return self.jobId
def delete_job(self, jobId):
'''Delete a job given its ID'''
ret = 0
if self.__nodePool:
ret = self.__nodePool.deleteJob(jobId)
else:
raise Exception("Invalid state: Node pool is not initialized to delete the given job.")
return ret
def is_valid_account(self):
"""Verify if the account being used to submit the job is a valid account.
This code looks for a file <install-dir>/bin/verify-account.
If the file is present, it executes the file, passing as argument
the account name. It returns the exit code and output from the
script on non-zero exit code."""
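# A hypothetical verify-account script (illustrative only, not shipped with
# HOD) could be as simple as:
#   #!/bin/sh
#   # exit 0 if the account may submit jobs, non-zero with a message otherwise
#   grep -q "^$1$" /etc/hod/allowed-accounts || { echo "unknown account: $1"; exit 1; }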
accountValidationScript = os.path.abspath('./verify-account')
if not os.path.exists(accountValidationScript):
return (0, None)
account = self.__nodePool.getAccountString()
exitCode = 0
errMsg = None
try:
accountValidationCmd = simpleCommand('Account Validation Command',\
'%s %s' % (accountValidationScript,
account))
accountValidationCmd.start()
accountValidationCmd.wait()
accountValidationCmd.join()
exitCode = accountValidationCmd.exit_code()
self.__log.debug('account validation script exit code: %d' \
% exitCode)
errMsg = None
if exitCode != 0:
errMsg = accountValidationCmd.output()
except Exception, e:
exitCode = 0
self.__log.warn('Error executing account script: %s ' \
'Accounting is disabled.' \
% get_exception_error_string())
self.__log.debug(get_exception_string())
return (exitCode, errMsg)
def allocate(self, clusterDir, min, max=None):
status = 0
failureCount = 0
self.__svcrgyClient = self.__get_svcrgy_client()
self.__log.debug("allocate %s %s %s" % (clusterDir, min, max))
if min < 3:
self.__log.critical("Minimum nodes must be greater than 2.")
status = 2
else:
nodeSet = self.__nodePool.newNodeSet(min)
walltime = None
if self.__cfg['hod'].has_key('walltime'):
walltime = self.__cfg['hod']['walltime']
self.jobId, exitCode = self.__nodePool.submitNodeSet(nodeSet, walltime)
# if the job submission returned an error other than no resources
# retry a couple of times
while (self.jobId is False) and (exitCode != 188):
if hodInterrupt.isSet():
raise HodInterruptException()
failureCount += 1
if (failureCount >= self.__cfg['hod']['job-status-query-failure-retries']):
self.__log.debug("failed submitting job more than the retries. exiting")
break
else:
# wait a bit before retrying
time.sleep(self.__cfg['hod']['job-command-failure-interval'])
if hodInterrupt.isSet():
raise HodInterruptException()
self.jobId, exitCode = self.__nodePool.submitNodeSet(nodeSet, walltime)
if self.jobId:
jobStatus = None
try:
jobStatus = self.__check_job_status()
except HodInterruptException, h:
self.__log.info(HOD_INTERRUPTED_MESG)
self.delete_job(self.jobId)
self.__log.info("Cluster %s removed from queue." % self.jobId)
raise h
else:
if jobStatus == -1:
self.delete_job(self.jobId)
status = 4
return status
if jobStatus:
self.__log.info("Cluster Id %s" \
% self.jobId)
try:
self.ringmasterXRS = self.__get_ringmaster_client()
self.__log.debug("Ringmaster at : %s" % self.ringmasterXRS )
ringClient = None
if self.ringmasterXRS:
ringClient = hodXRClient(self.ringmasterXRS)
hdfsStatus, hdfsAddr, self.hdfsInfo = \
self.__init_hadoop_service('hdfs', ringClient)
if hdfsStatus:
self.__log.info("HDFS UI at http://%s" % self.hdfsInfo)
mapredStatus, mapredAddr, self.mapredInfo = \
self.__init_hadoop_service('mapred', ringClient)
if mapredStatus:
self.__log.info("Mapred UI at http://%s" % self.mapredInfo)
if self.__cfg['hod'].has_key('update-worker-info') \
and self.__cfg['hod']['update-worker-info']:
workerInfoMap = {}
workerInfoMap['HDFS UI'] = 'http://%s' % self.hdfsInfo
workerInfoMap['Mapred UI'] = 'http://%s' % self.mapredInfo
# Ringmaster URL sample format : http://hostname:port/
workerInfoMap['RM RPC Port'] = '%s' % self.ringmasterXRS.split(":")[2].strip("/")
if mapredAddr.find(':') != -1:
workerInfoMap['Mapred RPC Port'] = mapredAddr.split(':')[1]
ret = self.__nodePool.updateWorkerInfo(workerInfoMap, self.jobId)
if ret != 0:
self.__log.warn('Could not update HDFS and Mapred information. ' \
'User Portal may not show relevant information. ' \
'Error code=%s' % ret)
self.__cfg.replace_escape_seqs()
# Go generate the client side hadoop-site.xml now
# adding final-params as well, just so that conf on
# client-side and server-side are (almost) the same
clientParams = None
serverParams = {}
finalServerParams = {}
# client-params
if self.__cfg['hod'].has_key('client-params'):
clientParams = self.__cfg['hod']['client-params']
# server-params
if self.__cfg['gridservice-mapred'].has_key('server-params'):
serverParams.update(\
self.__cfg['gridservice-mapred']['server-params'])
if self.__cfg['gridservice-hdfs'].has_key('server-params'):
# note that if there are params in both mapred and hdfs
# sections, the ones in hdfs overwrite the ones in mapred
serverParams.update(\
self.__cfg['gridservice-hdfs']['server-params'])
# final-server-params
if self.__cfg['gridservice-mapred'].has_key(\
'final-server-params'):
finalServerParams.update(\
self.__cfg['gridservice-mapred']['final-server-params'])
if self.__cfg['gridservice-hdfs'].has_key(
'final-server-params'):
finalServerParams.update(\
self.__cfg['gridservice-hdfs']['final-server-params'])
clusterFactor = self.__cfg['hod']['cluster-factor']
tempDir = self.__cfg['hod']['temp-dir']
if not os.path.exists(tempDir):
os.makedirs(tempDir)
tempDir = os.path.join( tempDir, self.__cfg['hod']['userid']\
+ "." + self.jobId )
mrSysDir = getMapredSystemDirectory(self.__cfg['hodring']['mapred-system-dir-root'],\
self.__cfg['hod']['userid'], self.jobId)
self.__hadoopCfg.gen_site_conf(clusterDir, tempDir, min,\
hdfsAddr, mrSysDir, mapredAddr, clientParams,\
serverParams, finalServerParams,\
clusterFactor)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
# end of hadoop-site.xml generation
else:
status = 8
else:
status = 7
else:
status = 6
if status != 0:
self.__log.debug("Cleaning up cluster id %s, as cluster could not be allocated." % self.jobId)
if ringClient is None:
self.delete_job(self.jobId)
else:
self.__log.debug("Calling rm.stop()")
ringClient.stopRM()
self.__log.debug("Returning from rm.stop()")
except HodInterruptException, h:
self.__log.info(HOD_INTERRUPTED_MESG)
if self.ringmasterXRS:
if ringClient is None:
ringClient = hodXRClient(self.ringmasterXRS)
self.__log.debug("Calling rm.stop()")
ringClient.stopRM()
self.__log.debug("Returning from rm.stop()")
self.__log.info("Cluster Shutdown by informing ringmaster.")
else:
self.delete_job(self.jobId)
self.__log.info("Cluster %s removed from queue directly." % self.jobId)
raise h
else:
self.__log.critical("No cluster found, ringmaster failed to run.")
status = 5
elif self.jobId == False:
if exitCode == 188:
self.__log.critical("Request execeeded maximum resource allocation.")
else:
self.__log.critical("Job submission failed with exit code %s" % exitCode)
status = 4
else:
self.__log.critical("Scheduler failure, allocation failed.\n\n")
status = 4
if status == 5 or status == 6:
ringMasterErrors = self.__svcrgyClient.getRMError()
if ringMasterErrors:
self.__log.critical("Cluster could not be allocated because" \
" of the following errors on the "\
"ringmaster host %s.\n%s" % \
(ringMasterErrors[0], ringMasterErrors[1]))
self.__log.debug("Stack trace on ringmaster: %s" % ringMasterErrors[2])
return status
def __isRingMasterAlive(self, rmAddr):
ret = True
rmSocket = tcpSocket(rmAddr)
try:
rmSocket.open()
rmSocket.close()
except tcpError:
ret = False
return ret
def deallocate(self, clusterDir, clusterInfo):
status = 0
nodeSet = self.__nodePool.newNodeSet(clusterInfo['min'],
id=clusterInfo['jobid'])
self.mapredInfo = clusterInfo['mapred']
self.hdfsInfo = clusterInfo['hdfs']
try:
if self.__cfg['hod'].has_key('hadoop-ui-log-dir'):
clusterStatus = self.check_cluster(clusterInfo)
if clusterStatus != 14 and clusterStatus != 10:
# If JT is still alive
self.__collect_jobtracker_ui(self.__cfg['hod']['hadoop-ui-log-dir'])
else:
self.__log.debug('hadoop-ui-log-dir not specified. Skipping Hadoop UI log collection.')
except HodInterruptException, h:
# got an interrupt. just pass and proceed to qdel
pass
except:
self.__log.info("Exception in collecting Job tracker logs. Ignoring.")
rmAddr = None
if clusterInfo.has_key('ring'):
# format is http://host:port/ We need host:port
rmAddr = clusterInfo['ring'][7:]
if rmAddr.endswith('/'):
rmAddr = rmAddr[:-1]
if (rmAddr is None) or (not self.__isRingMasterAlive(rmAddr)):
# Cluster is already dead, don't try to contact ringmaster.
self.__nodePool.finalize()
status = 10 # As cluster is dead, we just set the status to 'cluster dead'.
else:
xrsAddr = clusterInfo['ring']
rmClient = hodXRClient(xrsAddr)
self.__log.debug('calling rm.stop')
rmClient.stopRM()
self.__log.debug('completed rm.stop')
# cleanup hod temp dirs
tempDir = os.path.join( self.__cfg['hod']['temp-dir'], \
self.__cfg['hod']['userid'] + "." + clusterInfo['jobid'] )
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
return status
class hadoopScript:
def __init__(self, conf, execDir):
self.__environ = os.environ.copy()
self.__environ['HADOOP_CONF_DIR'] = conf
self.__execDir = execDir
def run(self, script):
scriptThread = simpleCommand(script, script, self.__environ, 4, False,
False, self.__execDir)
scriptThread.start()
scriptThread.wait()
scriptThread.join()
return scriptThread.exit_code()
| apache-2.0 |
ashwini0529/Oreo | flask_user/tests/tst_app.py | 2 | 8125 | import os
import datetime
from flask import Flask, render_template_string, request
from flask.ext.babel import Babel
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask.ext.user import roles_required, confirm_email_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///tst_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = os.getenv('MAIL_USE_SSL', True)
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
USER_ENABLE_USERNAME = True
USER_ENABLE_EMAIL = True
USER_ENABLE_CONFIRM_EMAIL = True
USER_ENABLE_INVITATION = True
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
babel = Babel(app) # Initialize Flask-Babel
mail = Mail(app) # Initialize Flask-Mail
# Define the User data model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=True, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=True, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define UserEmail DataModel.
class UserEmail(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# User email information
email = db.Column(db.String(255), nullable=True, unique=True)
confirmed_at = db.Column(db.DateTime())
is_primary = db.Column(db.Boolean(), nullable=False, default=False)
# Relationship
user = db.relationship('User', uselist=False)
class UserInvitation(db.Model):
__tablename__ = 'user_invite'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False)
# save the user of the invitee
invited_by_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# token used for registration page to identify user registering
token = db.Column(db.String(100), nullable=False, server_default='')
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
# Create all the database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserInvitationClass=UserInvitation)
user_manager = UserManager(db_adapter, app)
# Create regular 'member' user
if not User.query.filter(User.username=='member').first():
user = User(username='member', email='[email protected]', active=True,
password=user_manager.hash_password('Password1'), confirmed_at=datetime.datetime.utcnow())
db.session.add(user)
db.session.commit()
# Create 'user007' user with 'secret' and 'agent' roles
if not User.query.filter(User.username=='user007').first():
user1 = User(username='user007', email='[email protected]', active=True,
password=user_manager.hash_password('Password1'))
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.add(user1)
db.session.commit()
# The '/' page is accessible to anyone
@app.route('/')
def home_page():
# if current_user.is_authenticated():
# return user_profile_page()
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Home Page{%endtrans%}</h2>
<p><a href="{{ url_for('user.login') }}">{%trans%}Sign in{%endtrans%}</a></p>
{% endblock %}
""")
# The '/profile' page requires a logged-in user
@app.route('/user/profile')
@login_required # Use of @login_required decorator
@confirm_email_required
def user_profile_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Profile Page{%endtrans%}</h2>
<p> {%trans%}Hello{%endtrans%}
{{ current_user.username or current_user.email }},</p>
<p> <a href="{{ url_for('user.change_username') }}">
{%trans%}Change username{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.change_password') }}">
{%trans%}Change password{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.invite') }}">
{%trans%}Invite User{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}?next={{ url_for('user.login') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% endblock %}
""")
# The '/special' page requires a user that has the 'secret' AND ('sauce' OR 'agent') role.
@app.route('/special')
@roles_required('secret', ['sauce', 'agent']) # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Special Page{%endtrans%}</h2>
{% endblock %}
""")
# For testing only
app.db = db
app.UserEmailClass = UserEmail
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5555, debug=True)
| bsd-2-clause |