repo_name | path | copies | size | content | license
---|---|---|---|---|---|
helldorado/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_role.py | 7 | 4467 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_role
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower role.
description:
- Create, update, or destroy Ansible Tower roles. See
U(https://www.ansible.com/tower) for an overview.
options:
user:
description:
- User that receives the permissions specified by the role.
team:
description:
- Team that receives the permissions specified by the role.
role:
description:
- The role type to grant/revoke.
required: True
choices: ["admin", "read", "member", "execute", "adhoc", "update", "use", "auditor"]
target_team:
description:
- Team that the role acts on.
inventory:
description:
- Inventory the role acts on.
job_template:
description:
- The job template the role acts on.
credential:
description:
- Credential the role acts on.
organization:
description:
- Organization the role acts on.
project:
description:
- Project the role acts on.
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add jdoe to the member role of My Team
tower_role:
user: jdoe
target_team: "My Team"
role: member
state: present
tower_config_file: "~/tower_cli.cfg"
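# A hedged additional sketch (not from the original module documentation):
# revoking the same role would use state: absent with otherwise identical fields.
- name: Remove jdoe from the member role of My Team
tower_role:
user: jdoe
target_team: "My Team"
role: member
state: absent
tower_config_file: "~/tower_cli.cfg"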
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_resources(module, p):
'''update_resources attempts to fetch any of the resources given
by name using their unique field (identity)
'''
params = p.copy()
identity_map = {
'user': 'username',
'team': 'name',
'target_team': 'name',
'inventory': 'name',
'job_template': 'name',
'credential': 'name',
'organization': 'name',
'project': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'team' if k == 'target_team' else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update role, {0} not found: {1}'.format(k, excinfo), changed=False)
return params
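# Illustrative note (an assumption, not part of the upstream module): given
# params such as {'user': 'jdoe', 'target_team': 'My Team'}, update_resources
# replaces each name with the matching Tower id (e.g. {'user': 42, ...}) and
# fails the module if any lookup raises NotFound.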
def main():
argument_spec = dict(
user=dict(),
team=dict(),
role=dict(choices=["admin", "read", "member", "execute", "adhoc", "update", "use", "auditor"]),
target_team=dict(),
inventory=dict(),
job_template=dict(),
credential=dict(),
organization=dict(),
project=dict(),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
role_type = module.params.pop('role')
state = module.params.pop('state')
json_output = {'role': role_type, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
role = tower_cli.get_resource('role')
params = update_resources(module, module.params)
params['type'] = role_type
try:
if state == 'present':
result = role.grant(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = role.revoke(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to update role: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
huahbo/pyamg | pyamg/krylov/_gmres.py | 2 | 4669 | from _gmres_mgs import gmres_mgs
from _gmres_householder import gmres_householder
__docformat__ = "restructuredtext en"
__all__ = ['gmres']
def gmres(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None,
M=None, callback=None, residuals=None, orthog='mgs', **kwargs):
'''
Generalized Minimum Residual Method (GMRES)
GMRES iteratively refines the initial solution guess to the
system Ax = b
Parameters
----------
A : {array, matrix, sparse matrix, LinearOperator}
n x n, linear system to solve
b : {array, matrix}
right hand side, shape is (n,) or (n,1)
x0 : {array, matrix}
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the norm
of the initial preconditioned residual
restrt : {None, int}
- if int, restrt is max number of inner iterations
and maxiter is the max number of outer iterations
- if None, do not restart GMRES, and max number of inner iterations
is maxiter
maxiter : {None, int}
- if restrt is None, maxiter is the max number of inner iterations
and GMRES does not restart
- if restrt is int, maxiter is the max number of outer iterations,
and restrt is the max number of inner iterations
xtype : type
dtype for the solution, default is automatic type detection
M : {array, matrix, sparse matrix, LinearOperator}
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback( ||rk||_2 ), where rk is the current preconditioned residual
vector
residuals : list
residuals contains the preconditioned residual norm history,
including the initial residual.
orthog : string
'householder' calls _gmres_householder which uses Householder
reflections to find the orthogonal basis for the Krylov space.
'mgs' calls _gmres_mgs which uses modified Gram-Schmidt to find the
orthogonal basis for the Krylov space
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of gmres
== =============================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead. This value
is precisely the order of the Krylov space.
<0 numerical breakdown, or illegal input
== =============================================
Notes
-----
- The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
- The orthogonalization method, orthog='householder', is more robust
than orthog='mgs'; however, for the majority of problems, convergence
is reached before 'mgs' loses orthogonality in the basis.
- orthog='householder' has been more rigorously tested, although note
that the default in the signature above is orthog='mgs'
Examples
--------
>>> from pyamg.krylov import gmres
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
6.5428213057
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
http://www-users.cs.umn.edu/~saad/books.html
'''
# known bug vvvvvv remove when fixed
if A.dtype == complex and orthog == 'householder':
raise ValueError('[Known Bug] Householder fails with complex matrices; \
use MGS')
# pass along **kwargs
if orthog == 'householder':
(x, flag) = gmres_householder(A, b, x0=x0, tol=tol, restrt=restrt,
maxiter=maxiter, xtype=xtype, M=M,
callback=callback, residuals=residuals,
**kwargs)
elif orthog == 'mgs':
(x, flag) = gmres_mgs(A, b, x0=x0, tol=tol, restrt=restrt,
maxiter=maxiter, xtype=xtype, M=M,
callback=callback, residuals=residuals, **kwargs)
return (x, flag)
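# Hedged usage sketch (not part of the original module): a restarted GMRES with
# an inner Krylov size of 30 and at most 10 outer cycles, following the
# restrt/maxiter convention documented above, might be invoked as:
#   x, flag = gmres(A, b, restrt=30, maxiter=10, tol=1e-8, orthog='mgs')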
| mit |
potatolondon/django-nonrel-1-4 | tests/regressiontests/views/models.py | 144 | 1202 | """
Regression tests for Django built-in views.
"""
from django.db import models
class Author(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return '/views/authors/%s/' % self.id
class BaseArticle(models.Model):
"""
An abstract article Model so that we can create article models with and
without a get_absolute_url method (for create_update generic views tests).
"""
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author)
class Meta:
abstract = True
def __unicode__(self):
return self.title
class Article(BaseArticle):
date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
"""
An Article class with a get_absolute_url defined.
"""
date_created = models.DateTimeField()
def get_absolute_url(self):
return '/urlarticles/%s/' % self.slug
get_absolute_url.purge = True
class DateArticle(BaseArticle):
"""
An article Model with a DateField instead of DateTimeField,
for testing #7602
"""
date_created = models.DateField()
| bsd-3-clause |
SilentCircle/sentry | src/sentry/migrations/0026_auto__add_field_project_status.py | 7 | 12176 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.status'
db.add_column('sentry_project', 'status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.status'
db.delete_column('sentry_project', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
kaiweifan/vse-lbaas-plugin-poc | quantum/db/migration/alembic_migrations/env.py | 8 | 2967 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from logging.config import fileConfig
from alembic import context
from sqlalchemy import create_engine, pool
from quantum.db import model_base
from quantum.openstack.common import importutils
DATABASE_QUOTA_DRIVER = 'quantum.extensions._quotav2_driver.DbQuotaDriver'
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
quantum_config = config.quantum_config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
plugin_klass = importutils.import_class(quantum_config.core_plugin)
# set the target for 'autogenerate' support
target_metadata = model_base.BASEV2.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=quantum_config.DATABASE.sql_connection)
with context.begin_transaction():
context.run_migrations(active_plugin=quantum_config.core_plugin,
options=build_options())
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(
quantum_config.DATABASE.sql_connection,
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations(active_plugin=quantum_config.core_plugin,
options=build_options())
finally:
connection.close()
def build_options():
return {'folsom_quota_db_enabled': is_db_quota_enabled()}
def is_db_quota_enabled():
return quantum_config.QUOTAS.quota_driver == DATABASE_QUOTA_DRIVER
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| apache-2.0 |
alexandrucoman/vbox-nova-driver | nova/compute/utils.py | 1 | 18432 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import itertools
import string
import traceback
import netifaces
from oslo_config import cfg
from oslo_log import log
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.i18n import _LW
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova import rpc
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
def exception_to_dict(fault):
"""Converts exceptions to a dict for use in notifications."""
# TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
code = 500
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
# get the message from the exception that was thrown
# if that does not exist, use the name of the exception class itself
try:
message = fault.format_message()
# These exception handlers are broad so we don't fail to log the fault
# just because there is an unexpected error retrieving the message
except Exception:
try:
message = unicode(fault)
except Exception:
message = None
if not message:
message = fault.__class__.__name__
# NOTE(dripton) The message field in the database is limited to 255 chars.
# MySQL silently truncates overly long messages, but PostgreSQL throws an
# error if we don't truncate it.
u_message = utils.safe_truncate(message, 255)
fault_dict = dict(exception=fault)
fault_dict["message"] = u_message
fault_dict["code"] = code
return fault_dict
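# Illustrative note (assumption): for a fault without a 'kwargs' attribute this
# returns {'exception': fault, 'message': <truncated message or class name>,
# 'code': 500}.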
def _get_fault_details(exc_info, error_code):
details = ''
if exc_info and error_code == 500:
tb = exc_info[2]
if tb:
details = ''.join(traceback.format_tb(tb))
return unicode(details)
def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
fault_obj = objects.InstanceFault(context=context)
fault_obj.host = CONF.host
fault_obj.instance_uuid = instance.uuid
fault_obj.update(exception_to_dict(fault))
code = fault_obj.code
fault_obj.details = _get_fault_details(exc_info, code)
fault_obj.create()
def get_device_name_for_instance(context, instance, bdms, device):
"""Validates (or generates) a device name for instance.
This method is a wrapper for get_next_device_name that gets the list
of used devices and the root device from a block device mapping.
"""
mappings = block_device.instance_block_mapping(instance, bdms)
return get_next_device_name(instance, mappings.values(),
mappings['root'], device)
def default_device_names_for_instance(instance, root_device_name,
*block_device_lists):
"""Generate missing device names for an instance."""
dev_list = [bdm.device_name
for bdm in itertools.chain(*block_device_lists)
if bdm.device_name]
if root_device_name not in dev_list:
dev_list.append(root_device_name)
for bdm in itertools.chain(*block_device_lists):
dev = bdm.device_name
if not dev:
dev = get_next_device_name(instance, dev_list,
root_device_name)
bdm.device_name = dev
bdm.save()
dev_list.append(dev)
def get_next_device_name(instance, device_name_list,
root_device_name=None, device=None):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
for the instance. It uses the root_device_name (if provided) and
the list of used devices to find valid device names. If the device
name is valid but applicable to a different backend (for example
/dev/vdc is specified but the backend uses /dev/xvdc), the device
name will be converted to the appropriate format.
"""
is_xen = driver.compute_driver_matches('xenapi.XenAPIDriver')
req_prefix = None
req_letter = None
if device:
try:
req_prefix, req_letter = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
if not root_device_name:
root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
try:
prefix = block_device.match_device(
block_device.prepend_dev(root_device_name))[0]
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=root_device_name)
# NOTE(vish): remove this when xenapi is setting default_root_device
if is_xen:
prefix = '/dev/xvd'
if req_prefix != prefix:
LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
{'prefix': prefix, 'req_prefix': req_prefix})
used_letters = set()
for device_path in device_name_list:
letter = block_device.get_device_letter(device_path)
used_letters.add(letter)
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
if is_xen:
flavor = instance.get_flavor()
if flavor.ephemeral_gb:
used_letters.add('b')
if flavor.swap:
used_letters.add('c')
if not req_letter:
req_letter = _get_unused_letter(used_letters)
if req_letter in used_letters:
raise exception.DevicePathInUse(path=device)
return prefix + req_letter
def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
letters = list(all_letters - used_letters)
# NOTE(vish): prepend ` so all shorter sequences sort first
letters.sort(key=lambda x: x.rjust(2, '`'))
return letters[0]
def get_image_metadata(context, image_api, image_id_or_uri, instance):
image_system_meta = {}
# In case of boot from volume, image_id_or_uri may be None or ''
if image_id_or_uri is not None and image_id_or_uri != '':
# If the base image is still available, get its metadata
try:
image = image_api.get(context, image_id_or_uri)
except (exception.ImageNotAuthorized,
exception.ImageNotFound,
exception.Invalid) as e:
LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
{"image_id": image_id_or_uri, "error": e},
instance=instance)
else:
flavor = instance.get_flavor()
image_system_meta = utils.get_system_metadata_from_image(image,
flavor)
# Get the system metadata from the instance
system_meta = utils.instance_sys_meta(instance)
# Merge the metadata from the instance with the image's, if any
system_meta.update(image_system_meta)
# Convert the system metadata to image metadata
return utils.get_image_from_system_metadata(system_meta)
def get_value_from_system_metadata(instance, key, type, default):
"""Get a value of a specified type from image metadata.
@param instance: The instance object
@param key: The name of the property to get
@param type: The python type the value is be returned as
@param default: The value to return if key is not set or not the right type
"""
value = instance.system_metadata.get(key, default)
try:
return type(value)
except ValueError:
LOG.warning(_LW("Metadata value %(value)s for %(key)s is not of "
"type %(type)s. Using default value %(default)s."),
{'value': value, 'key': key, 'type': type,
'default': default}, instance=instance)
return default
def notify_usage_exists(notifier, context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
"""Generates 'exists' notification for an instance for usage auditing
purposes.
:param notifier: a messaging.Notifier
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
:param ignore_missing_network_data: if True, log any exceptions generated
while getting network info; if False, raise the exception.
:param system_metadata: system_metadata DB entries for the instance,
if not None. *NOTE*: Currently unused here in trunk, but needed for
potential custom modifications.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification if not None.
"""
audit_start, audit_end = notifications.audit_period_bounds(current_period)
bw = notifications.bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data)
if system_metadata is None:
system_metadata = utils.instance_sys_meta(instance_ref)
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
extra_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end),
bandwidth=bw, image_meta=image_meta)
if extra_usage_info:
extra_info.update(extra_usage_info)
notify_about_instance_usage(notifier, context, instance_ref, 'exists',
system_metadata=system_metadata, extra_usage_info=extra_info)
def notify_about_instance_usage(notifier, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
"""Send a notification about an instance.
:param notifier: a messaging.Notifier
:param event_suffix: Event type like "delete.start" or "exists"
:param network_info: Networking information, if provided.
:param system_metadata: system_metadata DB entries for the instance,
if provided.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
"""
if not extra_usage_info:
extra_usage_info = {}
usage_info = notifications.info_from_instance(context, instance,
network_info, system_metadata, **extra_usage_info)
if fault:
# NOTE(johngarbutt) mirrors the format in wrap_exception
fault_payload = exception_to_dict(fault)
LOG.debug(fault_payload["message"], instance=instance)
usage_info.update(fault_payload)
if event_suffix.endswith("error"):
method = notifier.error
else:
method = notifier.info
method(context, 'compute.instance.%s' % event_suffix, usage_info)
def notify_about_server_group_update(context, event_suffix, sg_payload):
"""Send a notification about server group update.
:param event_suffix: Event type like "create.start" or "create.end"
:param sg_payload: payload for server group update
"""
notifier = rpc.get_notifier(service='servergroup')
notifier.info(context, 'servergroup.%s' % event_suffix, sg_payload)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
"""Send a notification about aggregate update.
:param event_suffix: Event type like "create.start" or "create.end"
:param aggregate_payload: payload for aggregate update
"""
aggregate_identifier = aggregate_payload.get('aggregate_id', None)
if not aggregate_identifier:
aggregate_identifier = aggregate_payload.get('name', None)
if not aggregate_identifier:
LOG.debug("No aggregate id or name specified for this "
"notification and it will be ignored")
return
notifier = rpc.get_notifier(service='aggregate',
host=aggregate_identifier)
notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)
def notify_about_host_update(context, event_suffix, host_payload):
"""Send a notification about host update.
:param event_suffix: Event type like "create.start" or "create.end"
:param host_payload: payload for host update. It is a dict and there
should be at least the 'host_name' key in this
dict.
"""
host_identifier = host_payload.get('host_name')
if not host_identifier:
LOG.warning(_LW("No host name specified for the notification of "
"HostAPI.%s and it will be ignored"), event_suffix)
return
notifier = rpc.get_notifier(service='api', host=host_identifier)
notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload)
def get_nw_info_for_instance(instance):
if instance.info_cache is None:
return network_model.NetworkInfo.hydrate([])
return instance.info_cache.network_info
def has_audit_been_run(context, conductor, host, timestamp=None):
begin, end = utils.last_completed_audit_period(before=timestamp)
task_log = conductor.task_log_get(context, "instance_usage_audit",
begin, end, host)
if task_log:
return True
else:
return False
def start_instance_usage_audit(context, conductor, begin, end, host,
num_instances):
conductor.task_log_begin_task(context, "instance_usage_audit", begin,
end, host, num_instances,
"Instance usage audit started...")
def finish_instance_usage_audit(context, conductor, begin, end, host, errors,
message):
conductor.task_log_end_task(context, "instance_usage_audit", begin, end,
host, errors, message)
def usage_volume_info(vol_usage):
def null_safe_str(s):
return str(s) if s else ''
tot_refreshed = vol_usage['tot_last_refreshed']
curr_refreshed = vol_usage['curr_last_refreshed']
if tot_refreshed and curr_refreshed:
last_refreshed_time = max(tot_refreshed, curr_refreshed)
elif tot_refreshed:
last_refreshed_time = tot_refreshed
else:
# curr_refreshed must be set
last_refreshed_time = curr_refreshed
usage_info = dict(
volume_id=vol_usage['volume_id'],
tenant_id=vol_usage['project_id'],
user_id=vol_usage['user_id'],
availability_zone=vol_usage['availability_zone'],
instance_id=vol_usage['instance_uuid'],
last_refreshed=null_safe_str(last_refreshed_time),
reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
read_bytes=vol_usage['tot_read_bytes'] +
vol_usage['curr_read_bytes'],
writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
write_bytes=vol_usage['tot_write_bytes'] +
vol_usage['curr_write_bytes'])
return usage_info
def get_reboot_type(task_state, current_power_state):
"""Checks if the current instance state requires a HARD reboot."""
if current_power_state != power_state.RUNNING:
return 'HARD'
soft_types = [task_states.REBOOT_STARTED, task_states.REBOOT_PENDING,
task_states.REBOOTING]
reboot_type = 'SOFT' if task_state in soft_types else 'HARD'
return reboot_type
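# Illustrative note (assumption): a RUNNING instance in task_states.REBOOTING
# resolves to 'SOFT', while any non-RUNNING power state forces a 'HARD' reboot.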
def get_machine_ips():
"""Get the machine's ip addresses
:returns: list of Strings of ip addresses
"""
addresses = []
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = addr.split('%')[0]
addresses.append(addr)
except ValueError:
pass
return addresses
def remove_shelved_keys_from_system_metadata(instance):
# Delete system_metadata for a shelved instance
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in instance.system_metadata:
del (instance.system_metadata[key])
class EventReporter(object):
"""Context manager to report instance action events."""
def __init__(self, context, event_name, *instance_uuids):
self.context = context
self.event_name = event_name
self.instance_uuids = instance_uuids
def __enter__(self):
for uuid in self.instance_uuids:
objects.InstanceActionEvent.event_start(
self.context, uuid, self.event_name, want_result=False)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for uuid in self.instance_uuids:
objects.InstanceActionEvent.event_finish_with_failure(
self.context, uuid, self.event_name, exc_val=exc_val,
exc_tb=exc_tb, want_result=False)
return False
class UnlimitedSemaphore(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def balance(self):
return 0
| apache-2.0 |
Sispheor/piclodio3 | back/tests/test_views/test_web_radio_views/test_delete.py | 1 | 1729 | from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from restapi.models import AlarmClock
from restapi.models.web_radio import WebRadio
class TestDelete(APITestCase):
def setUp(self):
super(TestDelete, self).setUp()
self.webradio_to_delete = WebRadio.objects.create(name="test", url="http://test.com")
self.url = reverse('api:webradios:retrieve_update_destroy',
kwargs={'pk': self.webradio_to_delete.id})
def test_delete(self):
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(WebRadio.objects.count(), 0)
def test_delete_with_alarm_clock(self):
should_be_deleted_alarm_clock = AlarmClock.objects.create(name="alarm1",
monday=True,
hour=8,
minute=20,
webradio=self.webradio_to_delete)
from utils.scheduler_manager import SchedulerManager
from unittest.mock import patch
with patch.object(SchedulerManager, 'delete_job_by_id', return_value=None) as mock_scheduler:
self.client.delete(self.url, format='json')
mock_scheduler.assert_called_with(should_be_deleted_alarm_clock.id)
# the linked alarm clock should be deleted
with self.assertRaises(AlarmClock.DoesNotExist):
AlarmClock.objects.get(pk=should_be_deleted_alarm_clock.id)
| mit |
musashin/ezTorrent | eztorrent.py | 1 | 11668 | #!/usr/bin/python
__author__ = 'Nicolas'
import t411
import transmissionrpc
import base64
import re
from commandline import CmdLine, command
import time
import filesize
import json
import os
TRANSMISSION_ADDRESS_FILE = 'transmission.json' #This is the file where the transmission server address is stored
#https://api.t411.me/
class TransmissionClient(transmissionrpc.Client):
"""
A wrapper around the transmission.rpc client that also prompt user for the server
address, and then stored the answers and reuse it in subsequent constructions.
"""
def __init__(self):
"""
Constructor: if not TRANSMISSION_ADDRESS_FILE file, ask address to user
and save it, otherwise, load the data from the file.
Then instantiate the Transmission client.
"""
try:
with open(TRANSMISSION_ADDRESS_FILE) as address_file:
connection = json.loads(address_file.read())
if 'address' not in connection or 'port' not in connection:
address, port = self.ask_for_connection()
else:
address = connection['address']
port = connection['port']
except:
address, port = self.ask_for_connection()
super(TransmissionClient, self).__init__(address=address, port=int(port))
@staticmethod
def ask_for_connection():
"""
Ask the user for the URL and port of the transmission server
:return: address and port of the transmission server
"""
address = raw_input('Please enter transmission RPC address: ')
port = raw_input('Please enter transmission RPC port: ')
connection_data = json.dumps({'address': '%s' % address, 'port': '%s' % port})
with open(TRANSMISSION_ADDRESS_FILE, 'w') as connection_file:
connection_file.write(connection_data)
return address, port
class T411Commands(CmdLine):
"""
This the T411 command line interface
"""
__result_len_limit__ = 20 # can be adjusted by the limit command
def __init__(self):
"""
Constructor: create T411 and transmission client connections.
"""
super(T411Commands, self).__init__(prompt='T411')
self.query_filters_names = ('cid',)
self.result_filters = {'grep': self.grep_results}
try:
print 'Connecting to T411'
self.t411 = t411.T411()
except Exception as e:
print 'Could not connect to T411: '+str(e)
try:
print 'Connecting to Transmission'
self.transmission = TransmissionClient()
except Exception as e:
print 'Could not connect to Transmission: '+str(e)
self.clear()
def get_search_string(self, query, filters):
"""
Create the T411 API search string
:param query: Queried string the user added
:param filters: The list of filters the user en entered (after pipe symbols)
:return: The T411 API post request string
"""
query = query.replace(' ', '+')
base_search_string = query+'?offset='+str(self.offset)+'&limit='+str(self.__result_len_limit__)
query_filters = [(index, filter['type'], filter['arg']) for index, filter in enumerate(filters)
if filter['type'] in self.query_filters_names]
if query_filters:
for filter in query_filters:
base_search_string += '&{!s}={!s}'.format(filter[1], filter[2])
return base_search_string
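# Illustrative sketch (an assumption about typical input): with offset 0, the
# default limit of 20, and a category filter whose argument is the bare string
# '5', get_search_string('avatar', filters) would yield something like
# 'avatar?offset=0&limit=20&cid=5'.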
@staticmethod
def grep_results(results, filter_argument):
"""
Filter the results by name using the regular expressions
:param results: Search result
:param filter_argument: Regular expression
:return: results, but only those for which the name match regexp
"""
filter = re.compile(filter_argument[0])
filtered_result = dict()
filtered_result['total'] = results['total']
filtered_result['torrents'] = [torrent for torrent in results['torrents'] if filter.search(torrent['name'])]
return filtered_result
def print_search_results(self):
"""
Display the last search results on screen.
"""
print 'Found {!s} torrent'.format(self.last_search_result['total'])
if self.last_search_result:
for i, torrent in enumerate(self.last_search_result['torrents']):
print '\t-{!s} {} [{!s} -{!s}-]\t seeders:{!s}\t size = {!s}b'.format(i,
torrent['name'].encode('utf-8'),
torrent['categoryname'].encode('utf-8'),
torrent['category'],
torrent['seeders'],
filesize.size(int(torrent['size'])))
else:
print 'Nothing found.'
def search_t411(self, filters):
"""
Initiate a search on T411 and filters the results
:param filters: Filters to apply to the search
:return:
"""
query_result = self.t411.search(self.get_search_string(self.last_query_string, filters)).json()
for filter in filters:
if filter['type'] in self.result_filters:
query_result = self.result_filters[filter['type']](query_result, filter['arg'])
return query_result
@command('reset')
def reset(self, cmdArgs, filters):
"""
Reset saved settings (credentials and addresses).
Accepted arguments are:
[t411] -> to reset t411 credentials
[transmission] -> to reset transmission server address
"""
if cmdArgs.lower() == 't411':
os.remove(t411.USER_CREDENTIALS_FILE)
elif cmdArgs.lower() == 'transmission':
os.remove(TRANSMISSION_ADDRESS_FILE)
@command('clear')
def clear(self, *args):
"""
Clear previous search results
"""
self.offset = 0
self.last_search_result = dict()
self.last_query_string = ''
self.last_query_filters = ''
@command('limit')
def limit(self, cmdArgs, filters):
"""
set limit on query result (default is 20, argument is new limit)
"""
if cmdArgs:
try:
self.__result_len_limit__ = int(cmdArgs)
except:
pass
print 'Query limited to {!s} results.'.format(self.__result_len_limit__)
@command('search')
def search(self, cmdArgs, filters):
"""
[query string] -> Search Torrent
accepts filters: cid [category_id] or grep [name regex filter]
for example: search avatar | cid 5 | grep ava[1-2]
"""
self.last_query_string = str(cmdArgs)
self.last_query_filters = filters
self.last_search_result = self.search_t411(filters)
self.print_search_results()
@command('info')
def info(self, cmdArgs, filters):
"""
[torrentID] -> Get Torrent Info
"""
infos = self.t411.details(self.last_search_result['torrents'][int(cmdArgs)]['id']).json()
for key, value in infos['terms'].iteritems():
print '\t- ' + key.encode('utf-8') + ':\t' + value.encode('utf-8')
@command('user')
def user(self, cmdArgs, filters):
"""
Show user data (ratio...)
"""
infos = self.t411.me().json()
print 'Uploaded: \t' + filesize.size(int(infos['uploaded']))+'b'
print 'Downloaded: \t' + filesize.size(int(infos['downloaded']))+'b'
print 'Ratio:\t{:.2f}'.format(float(infos['uploaded'])/float(infos['downloaded']))
@command('next')
def next(self, cmdArgs, filters):
"""
Shows next results for last query
"""
if self.last_search_result:
self.offset += self.__result_len_limit__
self.last_search_result = self.search_t411(self.last_query_filters)
self.print_search_results()
else:
print 'You need to make a search first.'
@command('previous')
def previous(self, cmdArgs, filters):
"""
Shows previous results for last query
"""
if self.last_search_result:
self.offset -= self.__result_len_limit__
self.offset = max(0, self.offset)
self.last_search_result = self.search_t411(self.last_query_filters)
self.print_search_results()
else:
print 'You need to make a search first.'
@command('cat')
def cat(self, cmdArgs, filters):
"""
List categories
"""
cat_list = self.t411.categories().json()
for cat_id, cat_info in cat_list.iteritems():
if 'id' in cat_info:
print '\t-{!s}:\t{!s}'.format(cat_id, cat_info['name'].encode('utf-8'))
if 'cats' in cat_info:
for subcat_id, subcat_info in cat_info['cats'].iteritems():
print '\t\t-{!s}:\t{!s}'.format(subcat_id, subcat_info['name'].encode('utf-8'))
def get_download_list(self, cmdArgs):
"""
Return a list of indexes in the last search result that are selected by the user.
"""
if cmdArgs.lower() == 'all':
download_index_list = range(len(self.last_search_result['torrents']))
else:
from_to_format = re.compile(r'[\s]*(?P<start>[\d]+)[\s]*\-[\s]*(?P<end>[\d]+)[\s]*')
from_to = re.match(from_to_format, cmdArgs)
if from_to:
download_index_list = map(str, range(int(from_to.group('start')), int(from_to.group('end'))+1))
else:
download_index_list = cmdArgs.split(',')
if len(download_index_list) > 1:
if not CmdLine.confirm("Are you want to download the {!s} torrents".format(len(download_index_list))):
download_index_list = list()
return map(int, download_index_list)
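# Illustrative note (assumption): 'all' selects every result index from the last
# search, '3-5' expands to [3, 4, 5], and '1,4' to [1, 4]; multi-torrent
# selections ask the user for confirmation before downloading.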
@command('download')
def download(self, cmdArgs, filters):
"""
Download torrent. Accepted arguments are:
["all"] -> download all results
[X,Y] -> download torrents with result indexes X and Y.
[X-Y] -> download torrents with result indexes from X to Y.
[X] -> download torrents with result indexes X.
"""
download_index_list = self.get_download_list(cmdArgs)
for index in download_index_list:
try:
torrent = self.t411.download(self.last_search_result['torrents'][index]['id'])
#with open('temp.torrent', 'w') as torrent_file:
# torrent_file.write(torrent.content)
self.transmission.add_torrent(base64.b64encode(torrent.content))
time.sleep(1)
except Exception as e:
print 'Could not add torrent {!s} to download queue [{!s}]'.\
format(self.last_search_result['torrents'][index]['name'].encode('utf-8'), e)
else:
print 'successfully added torrent {!s} to download queue'\
.format(self.last_search_result['torrents'][index]['name'].encode('utf-8'))
if __name__ == '__main__':
cli = T411Commands()
cli.run()
| mit |
FreezyExp/dndtools | dndtools/dnd/contacts/views.py | 3 | 2891 | # -*- coding: utf-8 -*-
from django.core.mail.message import EmailMessage
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from dnd.menu import menu_item, submenu_item, MenuItem
from dnd.forms import ContactForm
from dnd.models import StaticPage
@menu_item(MenuItem.CONTACTS)
@submenu_item(MenuItem.Contacts.CONTACT_US)
def contact(request):
if request.method == 'POST':
form = ContactForm(request.POST,
initial={
'captcha': request.META['REMOTE_ADDR']})
if form.is_valid():
if form.cleaned_data['sender']:
headers = {
'Reply-To': form.cleaned_data['sender']}
else:
headers = {}
email = EmailMessage(
subject=form.cleaned_data['subject'],
body="%s\n\nfrom: %s" % (form.cleaned_data['message'],
form.cleaned_data['sender']),
from_email='[email protected]',
to=('[email protected]', '[email protected]'),
headers=headers,
)
email.send()
# Redirect after POST
return HttpResponseRedirect(reverse('contact_sent'))
else:
form = ContactForm() # An unbound form
# request context required for CSRF
return render_to_response('dnd/contacts/contact.html',
{
'request': request,
'form': form, }, context_instance=RequestContext(request), )
@menu_item(MenuItem.CONTACTS)
@submenu_item(MenuItem.Contacts.CONTACT_US)
def contact_sent(request):
return render_to_response('dnd/contacts/contact_sent.html',
{
'request': request,
}, context_instance=RequestContext(request), )
@menu_item(MenuItem.CONTACTS)
@submenu_item(MenuItem.Contacts.STAFF)
def staff(request):
page_body = StaticPage.objects.filter(name='staff')[0]
return render_to_response('dnd/contacts/staff.html',
{
'request': request,
'page_body': page_body,
}, context_instance=RequestContext(request), )
@menu_item(MenuItem.ANDROID)
def android(request):
page_body = StaticPage.objects.get(name='android')
return render_to_response('dnd/static/android.html',
{
'request': request,
'page_body': page_body,
}, context_instance=RequestContext(request), )
| mit |
mena-devs/slack_data_collector | slackcollector/tests/test_collector.py | 1 | 2654 | # The MIT License (MIT)
# Copyright (c) 2016 Mena-Devs
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os
import unittest
from slackcollector.collector import Collector
class TestCollector(unittest.TestCase):
def setUp(self):
self.config_file = 'config.example.yml'
self.collector_inst = Collector(self.config_file)
def test_load_config_file_success(self):
self.collector_inst.load_config(self.config_file)
self.assertIsNotNone(self.collector_inst.data_dir)
self.assertIsNotNone(self.collector_inst.data_file_prefix)
def test_load_config_file_failure(self):
"""
Test a non existent configuration file
"""
self.assertRaises(IOError, self.collector_inst.load_config,
'/boguspath')
def test_anonymize_data_success(self):
"""
Test whether the data anonymizer works by removing sensitive
JSON objects
"""
test_json_file = os.path.join(os.path.dirname(__file__),
'_test_data/sensitive_json.json')
with open(test_json_file) as data_file:
json_data = json.load(data_file)
clean_json_data = self.collector_inst.anonymize_data(json_data)
sensitive_keys_set = set(['profile', 'real_name', 'name'])
for item in clean_json_data['members']:
# If the intersection of the "sensitive_keys_set" and keys sets is
# empty then we have cleared these keys and their values
self.assertFalse(sensitive_keys_set & set(item))
if __name__ == '__main__':
unittest.main()
| mit |
blrm/openshift-tools | ansible/roles/lib_openshift_3.2/library/oc_process.py | 6 | 37409 | #!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''return all pods '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''return all pods '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''return all pods '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
'''return all pods '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
'''return all pods '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
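# Illustrative note (assumption): _process('my-template', params={'KEY': 'val'})
# runs `oc process -n <namespace> my-template -v KEY=val`; with create=True the
# rendered output is written to /tmp and passed back to `oc create -f`.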
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oc', 'adm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
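    # Illustrative only (not part of the original file): openshift_cmd() prepends
    # '/usr/bin/oc' (or 'oc adm' when oadm=True) and returns a dict carrying
    # 'returncode', 'results' and 'cmd'. Assuming a reachable cluster:
    #
    #   cli = OpenShiftCLI('default')
    #   rval = cli.openshift_cmd(['get', 'pods', '-n', 'default'], output=True)
    #   rval['returncode']  # 0 on success; rval['results'] holds the parsed JSON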
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
        '''Turn a list of dicts with 'path' and 'data' keys into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the parsed contents of the given resource file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
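    # Hedged example of the comparison above, using made-up definitions; the
    # autogenerated 'metadata' and 'status' sections are skipped by default:
    #
    #   user = {'spec': {'replicas': 2}}
    #   curr = {'metadata': {'name': 'app'}, 'status': {}, 'spec': {'replicas': 2}}
    #   Utils.check_def_equal(user, curr)                        # True
    #   Utils.check_def_equal({'spec': {'replicas': 3}}, curr)   # False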
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
        '''return all options as a list of cli arguments'''
return self.stringify()
def stringify(self):
        ''' return the options hash as a list of cli params '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
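    # Hypothetical option hash for the config wrapper above (not in the original
    # source); stringify() keeps included options whose value is truthy or an int:
    #
    #   opts = {'replicas': {'value': 3, 'include': True},
    #           'service_account': {'value': '', 'include': True}}
    #   cfg = OpenShiftCLIConfig('app', 'default',
    #                            '/etc/origin/master/admin.kubeconfig', opts)
    #   cfg.to_option_list()  # ['--replicas=3'] -- empty values are dropped and
    #                         # underscores become dashes in the flag name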
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
        ''' getter method for separator '''
return self._separator
@separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b
            item = 'd'
            result: d = {'a': {'b': 'd'}}
        '''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
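    # Illustrative use of the dotted-key helpers above (hypothetical data, not
    # part of the original module):
    #
    #   yed = Yedit(content={'a': {'b': {'c': 'd'}}})
    #   yed.get('a.b.c')        # 'd'
    #   yed.put('a.b.e', 'f')   # (True, {'a': {'b': {'c': 'd', 'e': 'f'}}})
    #   yed.separator = '#'
    #   yed.get('a#b#c')        # 'd'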
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class OCProcess(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
namespace,
tname=None,
params=None,
create=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
tdata=None,
verbose=False):
        ''' Constructor for OCProcess '''
super(OCProcess, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.name = tname
self.data = tdata
self.params = params
self.create = create
self.kubeconfig = kubeconfig
self.verbose = verbose
self._template = None
@property
def template(self):
'''template property'''
if self._template == None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
raise OpenShiftCLIError('Error processing template [%s].' % self.name)
self._template = results['results']['items']
return self._template
def get(self):
'''get the template'''
results = self._get('template', self.name)
if results['returncode'] != 0:
# Does the template exist??
if 'not found' in results['stderr']:
results['returncode'] = 0
results['exists'] = False
results['results'] = []
return results
def delete(self, obj):
'''delete a resource'''
return self._delete(obj['kind'], obj['metadata']['name'])
def create_obj(self, obj):
'''create a resource'''
return self._create_from_content(obj['metadata']['name'], obj)
def process(self, create=None):
'''process a template'''
do_create = False
if create != None:
do_create = create
else:
do_create = self.create
return self._process(self.name, do_create, self.params, self.data)
def exists(self):
'''return whether the template exists'''
# Always return true if we're being passed template data
if self.data:
return True
t_results = self._get('template', self.name)
if t_results['returncode'] != 0:
# Does the template exist??
if 'not found' in t_results['stderr']:
return False
else:
raise OpenShiftCLIError('Something went wrong. %s' % t_results)
return True
def needs_update(self):
'''attempt to process the template and return it for comparison with oc objects'''
obj_results = []
for obj in self.template:
# build a list of types to skip
skip = []
if obj['kind'] == 'ServiceAccount':
skip.extend(['secrets', 'imagePullSecrets'])
if obj['kind'] == 'BuildConfig':
skip.extend(['lastTriggeredImageID'])
if obj['kind'] == 'ImageStream':
skip.extend(['generation'])
if obj['kind'] == 'DeploymentConfig':
skip.extend(['lastTriggeredImage'])
# fetch the current object
curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
if curr_obj_results['returncode'] != 0:
# Does the template exist??
if 'not found' in curr_obj_results['stderr']:
obj_results.append((obj, True))
continue
# check the generated object against the existing object
if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
obj_results.append((obj, True))
continue
obj_results.append((obj, False))
return obj_results
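    # Hypothetical shape of the list returned above -- one (object, needs_update)
    # pair per rendered template object, for example:
    #
    #   [({'kind': 'Service', 'metadata': {'name': 'app'}}, False),
    #    ({'kind': 'DeploymentConfig', 'metadata': {'name': 'app'}}, True)]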
# pylint: disable=too-many-branches
def main():
'''
    ansible oc module for processing OpenShift templates
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str', choices=['present', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
template_name=dict(default=None, type='str'),
content=dict(default=None, type='str'),
params=dict(default=None, type='dict'),
create=dict(default=False, type='bool'),
reconcile=dict(default=True, type='bool'),
),
supports_check_mode=True,
)
ocprocess = OCProcess(module.params['namespace'],
module.params['template_name'],
module.params['params'],
module.params['create'],
kubeconfig=module.params['kubeconfig'],
tdata=module.params['content'],
verbose=module.params['debug'])
state = module.params['state']
api_rval = ocprocess.get()
if state == 'list':
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=False, results=api_rval, state="list")
elif state == 'present':
if not ocprocess.exists() or not module.params['reconcile']:
#FIXME: this code will never get run in a way that succeeds when
# module.params['reconcile'] is true. Because oc_process doesn't
# create the actual template, the check of ocprocess.exists()
# is meaningless. Either it's already here and this code
# won't be run, or this code will fail because there is no
# template available for oc process to use. Have we conflated
# the template's existence with the existence of the objects
# it describes?
# Create it here
api_rval = ocprocess.process()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
# verify results
update = False
rval = []
all_results = ocprocess.needs_update()
for obj, status in all_results:
if status:
ocprocess.delete(obj)
results = ocprocess.create_obj(obj)
results['kind'] = obj['kind']
rval.append(results)
update = True
if not update:
module.exit_json(changed=update, results=api_rval, state="present")
for cmd in rval:
if cmd['returncode'] != 0:
module.fail_json(changed=update, results=rval, state="present")
module.exit_json(changed=update, results=rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
| apache-2.0 |
3dfxmadscientist/cbss-server | addons/web/tests/test_menu.py | 65 | 5763 | # -*- coding: utf-8 -*-
import collections
import mock
import unittest2
from ..controllers import main
class Placeholder(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class LoadTest(unittest2.TestCase):
def setUp(self):
self.menu = main.Menu()
self.request = mock.Mock()
# Have self.request.session.model() return a different mock object for
# each model (but always the same mock for a given model name)
models = collections.defaultdict(mock.Mock)
model = self.request.session.model.side_effect = \
lambda model_name: models[model_name]
self.MockMenus = model('ir.ui.menu')
# Mock the absence of custom menu
model('res.users').read.return_value = [{
'menu_id': False
}]
def tearDown(self):
del self.request
del self.MockMenus
del self.menu
def test_empty(self):
self.MockMenus.search.return_value = []
self.MockMenus.read.return_value = []
root = self.menu.load(self.request)
self.MockMenus.search.assert_called_with(
[('parent_id','=', False)], 0, False, False,
self.request.context)
self.assertEqual(root['all_menu_ids'], [])
self.assertListEqual(
root['children'],
[])
def test_applications_sort(self):
self.MockMenus.search.return_value = [1, 2, 3]
self.MockMenus.read.side_effect = lambda *args: [
{'id': 1, 'sequence': 1, 'parent_id': False},
{'id': 3, 'sequence': 2, 'parent_id': False},
{'id': 2, 'sequence': 3, 'parent_id': False},
]
root = self.menu.load(self.request)
self.MockMenus.search.assert_called_with(
[('id','child_of', [1, 2, 3])], 0, False, False,
self.request.context)
self.MockMenus.read.assert_called_with(
[1, 2, 3], ['name', 'sequence', 'parent_id',
'action'],
self.request.context)
self.assertEqual(root['all_menu_ids'], [1, 2, 3])
self.assertEqual(
root['children'],
[{
'id': 1, 'sequence': 1,
'parent_id': False, 'children': []
}, {
'id': 3, 'sequence': 2,
'parent_id': False, 'children': []
}, {
'id': 2, 'sequence': 3,
'parent_id': False, 'children': []
}])
def test_deep(self):
self.MockMenus.search.side_effect = lambda domain, *args: (
[1] if domain == [('parent_id', '=', False)] else [1, 2, 3, 4])
root = {'id': 1, 'sequence': 1, 'parent_id': False}
self.MockMenus.read.side_effect = lambda ids, *args: (
[root] if ids == [1] else [
{'id': 1, 'sequence': 1, 'parent_id': False},
{'id': 2, 'sequence': 2, 'parent_id': [1, '']},
{'id': 3, 'sequence': 1, 'parent_id': [2, '']},
{'id': 4, 'sequence': 2, 'parent_id': [2, '']},
])
root = self.menu.load(self.request)
self.MockMenus.search.assert_called_with(
[('id','child_of', [1])], 0, False, False,
self.request.context)
self.assertEqual(root['all_menu_ids'], [1, 2, 3, 4])
self.assertEqual(
root['children'],
[{
'id': 1,
'sequence': 1,
'parent_id': False,
'children': [{
'id': 2,
'sequence': 2,
'parent_id': [1, ''],
'children': [{
'id': 3,
'sequence': 1,
'parent_id': [2, ''],
'children': []
}, {
'id': 4,
'sequence': 2,
'parent_id': [2, ''],
'children': []
}]
}]
}]
)
class ActionMungerTest(unittest2.TestCase):
def setUp(self):
self.menu = main.Menu()
def test_actual_treeview(self):
action = {
"views": [[False, "tree"], [False, "form"],
[False, "calendar"]],
"view_type": "tree",
"view_id": False,
"view_mode": "tree,form,calendar"
}
changed = action.copy()
del action['view_type']
main.fix_view_modes(changed)
self.assertEqual(changed, action)
def test_list_view(self):
action = {
"views": [[False, "tree"], [False, "form"],
[False, "calendar"]],
"view_type": "form",
"view_id": False,
"view_mode": "tree,form,calendar"
}
main.fix_view_modes(action)
self.assertEqual(action, {
"views": [[False, "list"], [False, "form"],
[False, "calendar"]],
"view_id": False,
"view_mode": "list,form,calendar"
})
def test_redundant_views(self):
action = {
"views": [[False, "tree"], [False, "form"],
[False, "calendar"], [42, "tree"]],
"view_type": "form",
"view_id": False,
"view_mode": "tree,form,calendar"
}
main.fix_view_modes(action)
self.assertEqual(action, {
"views": [[False, "list"], [False, "form"],
[False, "calendar"], [42, "list"]],
"view_id": False,
"view_mode": "list,form,calendar"
})
| agpl-3.0 |
way2heavy/youtube-dl-1 | youtube_dl/extractor/goshgay.py | 127 | 1579 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
)
from ..utils import (
parse_duration,
)
class GoshgayIE(InfoExtractor):
_VALID_URL = r'https?://www\.goshgay\.com/video(?P<id>\d+?)($|/)'
_TEST = {
'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video',
'md5': '027fcc54459dff0feb0bc06a7aeda680',
'info_dict': {
'id': '299069',
'ext': 'flv',
'title': 'DIESEL SFW XXX Video',
'thumbnail': 're:^http://.*\.jpg$',
'duration': 79,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h2>(.*?)<', webpage, 'title')
duration = parse_duration(self._html_search_regex(
r'<span class="duration">\s*-?\s*(.*?)</span>',
webpage, 'duration', fatal=False))
flashvars = compat_parse_qs(self._html_search_regex(
r'<embed.+?id="flash-player-embed".+?flashvars="([^"]+)"',
webpage, 'flashvars'))
thumbnail = flashvars.get('url_bigthumb', [None])[0]
video_url = flashvars['flv_url'][0]
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'age_limit': self._family_friendly_search(webpage),
}
| unlicense |
40223125/w16btest1 | static/Brython3.1.3-20150514-095342/Lib/queue.py | 818 | 8835 | '''A multi-producer, multi-consumer queue.'''
try:
import threading
except ImportError:
import dummy_threading as threading
from collections import deque
from heapq import heappush, heappop
try:
from time import monotonic as time
except ImportError:
from time import time
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
'Exception raised by Queue.get(block=0)/get_nowait().'
pass
class Full(Exception):
'Exception raised by Queue.put(block=0)/put_nowait().'
pass
class Queue:
'''Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
'''
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
'''Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
'''
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
def qsize(self):
'''Return the approximate size of the queue (not reliable!).'''
with self.mutex:
return self._qsize()
def empty(self):
'''Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
'''
with self.mutex:
return not self._qsize()
def full(self):
'''Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
'''
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def put(self, item, block=True, timeout=None):
'''Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
'''
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
with self.not_empty:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
def put_nowait(self, item):
'''Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
'''
return self.put(item, block=False)
def get_nowait(self):
'''Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
'''
return self.get(block=False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
heappush(self.queue, item)
def _get(self):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
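# Minimal usage sketch (not part of the original module): a bounded FIFO queue
# shared with a single consumer thread, using join()/task_done() to wait for
# completion of every enqueued item.
#
#   q = Queue(maxsize=10)
#
#   def worker():
#       while True:
#           item = q.get()
#           # ... process item ...
#           q.task_done()
#
#   threading.Thread(target=worker, daemon=True).start()
#   for i in range(5):
#       q.put(i)
#   q.join()  # blocks until task_done() has been called for every item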
| agpl-3.0 |
lucernae/geonode | geonode/upload/tests/test_files.py | 2 | 1660 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""unit tests for geonode.upload.files module"""
from django.test import TestCase
from geonode.upload import files
class FilesTestCase(TestCase):
def test_scan_hint_kml_ground_overlay(self):
result = files.get_scan_hint(["kml", "other"])
kml_file_type = files.get_type("KML Ground Overlay")
self.assertEqual(result, kml_file_type.code)
def test_scan_hint_kmz_ground_overlay(self):
result = files.get_scan_hint(["kmz", "other"])
self.assertEqual(result, "kmz")
def test_get_type_non_existing_type(self):
self.assertIsNone(files.get_type("fake"))
def test_get_type_kml_ground_overlay(self):
file_type = files.get_type("KML Ground Overlay")
self.assertEqual(file_type.code, "kml-overlay")
self.assertIn("kmz", file_type.aliases)
| gpl-3.0 |
40223114/2015_g4 | static/Brython3.1.0-20150301-090019/Lib/operator.py | 674 | 7736 | #!/usr/bin/env python3
"""
Operator Interface
This module exports a set of functions corresponding to the intrinsic
operators of Python. For example, operator.add(x, y) is equivalent
to the expression x+y. The function names are those used for special
methods; variants without leading and trailing '__' are also provided
for convenience.
This is the pure Python implementation of the module.
"""
# downloaded from http://bugs.python.org/file28327/operator.py
#import builtins as _bi #there is no builtins module
def lt(a, b):
"Same as a < b."
return a < b
__lt__ = lt
def le(a, b):
"Same as a <= b."
return a <= b
__le__ = le
def eq(a, b):
"Same as a == b."
return a == b
__eq__ = eq
def ne(a, b):
"Same as a != b."
return a != b
__ne__ = ne
def ge(a, b):
"Same as a >= b."
return a >= b
__ge__ = ge
def gt(a, b):
"Same as a > b."
return a > b
__gt__ = gt
def not_(a):
"Same as not a."
return not a
__not__ = not_
def truth(a):
"Return True if a is true, False otherwise."
#return _bi.bool(a)
return bool(a)
def is_(a, b):
"Same as a is b."
return a is b
# brython does not like (causes syntax error)
#def is_not(a, b):
# "Same as a is not b."
# return a is not b
# defining a wrapper named abs here recurses into itself, so the wrapper stays
# commented out and the built-in abs is aliased below instead
#def abs(a):
# "Same as abs(a)."
# #return _bi.abs(a)
# return abs(a)
__abs__ = abs
abs = abs
def add(a, b):
"Same as a + b."
return a + b
__add__ = add
def and_(a, b):
"Same as a & b."
return a & b
__and__ = and_
def floordiv(a, b):
"Same as a // b."
return a // b
__floordiv__ = floordiv
def index(a):
"Same as a.__index__()."
return a.__index__()
__index__ = index
def inv(a):
"Same as ~a."
return ~a #brython does not like
#return a^(2**31)
invert = __inv__ = __invert__ = inv
def lshift(a, b):
"Same as a << b."
return a << b
__lshift__ = lshift
def mod(a, b):
"Same as a % b."
return a % b
__mod__ = mod
def mul(a, b):
"Same as a * b."
return a * b
__mul__ = mul
def neg(a):
"Same as -a."
return -a
__neg__ = neg
def or_(a, b):
"Same as a | b."
return a | b
__or__ = or_
def pos(a):
"Same as +a."
return +a #brython does not like
if a >= 0: return a
return -a
__pos__ = pos
def pow(a, b):
"Same as a ** b."
return a ** b
__pow__ = pow
def rshift(a, b):
"Same as a >> b."
return a >> b
__rshift__ = rshift
def sub(a, b):
"Same as a - b."
return a - b
__sub__ = sub
def truediv(a, b):
"Same as a / b."
return a / b
__truediv__ = truediv
def xor(a, b):
"Same as a ^ b."
return a ^ b
__xor__ = xor
def concat(a, b):
"Same as a + b, for a and b sequences."
if not (hasattr(a, '__getitem__') and hasattr(b, '__getitem__')):
raise TypeError('a and b must be sequences')
return a + b
__concat__ = concat
def contains(a, b):
"Same as b in a (note reversed operands)."
return b in a
__contains__ = contains
def countOf(a, b):
"Return the number of times b occurs in a."
count = 0
for i in a:
if i == b:
count += 1
return count
def delitem(a, b):
"Same as del a[b]."
del a[b]
__delitem__ = delitem
def getitem(a, b):
"Same as a[b]."
return a[b]
__getitem__ = getitem
#fixme brython doesn't like this function
def indexOf(a, b):
"Return the first index of b in a."
#for i, j in _bi.enumerate(a):
for i, j in enumerate(a):
if j == b:
return i
else:
raise ValueError('b not found in a')
def setitem(a, b, c):
"Same as a[b] = c."
a[b] = c
__setitem__ = setitem
class attrgetter:
"""
Return a callable object that fetches the given attribute(s) from its operand.
After f=attrgetter('name'), the call f(r) returns r.name.
After g=attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
After h=attrgetter('name.first', 'name.last'), the call h(r) returns
(r.name.first, r.name.last).
"""
def __init__(self, attr, *attrs):
self._attrs = (attr,)
self._attrs += attrs
if any(not isinstance(attr, str) for attr in self._attrs):
raise TypeError('attribute name must be a string')
@staticmethod
def _resolve_attr(obj, attr):
for name in attr.split('.'):
#obj = _bi.getattr(obj, name)
obj = getattr(obj, name)
return obj
def __call__(self, obj):
if len(self._attrs) == 1:
return self._resolve_attr(obj, self._attrs[0])
return tuple(self._resolve_attr(obj, attr) for attr in self._attrs)
class itemgetter:
"""
Return a callable object that fetches the given item(s) from its operand.
After f=itemgetter(2), the call f(r) returns r[2].
After g=itemgetter(2,5,3), the call g(r) returns (r[2], r[5], r[3])
"""
def __init__(self, item, *items):
self._items = (item,)
self._items += items
def __call__(self, obj):
if len(self._items) == 1:
return obj[self._items[0]]
return tuple(obj[item] for item in self._items)
class methodcaller:
"""
Return a callable object that calls the given method on its operand.
After f = methodcaller('name'), the call f(r) returns r.name().
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
r.name('date', foo=1).
"""
def __init__(self, name, *args, **kwargs):
self._name = name
self._args = args
self._kwargs = kwargs
def __call__(self, obj):
return getattr(obj, self._name)(*self._args, **self._kwargs)
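# Brief illustration of the three callable factories above (hypothetical data,
# not part of the original module):
#
#   from collections import namedtuple
#   Point = namedtuple('Point', 'x y')
#   attrgetter('x', 'y')(Point(1, 2))      # (1, 2)
#   itemgetter(0)([10, 20, 30])            # 10
#   methodcaller('count', 2)([1, 2, 2])    # 2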
def iadd(a, b):
"Same as a += b."
a += b
return a
__iadd__ = iadd
def iand(a, b):
"Same as a &= b."
a &= b
return a
__iand__ = iand
def iconcat(a, b):
"Same as a += b, for a and b sequences."
if not (hasattr(a, '__getitem__') and hasattr(b, '__getitem__')):
raise TypeError('a and b must be sequences')
a += b
return a
__iconcat__ = iconcat
def ifloordiv(a, b):
"Same as a //= b."
a //= b
return a
__ifloordiv__ = ifloordiv
def ilshift(a, b):
"Same as a <<= b."
a <<= b
return a
__ilshift__ = ilshift
def imod(a, b):
"Same as a %= b."
a %= b
return a
__imod__ = imod
def imul(a, b):
"Same as a *= b."
a *= b
return a
__imul__ = imul
def ior(a, b):
"Same as a |= b."
a |= b
return a
__ior__ = ior
def ipow(a, b):
"Same as a **= b."
a **=b
return a
__ipow__ = ipow
def irshift(a, b):
"Same as a >>= b."
a >>= b
return a
__irshift__ = irshift
def isub(a, b):
"Same as a -= b."
a -= b
return a
__isub__ = isub
def itruediv(a, b):
"Same as a /= b."
a /= b
return a
__itruediv__ = itruediv
def ixor(a, b):
"Same as a ^= b."
a ^= b
return a
__ixor__ = ixor
def length_hint(obj, default=0):
"""
Return an estimate of the number of items in obj.
This is useful for presizing containers when building from an iterable.
If the object supports len(), the result will be exact. Otherwise, it may
over- or under-estimate by an arbitrary amount. The result will be an
integer >= 0.
"""
try:
return len(obj)
except TypeError:
try:
val = obj.__length_hint__()
if val is NotImplemented:
raise TypeError
except (AttributeError, TypeError):
return default
else:
            if val < 0:
                raise ValueError('__length_hint__() should return >= 0')
return val
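# Sketch of the expected behaviour (illustrative values only):
#
#   length_hint([1, 2, 3])    # 3, exact via len()
#   length_hint(object(), 4)  # 4: no len() and no __length_hint__, so the default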
#try:
# from _operator import *
# from _operator import __doc__
#except ImportError:
# pass
| gpl-3.0 |
jgeskens/django | tests/admin_views/models.py | 5 | 17704 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import tempfile
import os
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, null=True, blank=True)
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
@python_2_unicode_compatible
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
@python_2_unicode_compatible
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra1: %s' % self.xtra
@python_2_unicode_compatible
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
@python_2_unicode_compatible
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
@python_2_unicode_compatible
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField()
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
@python_2_unicode_compatible
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Inquisition(models.Model):
expected = models.BooleanField()
leader = models.ForeignKey(Actor)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
@python_2_unicode_compatible
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(Inquisition, limit_choices_to={'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
})
def __str__(self):
return self.title
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)
),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
@python_2_unicode_compatible
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
@python_2_unicode_compatible
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False)
name = models.CharField(max_length=30, blank=True)
@python_2_unicode_compatible
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR']))
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
recommender = models.ForeignKey(Recommender)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector)
expensive = models.BooleanField(default=True)
@python_2_unicode_compatible
class Category(models.Model):
collector = models.ForeignKey(Collector)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
class Link(models.Model):
posted = models.DateField(
default=lambda: datetime.date.today() - datetime.timedelta(days=7)
)
url = models.URLField()
post = models.ForeignKey("Post")
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField()
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
@python_2_unicode_compatible
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
@python_2_unicode_compatible
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, related_name='lead_plots')
contact = models.ForeignKey(Villain, related_name='contact_plots')
tags = generic.GenericRelation(FunkyTag)
def __str__(self):
return self.name
@python_2_unicode_compatible
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot)
def __str__(self):
return self.details
@python_2_unicode_compatible
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain)
def __str__(self):
return self.location
@python_2_unicode_compatible
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain)
def __str__(self):
return self.location
@python_2_unicode_compatible
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo')
def __str__(self):
return self.name
@python_2_unicode_compatible
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping')
class Album(models.Model):
owner = models.ForeignKey(User)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee)
class Question(models.Model):
question = models.CharField(max_length=20)
@python_2_unicode_compatible
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
@python_2_unicode_compatible
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
@python_2_unicode_compatible
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField()
slug = models.SlugField(max_length=1000)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
@python_2_unicode_compatible
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField()
slug2 = models.SlugField()
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated)
name = models.CharField(max_length=75)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')))
| bsd-3-clause |
gcsadovy/generalPY | final_project.py | 1 | 2736 | #final_project_code
#gcsadovy
#Garik Sadovy
#takes a data series of coordinates of fisher sightings over a period of years
#in kml files, returns statistics on clustering, creates a weighted
#overlay, and loads the data onto a map
#input arguments: directory containing kml files to be used,
#beginning year, ending year, a folder to store geodatabase files and layers,
#and the field holding the trapping count
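#
#illustrative invocation (hypothetical paths and years), matching the five
#arguments read from sys.argv below:
#   python final_project.py C:/data/fisher_kml 2005 2010 C:/data/fisher_gdb PopupInfo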
import arcpy, sys, os
def dictionary(list, dictionary, begYear, endYear):
n = int(begYear)
while n <= int(endYear):
for file in list:
y = str(n)
z = file
if y in z:
dictionary[z] = n
n = n + 1
return dictionary
def layer2shapefile(dictionary, directory):
for k, v in dictionary.items():
arcpy.FeatureClassToShapefile_conversion(v, directory)
list1 = []
for (directory, dirs, files) in os.walk(directory):
for file in files:
if file.endswith(".shp") == True:
list1.append(file)
return list1
folderIn = sys.argv[1] #folder containing the kml files
begYear = sys.argv[2] #the year you want to start with
endYear = sys.argv[3] #the year you want to end with
gdbName = sys.argv[4] #the name of a folder for your geodatabase files
ZField = sys.argv[5] #the field for the kml files that holds the trapping count (usually 'PopupInfo')
arcpy.env.workspace = folderIn
arcpy.env.overwriteOutput = True
#get a list of all of the Fisher kml files in a directory
try:
list1 = []
for file in os.listdir(folderIn):
if 'Fisher' in file and file.endswith('.kml') == True:
list1.append(file)
except:
arcpy.GetMessages()
#get a dictionary with file names as keys and corresponding years as values
#within beginning and ending parameters
try:
dic1 = {}
dic2 = dictionary(list1, dic1, begYear, endYear)
except:
arcpy.GetMessages()
#convert kml files to layers
#create dictionary with years as keys and full pathways to gdb layer files as values
try:
dic3 = {}
for k, v in dic2.items():
arcpy.KMLToLayer_conversion(folderIn+'/'+k, gdbName, k[:-4], "NO_GROUNDOVERLAY")
if dic3.has_key(v):
dic3[v].append(gdbName+'/'+k[:-4]+'.gdb/Placemarks/Points')
else:
            dic3[v] = [gdbName+'/'+k[:-4]+'.gdb/Placemarks/Points']
except:
arcpy.GetMessages()
#create shapefiles from layers
try:
list2 = layer2shapefile(dic3, gdbName)
print list2
except:
arcpy.GetMessages()
#part 4 - geoprocessing
#try:
#for shapeFile in list:
#arcpy.gp.Kriging_sa(shapeFile,
| gpl-3.0 |
freddy77/linux | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
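#
# Illustrative usage sketch (the exact perf invocation is assumed, not part of
# this file): record the scheduler events handled below, then replay them
# through perf's Python scripting engine:
#
#   perf record -a -e sched:sched_switch -e sched:sched_wakeup \
#               -e sched:sched_wakeup_new -e sched:sched_migrate_task sleep 5
#   perf script -s sched-migration.py
#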
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
offtherailz/mapstore | mapcomposer/app/static/externals/openlayers/tools/shrinksafe.py | 293 | 1498 | #!/usr/bin/env python
#
# Script to provide a wrapper around the ShrinkSafe "web service"
# <http://shrinksafe.dojotoolkit.org/>
#
#
# We use this script for two reasons:
#
# * This avoids having to install and configure Java and the standalone
# ShrinkSafe utility.
#
# * The current ShrinkSafe standalone utility was broken when we last
# used it.
#
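#
# Illustrative invocation (assumed), writing the compressed JavaScript to
# stdout; "-" reads the source from stdin:
#
#   python shrinksafe.py OpenLayers.js > OpenLayers.min.js
#   cat foo.js | python shrinksafe.py - > foo.min.js
#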
import sys
import urllib
import urllib2
URL_SHRINK_SAFE = "http://shrinksafe.dojotoolkit.org/shrinksafe.php"
# This would normally be dynamically generated:
BOUNDARY_MARKER = "---------------------------72288400411964641492083565382"
if __name__ == "__main__":
## Grab the source code
try:
sourceFilename = sys.argv[1]
except:
print "Usage: %s (<source filename>|-)" % sys.argv[0]
raise SystemExit
if sourceFilename == "-":
sourceCode = sys.stdin.read()
sourceFilename = "stdin.js"
else:
sourceCode = open(sourceFilename).read()
## Create the request replicating posting of the form from the web page
request = urllib2.Request(url=URL_SHRINK_SAFE)
request.add_header("Content-Type",
"multipart/form-data; boundary=%s" % BOUNDARY_MARKER)
request.add_data("""
--%s
Content-Disposition: form-data; name="shrinkfile[]"; filename="%s"
Content-Type: application/x-javascript
%s
""" % (BOUNDARY_MARKER, sourceFilename, sourceCode))
## Deliver the result
print urllib2.urlopen(request).read(),
| gpl-3.0 |
luhn/AutobahnPython | examples/twisted/websocket/echo_site_tls/server.py | 18 | 2328 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource, \
HTTPChannelHixie76Aware
class EchoServerProtocol(WebSocketServerProtocol):
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
'keys/server.crt')
factory = WebSocketServerFactory("wss://localhost:8080",
debug = debug,
debugCodePaths = debug)
factory.protocol = EchoServerProtocol
factory.setProtocolOptions(allowHixie76 = True) # needed if Hixie76 is to be supported
resource = WebSocketResource(factory)
   ## we serve static files under "/" ..
root = File(".")
## and our WebSocket server under "/ws"
root.putChild("ws", resource)
## both under one Twisted Web Site
site = Site(root)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported
reactor.listenSSL(8080, site, contextFactory)
reactor.run()
| apache-2.0 |
florian-dacosta/OpenUpgrade | addons/delivery/stock.py | 32 | 8892 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# Overloaded stock_picking to manage carriers :
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def _cal_weight(self, cr, uid, ids, name, args, context=None):
res = {}
for picking in self.browse(cr, uid, ids, context=context):
total_weight = total_weight_net = 0.00
for move in picking.move_lines:
total_weight += move.weight
total_weight_net += move.weight_net
res[picking.id] = {
'weight': total_weight,
'weight_net': total_weight_net,
}
return res
def _get_picking_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
result[line.picking_id.id] = True
return result.keys()
_columns = {
'carrier_id':fields.many2one("delivery.carrier","Carrier"),
'volume': fields.float('Volume'),
'weight': fields.function(_cal_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight',
store={
'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 20),
'stock.move': (_get_picking_line, ['product_id','product_qty','product_uom','product_uos_qty'], 20),
}),
'weight_net': fields.function(_cal_weight, type='float', string='Net Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight',
store={
'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 20),
'stock.move': (_get_picking_line, ['product_id','product_qty','product_uom','product_uos_qty'], 20),
}),
'carrier_tracking_ref': fields.char('Carrier Tracking Ref', size=32),
'number_of_packages': fields.integer('Number of Packages'),
'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of measurement for Weight",),
}
def _prepare_shipping_invoice_line(self, cr, uid, picking, invoice, context=None):
"""Prepare the invoice line to add to the shipping costs to the shipping's
invoice.
:param browse_record picking: the stock picking being invoiced
:param browse_record invoice: the stock picking's invoice
:return: dict containing the values to create the invoice line,
or None to create nothing
"""
carrier_obj = self.pool.get('delivery.carrier')
grid_obj = self.pool.get('delivery.grid')
if not picking.carrier_id or \
any(inv_line.product_id.id == picking.carrier_id.product_id.id
for inv_line in invoice.invoice_line):
return None
grid_id = carrier_obj.grid_get(cr, uid, [picking.carrier_id.id],
picking.partner_id.id, context=context)
if not grid_id:
raise osv.except_osv(_('Warning!'),
_('The carrier %s (id: %d) has no delivery grid!') \
% (picking.carrier_id.name,
picking.carrier_id.id))
price = grid_obj.get_price_from_picking(cr, uid, grid_id,
invoice.amount_untaxed, picking.weight, picking.volume,
context=context)
account_id = picking.carrier_id.product_id.property_account_income.id
if not account_id:
account_id = picking.carrier_id.product_id.categ_id\
.property_account_income_categ.id
taxes = picking.carrier_id.product_id.taxes_id
partner = picking.partner_id or False
if partner:
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, partner.property_account_position, account_id)
taxes_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, taxes)
else:
taxes_ids = [x.id for x in taxes]
return {
'name': picking.carrier_id.name,
'invoice_id': invoice.id,
'uos_id': picking.carrier_id.product_id.uos_id.id,
'product_id': picking.carrier_id.product_id.id,
'account_id': account_id,
'price_unit': price,
'quantity': 1,
'invoice_line_tax_id': [(6, 0, taxes_ids)],
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
invoice = self.browse(cr, uid, invoice_id, context=context)
invoice_line = self._prepare_shipping_invoice_line(cr, uid, picking, invoice, context=context)
if invoice_line:
invoice_line_obj.create(cr, uid, invoice_line)
return invoice_id
def _get_default_uom(self, cr, uid, context=None):
uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0]
_defaults = {
'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
}
class stock_move(osv.osv):
_inherit = 'stock.move'
def _cal_move_weight(self, cr, uid, ids, name, args, context=None):
res = {}
uom_obj = self.pool.get('product.uom')
for move in self.browse(cr, uid, ids, context=context):
weight = weight_net = 0.00
if move.product_id.weight > 0.00:
converted_qty = move.product_qty
if move.product_uom.id <> move.product_id.uom_id.id:
converted_qty = uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, move.product_id.uom_id.id)
weight = (converted_qty * move.product_id.weight)
if move.product_id.weight_net > 0.00:
weight_net = (converted_qty * move.product_id.weight_net)
res[move.id] = {
'weight': weight,
'weight_net': weight_net,
}
return res
_columns = {
'weight': fields.function(_cal_move_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight',
store={
'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_qty', 'product_uom'], 20),
}),
'weight_net': fields.function(_cal_move_weight, type='float', string='Net weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight',
store={
'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_qty', 'product_uom'], 20),
}),
        'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of measurement for Weight",),
}
def _get_default_uom(self, cr, uid, context=None):
uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id),('factor','=',1)])[0]
_defaults = {
'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
noironetworks/nova | nova/db/sqlalchemy/migrate_repo/versions/273_sqlite_foreign_keys.py | 79 | 4690 | # Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
from oslo_db.sqlalchemy import utils
from sqlalchemy import MetaData, schema, Table
FKEYS = [
('fixed_ips', 'instance_uuid', 'instances', 'uuid',
'fixed_ips_instance_uuid_fkey'),
('block_device_mapping', 'instance_uuid', 'instances', 'uuid',
'block_device_mapping_instance_uuid_fkey'),
('instance_info_caches', 'instance_uuid', 'instances', 'uuid',
'instance_info_caches_instance_uuid_fkey'),
('instance_metadata', 'instance_uuid', 'instances', 'uuid',
'instance_metadata_instance_uuid_fkey'),
('instance_system_metadata', 'instance_uuid', 'instances', 'uuid',
'instance_system_metadata_ibfk_1'),
('instance_type_projects', 'instance_type_id', 'instance_types', 'id',
'instance_type_projects_ibfk_1'),
('iscsi_targets', 'volume_id', 'volumes', 'id',
'iscsi_targets_volume_id_fkey'),
('reservations', 'usage_id', 'quota_usages', 'id',
'reservations_ibfk_1'),
('security_group_instance_association', 'instance_uuid',
'instances', 'uuid',
'security_group_instance_association_instance_uuid_fkey'),
('security_group_instance_association', 'security_group_id',
'security_groups', 'id',
'security_group_instance_association_ibfk_1'),
('virtual_interfaces', 'instance_uuid', 'instances', 'uuid',
'virtual_interfaces_instance_uuid_fkey'),
('compute_nodes', 'service_id', 'services', 'id',
'fk_compute_nodes_service_id'),
('instance_actions', 'instance_uuid', 'instances', 'uuid',
'fk_instance_actions_instance_uuid'),
('instance_faults', 'instance_uuid', 'instances', 'uuid',
'fk_instance_faults_instance_uuid'),
('migrations', 'instance_uuid', 'instances', 'uuid',
'fk_migrations_instance_uuid'),
]
UNIQUES = [
('compute_nodes', 'uniq_compute_nodes0host0hypervisor_hostname',
['host', 'hypervisor_hostname']),
('fixed_ips', 'uniq_fixed_ips0address0deleted',
['address', 'deleted']),
('instance_info_caches', 'uniq_instance_info_caches0instance_uuid',
['instance_uuid']),
('instance_type_projects',
'uniq_instance_type_projects0instance_type_id0project_id0deleted',
['instance_type_id', 'project_id', 'deleted']),
('pci_devices', 'uniq_pci_devices0compute_node_id0address0deleted',
['compute_node_id', 'address', 'deleted']),
('virtual_interfaces', 'uniq_virtual_interfaces0address0deleted',
['address', 'deleted']),
]
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == 'sqlite':
# SQLite is also missing this one index
if not utils.index_exists(migrate_engine, 'fixed_ips', 'address'):
utils.add_index(migrate_engine, 'fixed_ips', 'address',
['address'])
for src_table, src_column, dst_table, dst_column, name in FKEYS:
src_table = Table(src_table, meta, autoload=True)
if name in set(fk.name for fk in src_table.foreign_keys):
continue
src_column = src_table.c[src_column]
dst_table = Table(dst_table, meta, autoload=True)
dst_column = dst_table.c[dst_column]
fkey = ForeignKeyConstraint(columns=[src_column],
refcolumns=[dst_column],
name=name)
fkey.create()
# SQLAlchemy versions < 1.0.0 don't reflect unique constraints
# for SQLite correctly causing sqlalchemy-migrate to recreate
# some tables with missing unique constraints. Re-add some
# potentially missing unique constraints as a workaround.
for table_name, name, column_names in UNIQUES:
table = Table(table_name, meta, autoload=True)
        if name in set(c.name for c in table.constraints
                       if isinstance(c, schema.UniqueConstraint)):
continue
uc = UniqueConstraint(*column_names, table=table, name=name)
uc.create()
| apache-2.0 |
SickGear/SickGear | lib/sg_futures/futures/thread.py | 2 | 7291 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
import atexit
from six import PY2
if PY2:
from . import _base
else:
from concurrent.futures import _base
import itertools
import Queue as queue
import threading
import weakref
import sys
try:
from multiprocessing import cpu_count
except ImportError:
# some platforms don't have multiprocessing
def cpu_count():
return None
__author__ = 'Brian Quinlan ([email protected])'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items()) if _threads_queues else ()
for t, q in items:
q.put(None)
for t, q in items:
t.join(sys.maxint)
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except:
e, tb = sys.exc_info()[1:]
self.future.set_exception_info(e, tb)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue, initializer, initargs):
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
executor = executor_reference()
if executor is not None:
executor._initializer_failed()
return
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
# attempt to increment idle count
executor = executor_reference()
if executor is not None:
executor._idle_semaphore.release()
del executor
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class BrokenThreadPool(_base.BrokenExecutor):
"""
Raised when a worker thread in a ThreadPoolExecutor failed initializing.
"""
class ThreadPoolExecutor(_base.Executor):
# Used to assign unique thread names when thread_name_prefix is not supplied.
_counter = itertools.count().next
def __init__(self, max_workers=None, thread_name_prefix='', initializer=None, initargs=()):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
            initializer: A callable used to initialize worker threads.
            initargs: A tuple of arguments to pass to the initializer.
        """
if max_workers is None:
# Use this number because ThreadPoolExecutor is often
# used to overlap I/O instead of CPU work.
max_workers = (cpu_count() or 1) * 5
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
self._initializer = initializer
self._initargs = initargs
self._work_queue = queue.Queue()
self._idle_semaphore = threading.Semaphore(0)
self._threads = set()
self._broken = False
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._thread_name_prefix = (thread_name_prefix or
("ThreadPoolExecutor-%d" % self._counter()))
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenThreadPool(self._broken)
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# if idle threads are available, don't spin new threads
if self._idle_semaphore.acquire(False):
return
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
num_threads = len(self._threads)
if num_threads < self._max_workers:
thread_name = '%s_%d' % (self._thread_name_prefix or self,
num_threads)
t = threading.Thread(name=thread_name, target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue, self._initializer, self._initargs))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def _initializer_failed(self):
with self._shutdown_lock:
self._broken = ('A thread initializer failed, the thread pool '
'is not usable anymore')
# Drain work queue and mark pending futures failed
while True:
try:
work_item = self._work_queue.get_nowait()
except queue.Empty:
break
if work_item is not None:
work_item.future.set_exception(BrokenThreadPool(self._broken))
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join(sys.maxint)
shutdown.__doc__ = _base.Executor.shutdown.__doc__
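# Illustrative usage sketch (not part of the original module); the executor
# follows the standard concurrent.futures API, so e.g.:
#
#   with ThreadPoolExecutor(max_workers=4) as executor:
#       futures = [executor.submit(pow, 2, n) for n in range(8)]
#       results = [f.result() for f in futures]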
| gpl-3.0 |
thechampanurag/django-oscar | tests/integration/catalogue/reviews/model_tests.py | 35 | 4527 | from django.test import TestCase
from django.core.exceptions import ValidationError
from oscar.core.compat import get_user_model
from oscar.apps.catalogue.reviews import models
from oscar.test.factories import create_product
from oscar.test.factories import UserFactory
User = get_user_model()
class TestAnAnonymousReview(TestCase):
def setUp(self):
self.product = create_product()
self.data = {
'product': self.product,
'title': 'This product is lovely',
'body': 'I really like this cheese',
'score': 0,
'name': 'JR Hartley',
'email': '[email protected]'
}
def review(self, **kwargs):
if kwargs:
data = self.data.copy()
data.update(kwargs)
else:
data = self.data
return models.ProductReview(**data)
def test_can_be_created(self):
review = self.review()
review.full_clean()
def test_requires_a_title(self):
review = self.review(title="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_a_body(self):
review = self.review(body="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_a_name(self):
review = self.review(name="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_an_email_address(self):
review = self.review(email="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_non_whitespace_title(self):
review = self.review(title=" ")
self.assertRaises(ValidationError, review.full_clean)
def test_starts_with_no_votes(self):
review = self.review()
review.save()
self.assertFalse(review.has_votes)
self.assertEqual(0, review.num_up_votes)
self.assertEqual(0, review.num_down_votes)
def test_has_reviewer_name_property(self):
review = self.review(name="Dave")
self.assertEqual("Dave", review.reviewer_name)
class TestAUserReview(TestCase):
def setUp(self):
self.product = create_product()
self.user = UserFactory(first_name="Tom", last_name="Thumb")
self.data = {
'product': self.product,
'title': 'This product is lovely',
'body': 'I really like this cheese',
'score': 0,
'user': self.user
}
def review(self, **kwargs):
if kwargs:
data = self.data.copy()
data.update(kwargs)
else:
data = self.data
return models.ProductReview(**data)
def test_can_be_created(self):
review = self.review()
review.full_clean()
def test_requires_a_title(self):
review = self.review(title="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_a_body(self):
review = self.review(body="")
self.assertRaises(ValidationError, review.full_clean)
def test_has_reviewer_name_property(self):
review = self.review()
self.assertEqual("Tom Thumb", review.reviewer_name)
class TestVotingOnAReview(TestCase):
def setUp(self):
self.product = create_product()
self.user = UserFactory()
self.voter = UserFactory()
self.review = self.product.reviews.create(
title='This is nice',
score=3,
body="This is the body",
user=self.user)
def test_updates_totals_for_upvote(self):
self.review.vote_up(self.voter)
self.assertTrue(self.review.has_votes)
self.assertEqual(1, self.review.total_votes)
self.assertEqual(1, self.review.delta_votes)
def test_updates_totals_for_downvote(self):
self.review.vote_down(self.voter)
self.assertTrue(self.review.has_votes)
self.assertEqual(1, self.review.total_votes)
self.assertEqual(-1, self.review.delta_votes)
def test_is_permitted_for_normal_user(self):
is_allowed, reason = self.review.can_user_vote(self.voter)
self.assertTrue(is_allowed, reason)
def test_is_not_permitted_for_reviewer(self):
is_allowed, reason = self.review.can_user_vote(self.user)
self.assertFalse(is_allowed, reason)
def test_is_not_permitted_for_previous_voter(self):
self.review.vote_up(self.voter)
is_allowed, reason = self.review.can_user_vote(self.voter)
self.assertFalse(is_allowed, reason)
| bsd-3-clause |
fvalenza/pinocchio | python/bindings.py | 1 | 3282 | import pinocchio as se3
from pinocchio.utils import np, npl, rand, skew, zero
from test_case import TestCase
class TestSE3(TestCase):
def setUp(self):
self.R = rand([3, 3])
self.R, _, _ = npl.svd(self.R)
self.p = rand(3)
self.m = se3.SE3(self.R, self.p)
def test_se3(self):
R, p, m = self.R, self.p, self.m
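        # The 6x6 matrix below is the spatial action (adjoint) matrix of the
        # placement m = (R, p), i.e. [[R, skew(p)*R], [0, R]] with the linear
        # components stacked before the angular ones.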
X = np.vstack([np.hstack([R, skew(p) * R]), np.hstack([zero([3, 3]), R])])
self.assertApprox(m.action, X)
M = np.vstack([np.hstack([R, p]), np.matrix('0 0 0 1', np.double)])
self.assertApprox(m.homogeneous, M)
m2 = se3.SE3.Random()
self.assertApprox((m * m2).homogeneous, m.homogeneous * m2.homogeneous)
self.assertApprox((~m).homogeneous, npl.inv(m.homogeneous))
p = rand(3)
self.assertApprox(m * p, m.rotation * p + m.translation)
self.assertApprox(m.actInv(p), m.rotation.T * p - m.rotation.T * m.translation)
p = np.vstack([p, 1])
self.assertApprox(m * p, m.homogeneous * p)
self.assertApprox(m.actInv(p), npl.inv(m.homogeneous) * p)
p = rand(6)
self.assertApprox(m * p, m.action * p)
self.assertApprox(m.actInv(p), npl.inv(m.action) * p)
p = rand(5)
with self.assertRaises(ValueError):
m * p
with self.assertRaises(ValueError):
m.actInv(p)
with self.assertRaises(ValueError):
m.actInv('42')
def test_motion(self):
m = self.m
self.assertApprox(se3.Motion.Zero().vector, zero(6))
v = se3.Motion.Random()
self.assertApprox((m * v).vector, m.action * v.vector)
self.assertApprox((m.actInv(v)).vector, npl.inv(m.action) * v.vector)
vv = v.linear
vw = v.angular
self.assertApprox(v.vector, np.vstack([vv, vw]))
self.assertApprox((v ** v).vector, zero(6))
def test_force(self):
m = self.m
self.assertApprox(se3.Force.Zero().vector, zero(6))
f = se3.Force.Random()
ff = f.linear
ft = f.angular
self.assertApprox(f.vector, np.vstack([ff, ft]))
self.assertApprox((m * f).vector, npl.inv(m.action.T) * f.vector)
self.assertApprox((m.actInv(f)).vector, m.action.T * f.vector)
v = se3.Motion.Random()
f = se3.Force(np.vstack([v.vector[3:], v.vector[:3]]))
self.assertApprox((v ** f).vector, zero(6))
def test_inertia(self):
m = self.m
Y1 = se3.Inertia.Random()
Y2 = se3.Inertia.Random()
Y = Y1 + Y2
self.assertApprox(Y1.matrix() + Y2.matrix(), Y.matrix())
v = se3.Motion.Random()
self.assertApprox((Y * v).vector, Y.matrix() * v.vector)
self.assertApprox((m * Y).matrix(), m.inverse().action.T * Y.matrix() * m.inverse().action)
self.assertApprox((m.actInv(Y)).matrix(), m.action.T * Y.matrix() * m.action)
def test_cross(self):
m = se3.Motion.Random()
f = se3.Force.Random()
self.assertApprox(m ** m, m.cross_motion(m))
self.assertApprox(m ** f, m.cross_force(f))
with self.assertRaises(ValueError):
m ** 2
def test_exp(self):
m = se3.Motion.Random()
self.assertApprox(se3.exp(m), se3.exp6FromMotion(m))
| bsd-2-clause |
jianglu/mojo | mojo/public/tools/mojom_fetcher/pylib/fetcher/repository.py | 26 | 3776 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from fetcher.dependency import Dependency
from fetcher.mojom_directory import MojomDirectory
from fetcher.mojom_file import MojomFile
from mojom.parse.parser import Parse
class Repository(object):
"""Repository represents a code repository on the local disc."""
def __init__(self, root_dir, external_dir):
"""root_dir represents the root of the repository;
external_dir is the relative path of the external directory within the
repository (so, relative to root_dir)
"""
self._root_dir = os.path.normpath(root_dir)
self._external_dir = external_dir
def get_repo_root_directory(self):
return self._root_dir
def get_external_directory(self):
return os.path.join(self._root_dir, self._external_dir)
def get_external_suffix(self):
return self._external_dir
def _os_walk(self, root_directory):
# This method is included for dependency injection
return os.walk(root_directory)
def _open(self, filename):
# This method is included for dependency injection
return open(filename)
def _get_all_mojom_in_directory(self, root_directory):
mojoms = []
for dirname, _, files in self._os_walk(root_directory):
for f in files:
if f.endswith(".mojom"):
mojoms.append(os.path.join(dirname,f))
return mojoms
def _resolve_dependencies(self, dependencies, mojoms):
"""Resolve dependencies between discovered mojoms, so we know which are the
missing ones."""
missing = []
for dependency in dependencies:
found = False
for search_path in dependency.get_search_path_for_dependency():
if os.path.normpath(
os.path.join(search_path,
dependency.get_imported())) in mojoms:
found = True
break
if not found:
missing.append(dependency)
return missing
def get_missing_dependencies(self):
"""get_missing_dependencies returns a set of dependencies that are required
by mojoms in this repository but not available.
"""
# Update the list of available mojoms in this repository.
mojoms = set(self._get_all_mojom_in_directory(self._root_dir))
# Find all declared dependencies
needed_deps = set([])
for mojom in mojoms:
with self._open(mojom) as f:
source = f.read()
tree = Parse(source, mojom)
for dep in tree.import_list:
needed_deps.add(Dependency(self, dep.filename, dep.import_filename))
missing_deps = self._resolve_dependencies(needed_deps, mojoms)
return missing_deps
def get_external_urls(self):
"""Get all external mojom files in this repository, by urls (without
scheme)."""
mojoms = set(self._get_all_mojom_in_directory(
self.get_external_directory()))
urls = []
for mojom in mojoms:
urls.append(os.path.relpath(mojom, self.get_external_directory()))
return urls
def get_all_external_mojom_directories(self):
"""Get all external directories populated with their mojom files."""
mojoms = self._get_all_mojom_in_directory(self.get_external_directory())
directories = {}
for mojom_path in mojoms:
directory_path = os.path.dirname(mojom_path)
directory = directories.setdefault(
directory_path, MojomDirectory(directory_path))
with self._open(mojom_path) as f:
source = f.read()
tree = Parse(source, mojom_path)
mojom = MojomFile(self, mojom_path)
directory.add_mojom(mojom)
for dep in tree.import_list:
mojom.add_dependency(dep.import_filename)
return directories.values()
| bsd-3-clause |
tkinz27/ansible | v1/ansible/runner/lookup_plugins/sequence.py | 85 | 7309 | # (c) 2013, Jayson Vantuyl <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.errors import AnsibleError
import ansible.utils as utils
from re import compile as re_compile, IGNORECASE
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
"^(" + # Group 0
NUM + # Group 1: Start
"-)?" +
NUM + # Group 2: End
"(/" + # Group 3
NUM + # Group 4: Stride
")?" +
"(:(.+))?$", # Group 5, Group 6: Format String
IGNORECASE
)
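# For illustration (hypothetical terms): "2-10/2:node%02d" matches with
# start="2", end="10", stride="2" and format="node%02d"; a bare "5" matches
# with only the end group set and everything else left as None.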
class LookupModule(object):
"""
sequence lookup module
Used to generate some sequence of items. Takes arguments in two forms.
The simple / shortcut form is:
[start-]end[/stride][:format]
As indicated by the brackets: start, stride, and format string are all
optional. The format string is in the style of printf. This can be used
to pad with zeros, format in hexadecimal, etc. All of the numerical values
can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
Negative numbers are not supported.
Some examples:
5 -> ["1","2","3","4","5"]
5-8 -> ["5", "6", "7", "8"]
2-10/2 -> ["2", "4", "6", "8", "10"]
4:host%02d -> ["host01","host02","host03","host04"]
The standard Ansible key-value form is accepted as well. For example:
start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]
This format takes an alternate form of "end" called "count", which counts
some number from the starting value. For example:
count=5 -> ["1", "2", "3", "4", "5"]
start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
The count option is mostly useful for avoiding off-by-one errors and errors
calculating the number of entries in a sequence when a stride is specified.
"""
def __init__(self, basedir, **kwargs):
"""absorb any keyword args"""
self.basedir = basedir
def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d"
def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in ["start", "end", "count", "stride"]:
try:
arg_raw = args.pop(arg, None)
if arg_raw is None:
continue
arg_cooked = int(arg_raw, 0)
setattr(self, arg, arg_cooked)
except ValueError:
raise AnsibleError(
"can't parse arg %s=%r as integer"
% (arg, arg_raw)
)
if 'format' in args:
self.format = args.pop("format")
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %r"
% args.keys()
)
def parse_simple_args(self, term):
"""parse the shortcut forms, return True/False"""
match = SHORTCUT.match(term)
if not match:
return False
_, start, end, _, stride, _, format = match.groups()
if start is not None:
try:
start = int(start, 0)
except ValueError:
raise AnsibleError("can't parse start=%s as integer" % start)
if end is not None:
try:
end = int(end, 0)
except ValueError:
raise AnsibleError("can't parse end=%s as integer" % end)
if stride is not None:
try:
stride = int(stride, 0)
except ValueError:
raise AnsibleError("can't parse stride=%s as integer" % stride)
if start is not None:
self.start = start
if end is not None:
self.end = end
if stride is not None:
self.stride = stride
if format is not None:
self.format = format
def sanity_check(self):
if self.count is None and self.end is None:
raise AnsibleError(
"must specify count or end in with_sequence"
)
elif self.count is not None and self.end is not None:
raise AnsibleError(
"can't specify both count and end in with_sequence"
)
elif self.count is not None:
# convert count to end
if self.count != 0:
self.end = self.start + self.count * self.stride - 1
else:
self.start = 0
self.end = 0
self.stride = 0
del self.count
if self.stride > 0 and self.end < self.start:
raise AnsibleError("to count backwards make stride negative")
if self.stride < 0 and self.end > self.start:
raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
if self.stride > 0:
adjust = 1
else:
adjust = -1
numbers = xrange(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
formatted = self.format % i
yield formatted
except (ValueError, TypeError):
raise AnsibleError(
"problem formatting %r with %r" % self.format
)
def run(self, terms, inject=None, **kwargs):
results = []
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
for term in terms:
try:
self.reset() # clear out things for this iteration
try:
if not self.parse_simple_args(term):
self.parse_kv_args(utils.parse_kv(term))
except Exception:
raise AnsibleError(
"unknown error parsing with_sequence arguments: %r"
% term
)
self.sanity_check()
if self.start != self.end:
results.extend(self.generate_sequence())
except AnsibleError:
raise
except Exception, e:
raise AnsibleError(
"unknown error generating sequence: %s" % str(e)
)
return results
| gpl-3.0 |
joetsoi/moonstone | python/assets/collide.py | 1 | 1897 | from collections import UserList, defaultdict
from enum import IntEnum, auto
from pathlib import Path, PureWindowsPath
from resources.extract import grouper
from settings import MOONSTONE_DIR
class Colliders(UserList):
def __init__(self, data=None):
super().__init__(data)
self.last_len = 0
self._max = (None, None)
@property
def max(self):
if len(self.data) != self.last_len:
self._max = (
max((i[0] for i in self.data), default=None),
max((i[1] for i in self.data), default=None),
)
return self._max
class ParseState(IntEnum):
FILENAME = auto()
COUNT = auto()
PADDING = auto()
COORDINATES = auto()
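# Illustrative collision-file layout (hypothetical values, inferred from the
# state machine implemented below): a filename line, a pair count, one padding
# line, then one line of packed 3-digit coordinates; a count of 99 returns to
# the FILENAME state.
#
#   knight1.cel
#   2
#   (padding line, ignored)
#   010020030040   -> Colliders([(10, 20), (30, 40)])
#   99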
def parse_collision_file(collide_data):
collide_dict = defaultdict(list)
state = ParseState.FILENAME
pair_count = None
for line in collide_data:
if state == ParseState.FILENAME:
collide_file = line.strip().lower()
state = ParseState.COUNT
elif state == ParseState.COUNT:
pair_count = int(line)
if pair_count == 0:
collide_dict[collide_file].append(Colliders())
elif pair_count == 99:
state = ParseState.FILENAME
else:
state = ParseState.PADDING
elif state == ParseState.PADDING:
state = ParseState.COORDINATES
elif state == ParseState.COORDINATES:
# convert from list of str to list of ints
groups = (int(''.join(g)) for g in grouper(line.strip(), 3))
# group them into x y pairs
coordinates = Colliders(grouper(groups, 2))
assert len(coordinates) == pair_count
collide_dict[collide_file].append(coordinates)
state = ParseState.COUNT
collide_dict.default_factory = None
return collide_dict
| agpl-3.0 |
DataDog/integrations-core | gunicorn/tests/test_metadata.py | 1 | 2636 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.gunicorn import GUnicornCheck
from .common import CHECK_NAME, CONTAINER_NAME, GUNICORN_VERSION, INSTANCE
# TODO: Test metadata in e2e when we can collect metadata from the agent
CHECK_ID = 'test:123'
def _assert_metadata(datadog_agent):
major, minor, patch = GUNICORN_VERSION.split('.')
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': GUNICORN_VERSION,
}
datadog_agent.assert_metadata(CHECK_ID, version_metadata)
datadog_agent.assert_metadata_count(5)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_instance(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
_assert_metadata(datadog_agent)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_init_config(aggregator, datadog_agent, setup_gunicorn):
init_config = {'gunicorn': setup_gunicorn['gunicorn_bin_path']}
check = GUnicornCheck(CHECK_NAME, init_config, [INSTANCE])
check.check_id = CHECK_ID
check.check(INSTANCE)
_assert_metadata(datadog_agent)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
@pytest.mark.usefixtures('dd_environment')
def test_collect_metadata_docker(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = 'docker exec {} gunicorn'.format(CONTAINER_NAME)
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
_assert_metadata(datadog_agent)
def test_collect_metadata_count(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']
check = GUnicornCheck(CHECK_NAME, {}, [instance])
    check.check_id = CHECK_ID
check.check(instance)
datadog_agent.assert_metadata_count(5)
def test_collect_metadata_invalid_binary(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = '/bin/not_exist'
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
datadog_agent.assert_metadata_count(0)
| bsd-3-clause |
haricot/djangocms-bs4forcascade | cmsplugin_bs4forcascade/bootstrap4/secondary_menu.py | 1 | 2963 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _, get_language_from_request
from cms.plugin_pool import plugin_pool
from cms.models.pagemodel import Page
from cmsplugin_cascade.fields import GlossaryField
from .plugin_base import BootstrapPluginBase
class BootstrapSecondaryMenuPlugin(BootstrapPluginBase):
"""
Use this plugin to display a secondary menu in arbitrary locations.
This renders links onto all CMS pages, which are children of the selected Page Id.
"""
name = _("Secondary Menu")
default_css_class = 'list-group'
require_parent = True
parent_classes = ('Bootstrap4ColumnPlugin',)
allow_children = False
render_template = 'cascade/bootstrap4/secmenu-list-group.html'
page_id = GlossaryField(
widgets.Select(choices=()),
label=_("CMS Page Id"),
help_text=_("Select a CMS page with a given unique Id (in advanced settings).")
)
offset = GlossaryField(
widgets.NumberInput(),
label=_("Offset"),
initial=0,
help_text=_("Starting from which child menu."),
)
limit = GlossaryField(
widgets.NumberInput(),
label=_("Limit"),
initial=100,
help_text=_("Number of child menus."),
)
glossary_field_order = ['page_id', 'offset', 'limit']
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapSecondaryMenuPlugin, cls).get_identifier(obj)
content = obj.glossary.get('page_id', '')
return format_html('{0}{1}', identifier, content)
def get_form(self, request, obj=None, **kwargs):
lang = get_language_from_request(request)
choices = {}
for page in Page.objects.filter(reverse_id__isnull=False).order_by('publisher_is_draft'):
if page.reverse_id not in choices:
choices[page.reverse_id] = page.get_title(lang)
next(iter(self.glossary_fields)).widget.choices = list(choices.items())
return super(BootstrapSecondaryMenuPlugin, self).get_form(request, obj, **kwargs)
def render(self, context, instance, placeholder):
context = self.super(BootstrapSecondaryMenuPlugin, self).render(context, instance, placeholder)
context.update({
'page_id': instance.glossary['page_id'],
'offset': instance.glossary.get('offset', 0),
'limit': instance.glossary.get('limit', 100),
})
return context
@classmethod
def sanitize_model(cls, instance):
try:
if int(instance.glossary['offset']) < 0 or int(instance.glossary['limit']) < 0:
raise ValueError()
except (KeyError, ValueError):
instance.glossary['offset'] = 0
instance.glossary['limit'] = 100
plugin_pool.register_plugin(BootstrapSecondaryMenuPlugin)
| mit |
lihui7115/ChromiumGStreamerBackend | build/android/adb_logcat_printer.py | 69 | 7089 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shutdown adb_logcat_monitor and print accumulated logs.
To test, call './adb_logcat_printer.py <base_dir>' where
<base_dir> contains 'adb logcat -v threadtime' files named as
logcat_<deviceID>_<sequenceNum>
The script will print the files to stdout (or --output-path), and will combine multiple
logcats from a single device if there is overlap.
Additionally, if a <base_dir>/LOGCAT_MONITOR_PID exists, the script
will attempt to terminate the contained PID by sending a SIGTERM and
monitoring for the deletion of the aforementioned file.
"""
# pylint: disable=W0702
import cStringIO
import logging
import optparse
import os
import re
import signal
import sys
import time
# Set this to debug for more verbose output
LOG_LEVEL = logging.INFO
def CombineLogFiles(list_of_lists, logger):
"""Splices together multiple logcats from the same device.
Args:
list_of_lists: list of pairs (filename, list of timestamped lines)
logger: handler to log events
Returns:
list of lines with duplicates removed
"""
cur_device_log = ['']
for cur_file, cur_file_lines in list_of_lists:
# Ignore files with just the logcat header
if len(cur_file_lines) < 2:
continue
common_index = 0
# Skip this step if list just has empty string
if len(cur_device_log) > 1:
try:
line = cur_device_log[-1]
# Used to make sure we only splice on a timestamped line
if re.match(r'^\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} ', line):
common_index = cur_file_lines.index(line)
else:
logger.warning('splice error - no timestamp in "%s"?', line.strip())
except ValueError:
# The last line was valid but wasn't found in the next file
cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']
logger.info('Unable to splice %s. Incomplete logcat?', cur_file)
cur_device_log += ['*'*30 + ' %s' % cur_file]
cur_device_log.extend(cur_file_lines[common_index:])
return cur_device_log
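# Splice behaviour in brief (illustrative, not real logcat data): if the previously
# combined log ends with the timestamped line "01-02 03:04:05.678 I/foo: bar" and the
# next file also contains that line, the next file is appended starting from it so the
# overlap appears only once; if the line cannot be found, a
# "***** POSSIBLE INCOMPLETE LOGCAT *****" marker is inserted instead.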
def FindLogFiles(base_dir):
"""Search a directory for logcat files.
Args:
base_dir: directory to search
Returns:
Mapping of device_id to a sorted list of file paths for a given device
"""
logcat_filter = re.compile(r'^logcat_(\S+)_(\d+)$')
# list of tuples (<device_id>, <seq num>, <full file path>)
filtered_list = []
for cur_file in os.listdir(base_dir):
matcher = logcat_filter.match(cur_file)
if matcher:
filtered_list += [(matcher.group(1), int(matcher.group(2)),
os.path.join(base_dir, cur_file))]
filtered_list.sort()
file_map = {}
for device_id, _, cur_file in filtered_list:
if device_id not in file_map:
file_map[device_id] = []
file_map[device_id] += [cur_file]
return file_map
def GetDeviceLogs(log_filenames, logger):
"""Read log files, combine and format.
Args:
log_filenames: mapping of device_id to sorted list of file paths
logger: logger handle for logging events
Returns:
list of formatted device logs, one for each device.
"""
device_logs = []
for device, device_files in log_filenames.iteritems():
logger.debug('%s: %s', device, str(device_files))
device_file_lines = []
for cur_file in device_files:
with open(cur_file) as f:
device_file_lines += [(cur_file, f.read().splitlines())]
combined_lines = CombineLogFiles(device_file_lines, logger)
# Prepend each line with a short unique ID so it's easy to see
# when the device changes. We don't use the start of the device
# ID because it can be the same among devices. Example lines:
# AB324: foo
# AB324: blah
device_logs += [('\n' + device[-5:] + ': ').join(combined_lines)]
return device_logs
def ShutdownLogcatMonitor(base_dir, logger):
"""Attempts to shutdown adb_logcat_monitor and blocks while waiting."""
try:
monitor_pid_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
with open(monitor_pid_path) as f:
monitor_pid = int(f.readline())
logger.info('Sending SIGTERM to %d', monitor_pid)
os.kill(monitor_pid, signal.SIGTERM)
i = 0
while True:
time.sleep(.2)
if not os.path.exists(monitor_pid_path):
return
if not os.path.exists('/proc/%d' % monitor_pid):
logger.warning('Monitor (pid %d) terminated uncleanly?', monitor_pid)
return
logger.info('Waiting for logcat process to terminate.')
i += 1
if i >= 10:
logger.warning('Monitor pid did not terminate. Continuing anyway.')
return
except (ValueError, IOError, OSError):
logger.exception('Error signaling logcat monitor - continuing')
def main(argv):
parser = optparse.OptionParser(usage='Usage: %prog [options] <log dir>')
parser.add_option('--output-path',
help='Output file path (if unspecified, prints to stdout)')
options, args = parser.parse_args(argv)
if len(args) != 1:
parser.error('Wrong number of unparsed args')
base_dir = args[0]
if options.output_path:
output_file = open(options.output_path, 'w')
else:
output_file = sys.stdout
log_stringio = cStringIO.StringIO()
logger = logging.getLogger('LogcatPrinter')
logger.setLevel(LOG_LEVEL)
sh = logging.StreamHandler(log_stringio)
sh.setFormatter(logging.Formatter('%(asctime)-2s %(levelname)-8s'
' %(message)s'))
logger.addHandler(sh)
try:
# Wait at least 5 seconds after base_dir is created before printing.
#
# The idea is that 'adb logcat > file' output consists of 2 phases:
# 1 Dump all the saved logs to the file
# 2 Stream log messages as they are generated
#
# We want to give enough time for phase 1 to complete. There's no
# good method to tell how long to wait, but it usually only takes a
# second. On most bots, this code path won't occur at all, since
    # the adb_logcat_monitor.py command will have been spawned more than 5
    # seconds prior to this script being called.
try:
sleep_time = 5 - (time.time() - os.path.getctime(base_dir))
except OSError:
sleep_time = 5
if sleep_time > 0:
logger.warning('Monitor just started? Sleeping %.1fs', sleep_time)
time.sleep(sleep_time)
assert os.path.exists(base_dir), '%s does not exist' % base_dir
ShutdownLogcatMonitor(base_dir, logger)
separator = '\n' + '*' * 80 + '\n\n'
for log in GetDeviceLogs(FindLogFiles(base_dir), logger):
output_file.write(log)
output_file.write(separator)
with open(os.path.join(base_dir, 'eventlog')) as f:
output_file.write('\nLogcat Monitor Event Log\n')
output_file.write(f.read())
except:
logger.exception('Unexpected exception')
logger.info('Done.')
sh.flush()
output_file.write('\nLogcat Printer Event Log\n')
output_file.write(log_stringio.getvalue())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
appscode/chartify | hack/make.py | 2 | 3759 | #!/usr/bin/env python
# http://stackoverflow.com/a/14050282
def check_antipackage():
from sys import version_info
sys_version = version_info[:2]
found = True
if sys_version < (3, 0):
# 'python 2'
from pkgutil import find_loader
found = find_loader('antipackage') is not None
elif sys_version <= (3, 3):
# 'python <= 3.3'
from importlib import find_loader
found = find_loader('antipackage') is not None
else:
# 'python >= 3.4'
from importlib import util
found = util.find_spec('antipackage') is not None
if not found:
print('Install missing package "antipackage"')
print('Example: pip install git+https://github.com/ellisonbg/antipackage.git#egg=antipackage')
from sys import exit
exit(1)
check_antipackage()
# ref: https://github.com/ellisonbg/antipackage
import antipackage
from github.appscode.libbuild import libbuild
import datetime
import io
import json
import os
import os.path
import socket
import subprocess
import sys
from collections import OrderedDict
from os.path import expandvars
libbuild.REPO_ROOT = expandvars('$GOPATH') + '/src/github.com/appscode/chartify'
BUILD_METADATA = libbuild.metadata(libbuild.REPO_ROOT)
libbuild.BIN_MATRIX = {
'chartify': {
'type': 'go',
'go_version': True,
'distro': {
'darwin': ['386', 'amd64'],
'linux': ['arm', '386', 'amd64'],
'windows': ['386', 'amd64']
}
}
}
libbuild.BUCKET_MATRIX = {
'prod': 'gs://appscode-cdn',
'dev': 'gs://appscode-dev'
}
def call(cmd, stdin=None, cwd=libbuild.REPO_ROOT):
print(cmd)
return subprocess.call([expandvars(cmd)], shell=True, stdin=stdin, cwd=cwd)
def die(status):
if status:
sys.exit(status)
def version():
# json.dump(BUILD_METADATA, sys.stdout, sort_keys=True, indent=2)
for k in sorted(BUILD_METADATA):
print(k + '=' + BUILD_METADATA[k])
def fmt():
libbuild.ungroup_go_imports('pkg', 'main.go')
die(call('goimports -w pkg main.go'))
call('gofmt -s -w main.go pkg')
def lint():
    call('golint ./pkg/... main.go')
def vet():
call('go vet ./pkg/...')
def build_cmd(name):
cfg = libbuild.BIN_MATRIX[name]
if cfg['type'] == 'go':
if 'distro' in cfg:
for goos, archs in cfg['distro'].items():
for goarch in archs:
libbuild.go_build(name, goos, goarch, main='main.go')
else:
libbuild.go_build(name, libbuild.GOHOSTOS, libbuild.GOHOSTARCH, main='main.go')
def build_cmds():
for name in libbuild.BIN_MATRIX:
build_cmd(name)
def build():
build_cmds()
def push(name=None):
if name:
bindir = libbuild.REPO_ROOT + '/dist/' + name
push_bin(bindir)
else:
dist = libbuild.REPO_ROOT + '/dist'
for name in os.listdir(dist):
d = dist + '/' + name
if os.path.isdir(d):
push_bin(d)
def push_bin(bindir):
call('rm -f *.md5', cwd=bindir)
call('rm -f *.sha1', cwd=bindir)
for f in os.listdir(bindir):
if os.path.isfile(bindir + '/' + f):
libbuild.upload_to_cloud(bindir, f, BUILD_METADATA['version'])
def update_registry():
libbuild.update_registry(BUILD_METADATA['version'])
def install():
die(call('GO15VENDOREXPERIMENT=1 ' + libbuild.GOC + ' install .'))
def default():
fmt()
die(call('GO15VENDOREXPERIMENT=1 ' + libbuild.GOC + ' install .'))
if __name__ == "__main__":
if len(sys.argv) > 1:
# http://stackoverflow.com/a/834451
# http://stackoverflow.com/a/817296
globals()[sys.argv[1]](*sys.argv[2:])
else:
default()
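# Illustrative invocations (dispatch is by function name, as in the __main__ block
# above; the repository-relative path is an assumption):
#   ./hack/make.py build
#   ./hack/make.py push chartify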
| apache-2.0 |
Jackysonglanlan/devops | IDEs/sublime/shared-pkgs/Packages/pygments/all/pygments/styles/algol_nu.py | 37 | 2278 | # -*- coding: utf-8 -*-
"""
pygments.styles.algol_nu
~~~~~~~~~~~~~~~~~~~~~~~~
Algol publication style without underlining of keywords.
This style renders source code for publication of algorithms in
scientific papers and academic texts, where its format is frequently used.
It is based on the style of the revised Algol-60 language report[1].
o No colours, only black, white and shades of grey are used.
o Keywords are rendered in lowercase boldface.
o Builtins are rendered in lowercase boldface italic.
o Docstrings and pragmas are rendered in dark grey boldface.
o Library identifiers are rendered in dark grey boldface italic.
o Comments are rendered in grey italic.
To render keywords with underlining, refer to the `Algol` style.
For lowercase conversion of keywords and builtins in languages where
these are not or might not be lowercase, a supporting lexer is required.
The Algol and Modula-2 lexers automatically convert to lowercase whenever
this style is selected.
[1] `Revised Report on the Algorithmic Language Algol-60 <http://www.masswerk.at/algol60/report.htm>`
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Operator
class Algol_NuStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "italic #888",
Comment.Preproc: "bold noitalic #888",
Comment.Special: "bold noitalic #888",
Keyword: "bold",
Keyword.Declaration: "italic",
Name.Builtin: "bold italic",
Name.Builtin.Pseudo: "bold italic",
Name.Namespace: "bold italic #666",
Name.Class: "bold italic #666",
Name.Function: "bold italic #666",
Name.Variable: "bold italic #666",
Name.Constant: "bold italic #666",
Operator.Word: "bold",
String: "italic #666",
Error: "border:#FF0000"
}
| mit |
allanstone/InteligenciaArtificial | Tarea 1/Polinomios/Test/TestPolynomial.py | 1 | 2436 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
.. module:: testPolynomial
    :synopsis: This is a unit test module for the Polynomial module.
.. moduleauthor:: Garrido Valencia Alan
"""
import unittest
from ..Scripts.Polinomio import Polynomial
from sympy import symbols
class TestPolynomial(unittest.TestCase):
"""
    This class is used for tests of the Polinomio module
"""
variable='x'
pol1=Polynomial(variable,2,[2,4,20])
pol2=Polynomial(variable,2,[4,40,100])
pol3=Polynomial(variable,1,[1,5])
def testInstance(self):
"""
        Unit test for instantiation
"""
x=symbols(self.variable)
exp=2.0*x**2+4.0*x+20
self.assertEqual(self.pol1.expresion ,exp)
def testValue(self):
"""
        Unit test for evaluating the polynomial at a point
"""
x=symbols(self.variable)
value=26
self.assertEqual(self.pol1.pointValue(1),value)
def testSum(self):
"""
        Unit test for addition
"""
x=symbols(self.variable)
exp=6.0*x**2+44.0*x+120
self.assertEqual(self.pol1.add(self.pol2),exp)
def testRes(self):
"""
        Unit test for subtraction
"""
x=symbols(self.variable)
exp=(-2.0)*x**2-36.0*x-80.0
self.assertEqual(self.pol1.sub(self.pol2),exp)
def testMul(self):
"""
        Unit test for multiplication
"""
x=symbols(self.variable)
exp=8.0*x**4+96.0*x**3+440.0*x**2+1200.0*x+2000.0
self.assertEqual(self.pol1.mult(self.pol2),exp)
def testDiv(self):
"""
        Unit test for division
"""
x=symbols(self.variable)
exp=4.0*x+20.0
self.assertEqual(self.pol2.divi(self.pol3),exp)
def testdiffer(self):
"""
        Unit test for differentiation
"""
x=symbols(self.variable)
exp=8.0*x + 40.0
self.assertEqual(self.pol2.differ(),exp)
def testIntegr(self):
"""
        Unit test for integration
"""
x=symbols(self.variable)
exp=(2/3)*x**3 + 2.0*x**2 + 20.0*x
self.assertEqual(self.pol1.integr(),exp)
def testIntegrDef(self):
"""
        Unit test for the definite integral
"""
value=6.50
self.assertEqual(self.pol3.integrDef(1,2),value)
if __name__ == '__main__':
unittest.main()
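# Note: because of the relative import above ("from ..Scripts.Polinomio import ..."),
# run this file as a module from the package root rather than directly, e.g.
# (illustrative): python3 -m unittest Test.TestPolynomial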
| mit |
astrobin/astrobin | astrobin_apps_premium/management/commands/send_expiring_subscription_autorenew_notifications.py | 1 | 1731 | # Python
from datetime import datetime, timedelta
# Django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse
# Third party
from subscription.models import UserSubscription
# AstroBin
from astrobin_apps_notifications.utils import push_notification
class Command(BaseCommand):
help = "Send a notification to user when their premium subscription " +\
"auto-renews in one week."
def handle(self, *args, **kwargs):
user_subscriptions = UserSubscription.objects\
.filter(
subscription__name__in = [
"AstroBin Lite (autorenew)",
"AstroBin Premium (autorenew)",
],
cancelled=False,
expires = datetime.now() + timedelta(days = 7))\
.exclude(subscription__recurrence_unit = None)
for user_subscription in user_subscriptions:
push_notification([user_subscription.user], None, 'expiring_subscription_autorenew', {
'user_subscription': user_subscription,
})
user_subscriptions = UserSubscription.objects \
.filter(
subscription__name__in=[
"AstroBin Lite (autorenew)",
"AstroBin Premium (autorenew)",
],
cancelled=False,
expires=datetime.now() + timedelta(days=30)) \
.exclude(subscription__recurrence_unit=None)
for user_subscription in user_subscriptions:
push_notification([user_subscription.user], None, 'expiring_subscription_autorenew_30d', {
'user_subscription': user_subscription,
})
| agpl-3.0 |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/pylab_examples/annotation_demo.py | 3 | 5582 | """
Some examples of how to annotate points in figures. You specify an
annotation point xy=(x,y) and a text point xytext=(x,y) for the
annotated points and text location, respectively. Optionally, you can
specify the coordinate system of xy and xytext with one of the
following strings for xycoords and textcoords (default is 'data')
'figure points' : points from the lower left corner of the figure
'figure pixels' : pixels from the lower left corner of the figure
  'figure fraction' : 0,0 is lower left of figure and 1,1 is upper right
'axes points' : points from lower left corner of axes
'axes pixels' : pixels from lower left corner of axes
'axes fraction' : 0,0 is lower left of axes and 1,1 is upper right
'offset points' : Specify an offset (in points) from the xy value
'data' : use the axes data coordinate system
Optionally, you can specify arrow properties which draw an arrow
from the text to the annotated point by giving a dictionary of arrow
properties
Valid keys are
width : the width of the arrow in points
frac : the fraction of the arrow length occupied by the head
headwidth : the width of the base of the arrow head in points
shrink : move the tip and base some percent away from the
annotated point and text
  any key for matplotlib.patches.Polygon (e.g. facecolor)
For physical coordinate systems (points or pixels) the origin is the
(bottom, left) of the figure or axes. If the value is negative,
however, the origin is from the (right, top) of the figure or axes,
analogous to negative indexing of sequences.
"""
from matplotlib.pyplot import figure, show
from matplotlib.patches import Ellipse
import numpy as np
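# A minimal sketch of the annotate() call described in the module docstring above
# (figure, data point and offsets are arbitrary; the full demos follow below):
#
#   fig = figure()
#   ax = fig.add_subplot(111)
#   ax.plot([0, 1, 2], [0, 1, 4])
#   ax.annotate('a point', xy=(1, 1), xycoords='data',
#               xytext=(10, -15), textcoords='offset points',
#               arrowprops=dict(facecolor='black', shrink=0.05))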
if 1:
# if only one location is given, the text and xypoint being
# annotated are assumed to be the same
fig = figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-3,5))
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate('axes center', xy=(.5, .5), xycoords='axes fraction',
horizontalalignment='center', verticalalignment='center')
ax.annotate('pixels', xy=(20, 20), xycoords='figure pixels')
ax.annotate('points', xy=(100, 300), xycoords='figure points')
ax.annotate('offset', xy=(1, 1), xycoords='data',
xytext=(-15, 10), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='bottom',
)
ax.annotate('local max', xy=(3, 1), xycoords='data',
xytext=(0.8, 0.95), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top',
)
ax.annotate('a fractional title', xy=(.025, .975),
xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top',
fontsize=20)
# use negative points or pixels to specify from right, top -10, 10
# is 10 points to the left of the right side of the axes and 10
# points above the bottom
ax.annotate('bottom right (points)', xy=(-10, 10),
xycoords='axes points',
horizontalalignment='right', verticalalignment='bottom',
fontsize=20)
if 1:
# you can specify the xypoint and the xytext in different
# positions and coordinate systems, and optionally turn on a
# connecting line and mark the point with a marker. Annotations
# work on polar axes too. In the example below, the xy point is
# in native coordinates (xycoords defaults to 'data'). For a
# polar axes, this is in (theta, radius) space. The text in this
# example is placed in the fractional figure coordinate system.
# Text keyword args like horizontal and vertical alignment are
# respected
fig = figure()
ax = fig.add_subplot(111, polar=True)
r = np.arange(0,1,0.001)
theta = 2*2*np.pi*r
line, = ax.plot(theta, r, color='#ee8d18', lw=3)
ind = 800
thisr, thistheta = r[ind], theta[ind]
ax.plot([thistheta], [thisr], 'o')
ax.annotate('a polar annotation',
xy=(thistheta, thisr), # theta, radius
xytext=(0.05, 0.05), # fraction, fraction
textcoords='figure fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='bottom',
)
if 1:
# You can also use polar notation on a cartesian axes. Here the
# native coordinate system ('data') is cartesian, so you need to
# specify the xycoords and textcoords as 'polar' if you want to
# use (theta, radius)
el = Ellipse((0,0), 10, 20, facecolor='r', alpha=0.5)
fig = figure()
ax = fig.add_subplot(111, aspect='equal')
ax.add_artist(el)
el.set_clip_box(ax.bbox)
ax.annotate('the top',
xy=(np.pi/2., 10.), # theta, radius
xytext=(np.pi/3, 20.), # theta, radius
xycoords='polar',
textcoords='polar',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
)
ax.set_xlim(-20, 20)
ax.set_ylim(-20, 20)
show()
| mit |
krinkin/linux | scripts/gdb/linux/symbols.py | 588 | 6302 | #
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
if hasattr(gdb, 'Breakpoint'):
class LoadModuleBreakpoint(gdb.Breakpoint):
def __init__(self, spec, gdb_command):
super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
self.silent = True
self.gdb_command = gdb_command
def stop(self):
module = gdb.parse_and_eval("mod")
module_name = module['name'].string()
cmd = self.gdb_command
# enforce update if object file is not found
cmd.module_files_updated = False
# Disable pagination while reporting symbol (re-)loading.
# The console input is blocked in this context so that we would
# get stuck waiting for the user to acknowledge paged output.
show_pagination = gdb.execute("show pagination", to_string=True)
pagination = show_pagination.endswith("on.\n")
gdb.execute("set pagination off")
if module_name in cmd.loaded_modules:
gdb.write("refreshing all symbols to reload module "
"'{0}'\n".format(module_name))
cmd.load_all_symbols()
else:
cmd.load_module_symbols(module)
# restore pagination state
gdb.execute("set pagination %s" % ("on" if pagination else "off"))
return False
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""
module_paths = []
module_files = []
module_files_updated = False
loaded_modules = []
breakpoint = None
def __init__(self):
super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
gdb.COMPLETE_FILENAME)
def _update_module_files(self):
self.module_files = []
for path in self.module_paths:
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".ko"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
        module_pattern = r".*/{0}\.ko$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
return name
return None
def _section_arguments(self, module):
try:
sect_attrs = module['sect_attrs'].dereference()
except gdb.error:
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
name=section_name, addr=str(address)))
return "".join(args)
def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['module_core']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
if module_file:
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
filename=module_file,
addr=module_addr,
sections=self._section_arguments(module))
gdb.execute(cmdline, to_string=True)
if module_name not in self.loaded_modules:
self.loaded_modules.append(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
# Dropping symbols will disable all breakpoints. So save their states
# and restore them afterward.
saved_states = []
if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
for bp in gdb.breakpoints():
saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
# drop all current symbols and reload vmlinux
gdb.execute("symbol-file", to_string=True)
gdb.execute("symbol-file vmlinux")
self.loaded_modules = []
module_list = modules.module_list()
if not module_list:
gdb.write("no modules found\n")
else:
[self.load_module_symbols(module) for module in module_list]
for saved_state in saved_states:
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
self.module_paths = arg.split()
self.module_paths.append(os.getcwd())
# enforce update
self.module_files = []
self.module_files_updated = False
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
if self.breakpoint is not None:
self.breakpoint.delete()
self.breakpoint = None
self.breakpoint = LoadModuleBreakpoint(
"kernel/module.c:do_init_module", self)
else:
gdb.write("Note: symbol update on module loading not supported "
"with this gdb version\n")
LxSymbols()
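# Typical use from a gdb session attached to a running kernel (illustrative; the
# module search paths are placeholders appended to the current working directory):
#   (gdb) lx-symbols /path/to/out-of-tree/modules /another/module/dir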
| gpl-2.0 |
MiLk/portia | slyd/slyd/bot.py | 1 | 5966 | """
Bot resource
Defines bot/fetch endpoint, e.g.:
curl -d '{"request": {"url": "http://scrapinghub.com/"}}' http://localhost:9001/bot/fetch
The "request" is an object whose fields match the parameters of a Scrapy
request:
http://doc.scrapy.org/en/latest/topics/request-response.html#scrapy.http.Request
Returns a json object. If there is an "error" field, that holds the request
error to display. Otherwise you will find the following fields:
* page -- the retrieved page - will be annotated in future
"""
import json, errno
from functools import partial
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from scrapy.http import Request
from scrapy.spider import BaseSpider
from scrapy.item import DictItem
from scrapy import signals, log
from scrapy.crawler import Crawler
from scrapy.http import HtmlResponse
from scrapy.exceptions import DontCloseSpider
from scrapy.utils.request import request_fingerprint
from slybot.spider import IblSpider
from .html import html4annotation, extract_html
from .resource import SlydJsonResource
def create_bot_resource(spec_manager):
bot = Bot(spec_manager.settings, spec_manager)
bot.putChild('fetch', Fetch(bot))
return bot
class Bot(Resource):
spider = BaseSpider('slyd')
def __init__(self, settings, spec_manager):
# twisted base class is old-style so we cannot user super()
Resource.__init__(self)
self.spec_manager = spec_manager
# initialize scrapy crawler
crawler = Crawler(settings)
crawler.configure()
crawler.signals.connect(self.keep_spider_alive, signals.spider_idle)
crawler.crawl(self.spider)
crawler.start()
self.crawler = crawler
log.msg("bot initialized", level=log.DEBUG)
def keep_spider_alive(self, spider):
raise DontCloseSpider("keeping it open")
def stop(self):
"""Stop the crawler"""
self.crawler.stop()
log.msg("bot stopped", level=log.DEBUG)
class BotResource(SlydJsonResource):
def __init__(self, bot):
Resource.__init__(self)
self.bot = bot
class Fetch(BotResource):
isLeaf = True
def render_POST(self, request):
#TODO: validate input data, handle errors, etc.
params = self.read_json(request)
scrapy_request_kwargs = params['request']
scrapy_request_kwargs.update(
callback=self.fetch_callback,
errback=partial(self.fetch_errback, request),
dont_filter=True, # TODO: disable duplicate middleware
meta=dict(
handle_httpstatus_all=True,
twisted_request=request,
slyd_request_params=params
)
)
request = Request(**scrapy_request_kwargs)
self.bot.crawler.engine.schedule(request, self.bot.spider)
return NOT_DONE_YET
def fetch_callback(self, response):
request = response.meta['twisted_request']
result_response = dict(status=response.status,
headers=response.headers.to_string())
if response.status != 200:
finish_request(request, response=result_response)
return
if not isinstance(response, HtmlResponse):
msg = "Non-html response: %s" % response.headers.get(
'content-type', 'no content type')
            finish_request(request, error=msg)
            return
try:
params = response.meta['slyd_request_params']
original_html = extract_html(response)
cleaned_html = html4annotation(original_html, response.url)
# we may want to include some headers
fingerprint = request_fingerprint(response.request)
result_response = dict(status=response.status,
headers=response.headers.to_string())
result = dict(page=cleaned_html, original=original_html, fp=fingerprint,
response=result_response)
spider = self.create_spider(request.project, params)
if spider is not None:
items = []
links = []
for value in spider.parse(response):
if isinstance(value, Request):
links.append(value.url)
elif isinstance(value, DictItem):
items.append(value._values)
else:
raise ValueError("Unexpected type %s from spider"
% type(value))
result['items'] = items
result['links'] = links
finish_request(request, **result)
except Exception as ex:
log.err()
finish_request(request, response=result_response,
error="unexpected internal error: %s" % ex)
def create_spider(self, project, params, **kwargs):
spider = params.get('spider')
if spider is None:
return
pspec = self.bot.spec_manager.project_spec(project)
try:
spider_spec = pspec.resource('spiders', spider)
items_spec = pspec.resource('items')
extractors = pspec.resource('extractors')
return IblSpider(spider, spider_spec, items_spec, extractors,
**kwargs)
except IOError as ex:
if ex.errno == errno.ENOENT:
log.msg("skipping extraction, no spec: %s" % ex.filename)
else:
raise
def fetch_errback(self, twisted_request, failure):
msg = "unexpected error response: %s" % failure
log.msg(msg, level=log.ERROR)
finish_request(twisted_request, error=msg)
def finish_request(trequest, **resp_obj):
jdata = json.dumps(resp_obj)
trequest.setResponseCode(200)
trequest.setHeader('Content-Type', 'application/json')
trequest.setHeader('Content-Length', len(jdata))
trequest.write(jdata)
trequest.finish()
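# Shape of the JSON written back to the client by fetch_callback above (illustrative):
#   {"page": "<cleaned html>", "original": "<raw html>", "fp": "<request fingerprint>",
#    "response": {"status": 200, "headers": "..."},
#    "items": [...], "links": ["<url>", ...]}
# or, on failure, an object carrying an "error" message as described in the module docstring.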
| bsd-3-clause |
nttks/jenkins-test | lms/djangoapps/courseware/management/commands/tests/test_dump_course.py | 12 | 8598 | # coding=utf-8
"""Tests for Django management commands"""
import json
from path import path
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
from django.conf import settings
from django.core.management import call_command
from django.test.utils import override_settings
from django.test.testcases import TestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_from_xml
DATA_DIR = settings.COMMON_TEST_DATA_ROOT
TEST_COURSE_ID = 'edX/simple/2012_Fall'
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended']
MAPPINGS = {
'edX/toy/2012_Fall': 'xml',
'edX/simple/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
}
TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
DATA_DIR, MAPPINGS, include_xml=True, xml_course_dirs=XML_COURSE_DIRS
)
class CommandsTestBase(TestCase):
"""
Base class for testing different django commands.
Must be subclassed using override_settings set to the modulestore
to be tested.
"""
def setUp(self):
self.loaded_courses = self.load_courses()
def load_courses(self):
"""Load test courses and return list of ids"""
store = modulestore()
# Add a course with a unicode name, if the modulestore
# supports adding modules.
if hasattr(store, 'create_xmodule'):
CourseFactory.create(org=u'ëḋẌ',
course=u'śíḿṕĺé',
display_name=u'2012_Fáĺĺ',
modulestore=store)
courses = store.get_courses()
# NOTE: if xml store owns these, it won't import them into mongo
if SlashSeparatedCourseKey.from_deprecated_string(TEST_COURSE_ID) not in [c.id for c in courses]:
import_from_xml(store, ModuleStoreEnum.UserID.mgmt_command, DATA_DIR, XML_COURSE_DIRS)
return [course.id for course in store.get_courses()]
def call_command(self, name, *args, **kwargs):
"""Call management command and return output"""
out = StringIO() # To Capture the output of the command
call_command(name, *args, stdout=out, **kwargs)
out.seek(0)
return out.read()
def test_dump_course_ids(self):
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_ids', **kwargs)
dumped_courses = output.decode('utf-8').strip().split('\n')
course_ids = {course_id.to_deprecated_string() for course_id in self.loaded_courses}
dumped_ids = set(dumped_courses)
self.assertEqual(course_ids, dumped_ids)
def test_correct_course_structure_metadata(self):
course_id = 'edX/open_ended/2012_Fall'
args = [course_id]
kwargs = {'modulestore': 'default'}
try:
output = self.call_command('dump_course_structure', *args, **kwargs)
except TypeError, exception:
self.fail(exception)
dump = json.loads(output)
self.assertGreater(len(dump.values()), 0)
def test_dump_course_structure(self):
args = [TEST_COURSE_ID]
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have metadata,
# but not inherited metadata:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertNotIn('inherited_metadata', element)
# Check a few elements in the course dump
test_course_key = SlashSeparatedCourseKey.from_deprecated_string(TEST_COURSE_ID)
parent_id = test_course_key.make_usage_key('chapter', 'Overview').to_deprecated_string()
self.assertEqual(dump[parent_id]['category'], 'chapter')
self.assertEqual(len(dump[parent_id]['children']), 3)
child_id = dump[parent_id]['children'][1]
self.assertEqual(dump[child_id]['category'], 'videosequence')
self.assertEqual(len(dump[child_id]['children']), 2)
video_id = test_course_key.make_usage_key('video', 'Welcome').to_deprecated_string()
self.assertEqual(dump[video_id]['category'], 'video')
self.assertEqual(len(dump[video_id]['metadata']), 5)
self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])
# Check if there are the right number of elements
self.assertEqual(len(dump), 16)
def test_dump_inherited_course_structure(self):
args = [TEST_COURSE_ID]
kwargs = {'modulestore': 'default', 'inherited': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... but does not contain inherited metadata containing a default value:
self.assertNotIn('due', element['inherited_metadata'])
def test_dump_inherited_course_structure_with_defaults(self):
args = [TEST_COURSE_ID]
kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... and contains inherited metadata containing a default value:
self.assertIsNone(element['inherited_metadata']['due'])
def test_export_course(self):
tmp_dir = path(mkdtemp())
filename = tmp_dir / 'test.tar.gz'
try:
self.run_export_course(filename)
with tarfile.open(filename) as tar_file:
self.check_export_file(tar_file)
finally:
shutil.rmtree(tmp_dir)
def test_export_course_stdout(self):
output = self.run_export_course('-')
with tarfile.open(fileobj=StringIO(output)) as tar_file:
self.check_export_file(tar_file)
def run_export_course(self, filename): # pylint: disable=missing-docstring
args = [TEST_COURSE_ID, filename]
kwargs = {'modulestore': 'default'}
return self.call_command('export_course', *args, **kwargs)
def check_export_file(self, tar_file): # pylint: disable=missing-docstring
names = tar_file.getnames()
# Check if some of the files are present.
# The rest is of the code should be covered by the tests for
# xmodule.modulestore.xml_exporter, used by the dump_course command
assert_in = self.assertIn
assert_in('edX-simple-2012_Fall', names)
assert_in('edX-simple-2012_Fall/policies/2012_Fall/policy.json', names)
assert_in('edX-simple-2012_Fall/html/toylab.html', names)
assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)
@override_settings(MODULESTORE=TEST_DATA_MIXED_XML_MODULESTORE)
class CommandsXMLTestCase(CommandsTestBase, ModuleStoreTestCase):
"""
Test case for management commands using the xml modulestore.
"""
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class CommandsMongoTestCase(CommandsTestBase, ModuleStoreTestCase):
"""
Test case for management commands using the mixed mongo modulestore.
"""
| agpl-3.0 |
flexiant/xen | tools/xm-test/lib/XmTestLib/block_utils.py | 26 | 1410 | #!/usr/bin/python
# Copyright (c) 2006 XenSource Inc.
# Author: Ewan Mellor <[email protected]>
import time
from XmTestLib import *
import xen.util.blkif
__all__ = [ "block_attach", "block_detach" ]
def get_state(domain, devname):
(path, number) = xen.util.blkif.blkdev_name_to_number(devname)
s, o = traceCommand("xm block-list %s | awk '/^%d/ {print $4}'" %
(domain.getName(), number))
if s != 0:
FAIL("block-list failed")
if o == "":
return 0
else:
return int(o)
def block_attach(domain, phy, virt):
status, output = traceCommand("xm block-attach %s %s %s w" %
(domain.getName(), phy, virt))
if status != 0:
FAIL("xm block-attach returned invalid %i != 0" % status)
for i in range(10):
if get_state(domain, virt) == 4:
break
time.sleep(1)
else:
FAIL("block-attach failed: device did not switch to Connected state")
def block_detach(domain, virt):
status, output = traceCommand("xm block-detach %s %s" %
(domain.getName(), virt))
if status != 0:
FAIL("xm block-detach returned invalid %i != 0" % status)
for i in range(10):
if get_state(domain, virt) == 0:
break
time.sleep(1)
else:
FAIL("block-detach failed: device did not disappear")
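# Illustrative usage from a test case (device names are placeholders; the backend
# device string follows the same "xm block-attach" conventions used above):
#   block_attach(domain, "phy:/dev/loop0", "xvdb")
#   block_detach(domain, "xvdb")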
| gpl-2.0 |
kparal/anaconda | tests/lib/filelist.py | 9 | 1953 | #
# filelist.py: method for determining which files to check
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: David Shea <[email protected]>
import os
import subprocess
def testfilelist(filterfunc=None):
"""A generator function for the list of file names to check.
If the check is run from a git work tree, the method returns a list
of files known to git. Otherwise, it returns a list of every file
beneath $top_srcdir.
top_srcdir must be set in the environment.
filterfunc, if provided, is a function that takes a filename as an
argument and returns True for files that should be included in the
file list. For example, something like lambda x: fnmatch(x, '*.py')
could be used to match only filenames that end in .py.
"""
if os.path.isdir(os.path.join(os.environ["top_srcdir"], ".git")):
output = subprocess.check_output(["git", "ls-files", "-c", os.environ["top_srcdir"]]).decode("utf-8")
filelist = output.split("\n")
else:
filelist = (os.path.join(path, testfile) \
for path, _dirs, files in os.walk(os.environ["top_srcdir"]) \
for testfile in files)
for f in filelist:
if not filterfunc or filterfunc(f):
yield f
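# Illustrative use, mirroring the docstring above (the fnmatch import belongs to the
# example, not to this module):
#   from fnmatch import fnmatch
#   for name in testfilelist(lambda x: fnmatch(x, '*.py')):
#       print(name)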
| gpl-2.0 |
bebby520/essay_devel | venv/lib/python2.7/site-packages/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber  # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
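# Minimal usage sketch (the file name is a placeholder; feed() takes byte strings and
# close() fills in self.result as implemented above):
#   detector = UniversalDetector()
#   with open('unknown.bin', 'rb') as fp:
#       for chunk in fp:
#           detector.feed(chunk)
#           if detector.done:
#               break
#   detector.close()
#   print(detector.result)   # e.g. {'encoding': 'UTF-8-SIG', 'confidence': 1.0}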
| apache-2.0 |
cdrttn/samba-regedit | lib/dnspython/tests/dnssec.py | 56 | 9344 | # Copyright (C) 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.dnssec
import dns.name
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
abs_dnspython_org = dns.name.from_text('dnspython.org')
abs_keys = { abs_dnspython_org :
dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'DNSKEY',
'257 3 5 AwEAAenVTr9L1OMlL1/N2ta0Qj9LLLnnmFWIr1dJoAsWM9BQfsbV7kFZ XbAkER/FY9Ji2o7cELxBwAsVBuWn6IUUAJXLH74YbC1anY0lifjgt29z SwDzuB7zmC7yVYZzUunBulVW4zT0tg1aePbpVL2EtTL8VzREqbJbE25R KuQYHZtFwG8S4iBxJUmT2Bbd0921LLxSQgVoFXlQx/gFV2+UERXcJ5ce iX6A6wc02M/pdg/YbJd2rBa0MYL3/Fz/Xltre0tqsImZGxzi6YtYDs45 NC8gH+44egz82e2DATCVM1ICPmRDjXYTLldQiWA2ZXIWnK0iitl5ue24 7EsWJefrIhE=',
'256 3 5 AwEAAdSSghOGjU33IQZgwZM2Hh771VGXX05olJK49FxpSyuEAjDBXY58 LGU9R2Zgeecnk/b9EAhFu/vCV9oECtiTCvwuVAkt9YEweqYDluQInmgP NGMJCKdSLlnX93DkjDw8rMYv5dqXCuSGPlKChfTJOLQxIAxGloS7lL+c 0CTZydAF')
}
rel_keys = { dns.name.empty :
dns.rrset.from_text('@', 3600, 'IN', 'DNSKEY',
'257 3 5 AwEAAenVTr9L1OMlL1/N2ta0Qj9LLLnnmFWIr1dJoAsWM9BQfsbV7kFZ XbAkER/FY9Ji2o7cELxBwAsVBuWn6IUUAJXLH74YbC1anY0lifjgt29z SwDzuB7zmC7yVYZzUunBulVW4zT0tg1aePbpVL2EtTL8VzREqbJbE25R KuQYHZtFwG8S4iBxJUmT2Bbd0921LLxSQgVoFXlQx/gFV2+UERXcJ5ce iX6A6wc02M/pdg/YbJd2rBa0MYL3/Fz/Xltre0tqsImZGxzi6YtYDs45 NC8gH+44egz82e2DATCVM1ICPmRDjXYTLldQiWA2ZXIWnK0iitl5ue24 7EsWJefrIhE=',
'256 3 5 AwEAAdSSghOGjU33IQZgwZM2Hh771VGXX05olJK49FxpSyuEAjDBXY58 LGU9R2Zgeecnk/b9EAhFu/vCV9oECtiTCvwuVAkt9YEweqYDluQInmgP NGMJCKdSLlnX93DkjDw8rMYv5dqXCuSGPlKChfTJOLQxIAxGloS7lL+c 0CTZydAF')
}
when = 1290250287
abs_soa = dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'SOA',
'howl.dnspython.org. hostmaster.dnspython.org. 2010020047 3600 1800 604800 3600')
abs_other_soa = dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'SOA',
'foo.dnspython.org. hostmaster.dnspython.org. 2010020047 3600 1800 604800 3600')
abs_soa_rrsig = dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'RRSIG',
'SOA 5 2 3600 20101127004331 20101119213831 61695 dnspython.org. sDUlltRlFTQw5ITFxOXW3TgmrHeMeNpdqcZ4EXxM9FHhIlte6V9YCnDw t6dvM9jAXdIEi03l9H/RAd9xNNW6gvGMHsBGzpvvqFQxIBR2PoiZA1mX /SWHZFdbt4xjYTtXqpyYvrMK0Dt7bUYPadyhPFCJ1B+I8Zi7B5WJEOd0 8vs=')
rel_soa = dns.rrset.from_text('@', 3600, 'IN', 'SOA',
'howl hostmaster 2010020047 3600 1800 604800 3600')
rel_other_soa = dns.rrset.from_text('@', 3600, 'IN', 'SOA',
'foo hostmaster 2010020047 3600 1800 604800 3600')
rel_soa_rrsig = dns.rrset.from_text('@', 3600, 'IN', 'RRSIG',
'SOA 5 2 3600 20101127004331 20101119213831 61695 @ sDUlltRlFTQw5ITFxOXW3TgmrHeMeNpdqcZ4EXxM9FHhIlte6V9YCnDw t6dvM9jAXdIEi03l9H/RAd9xNNW6gvGMHsBGzpvvqFQxIBR2PoiZA1mX /SWHZFdbt4xjYTtXqpyYvrMK0Dt7bUYPadyhPFCJ1B+I8Zi7B5WJEOd0 8vs=')
sep_key = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY,
'257 3 5 AwEAAenVTr9L1OMlL1/N2ta0Qj9LLLnnmFWIr1dJoAsWM9BQfsbV7kFZ XbAkER/FY9Ji2o7cELxBwAsVBuWn6IUUAJXLH74YbC1anY0lifjgt29z SwDzuB7zmC7yVYZzUunBulVW4zT0tg1aePbpVL2EtTL8VzREqbJbE25R KuQYHZtFwG8S4iBxJUmT2Bbd0921LLxSQgVoFXlQx/gFV2+UERXcJ5ce iX6A6wc02M/pdg/YbJd2rBa0MYL3/Fz/Xltre0tqsImZGxzi6YtYDs45 NC8gH+44egz82e2DATCVM1ICPmRDjXYTLldQiWA2ZXIWnK0iitl5ue24 7EsWJefrIhE=')
good_ds = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
'57349 5 2 53A79A3E7488AB44FFC56B2D1109F0699D1796DD977E72108B841F96 E47D7013')
when2 = 1290425644
abs_example = dns.name.from_text('example')
abs_dsa_keys = { abs_example :
dns.rrset.from_text('example.', 86400, 'IN', 'DNSKEY',
'257 3 3 CI3nCqyJsiCJHTjrNsJOT4RaszetzcJPYuoH3F9ZTVt3KJXncCVR3bwn 1w0iavKljb9hDlAYSfHbFCp4ic/rvg4p1L8vh5s8ToMjqDNl40A0hUGQ Ybx5hsECyK+qHoajilUX1phYSAD8d9WAGO3fDWzUPBuzR7o85NiZCDxz yXuNVfni0uhj9n1KYhEO5yAbbruDGN89wIZcxMKuQsdUY2GYD93ssnBv a55W6XRABYWayKZ90WkRVODLVYLSn53Pj/wwxGH+XdhIAZJXimrZL4yl My7rtBsLMqq8Ihs4Tows7LqYwY7cp6y/50tw6pj8tFqMYcPUjKZV36l1 M/2t5BVg3i7IK61Aidt6aoC3TDJtzAxg3ZxfjZWJfhHjMJqzQIfbW5b9 q1mjFsW5EUv39RaNnX+3JWPRLyDqD4pIwDyqfutMsdk/Py3paHn82FGp CaOg+nicqZ9TiMZURN/XXy5JoXUNQ3RNvbHCUiPUe18KUkY6mTfnyHld 1l9YCWmzXQVClkx/hOYxjJ4j8Ife58+Obu5X',
'256 3 3 CJE1yb9YRQiw5d2xZrMUMR+cGCTt1bp1KDCefmYKmS+Z1+q9f42ETVhx JRiQwXclYwmxborzIkSZegTNYIV6mrYwbNB27Q44c3UGcspb3PiOw5TC jNPRYEcdwGvDZ2wWy+vkSV/S9tHXY8O6ODiE6abZJDDg/RnITyi+eoDL R3KZ5n/V1f1T1b90rrV6EewhBGQJpQGDogaXb2oHww9Tm6NfXyo7SoMM pbwbzOckXv+GxRPJIQNSF4D4A9E8XCksuzVVdE/0lr37+uoiAiPia38U 5W2QWe/FJAEPLjIp2eTzf0TrADc1pKP1wrA2ASpdzpm/aX3IB5RPp8Ew S9U72eBFZJAUwg635HxJVxH1maG6atzorR566E+e0OZSaxXS9o1o6QqN 3oPlYLGPORDiExilKfez3C/x/yioOupW9K5eKF0gmtaqrHX0oq9s67f/ RIM2xVaKHgG9Vf2cgJIZkhv7sntujr+E4htnRmy9P9BxyFxsItYxPI6Z bzygHAZpGhlI/7ltEGlIwKxyTK3ZKBm67q7B')
}
abs_dsa_soa = dns.rrset.from_text('example.', 86400, 'IN', 'SOA',
'ns1.example. hostmaster.example. 2 10800 3600 604800 86400')
abs_other_dsa_soa = dns.rrset.from_text('example.', 86400, 'IN', 'SOA',
'ns1.example. hostmaster.example. 2 10800 3600 604800 86401')
abs_dsa_soa_rrsig = dns.rrset.from_text('example.', 86400, 'IN', 'RRSIG',
'SOA 3 1 86400 20101129143231 20101122112731 42088 example. CGul9SuBofsktunV8cJs4eRs6u+3NCS3yaPKvBbD+pB2C76OUXDZq9U=')
example_sep_key = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY,
'257 3 3 CI3nCqyJsiCJHTjrNsJOT4RaszetzcJPYuoH3F9ZTVt3KJXncCVR3bwn 1w0iavKljb9hDlAYSfHbFCp4ic/rvg4p1L8vh5s8ToMjqDNl40A0hUGQ Ybx5hsECyK+qHoajilUX1phYSAD8d9WAGO3fDWzUPBuzR7o85NiZCDxz yXuNVfni0uhj9n1KYhEO5yAbbruDGN89wIZcxMKuQsdUY2GYD93ssnBv a55W6XRABYWayKZ90WkRVODLVYLSn53Pj/wwxGH+XdhIAZJXimrZL4yl My7rtBsLMqq8Ihs4Tows7LqYwY7cp6y/50tw6pj8tFqMYcPUjKZV36l1 M/2t5BVg3i7IK61Aidt6aoC3TDJtzAxg3ZxfjZWJfhHjMJqzQIfbW5b9 q1mjFsW5EUv39RaNnX+3JWPRLyDqD4pIwDyqfutMsdk/Py3paHn82FGp CaOg+nicqZ9TiMZURN/XXy5JoXUNQ3RNvbHCUiPUe18KUkY6mTfnyHld 1l9YCWmzXQVClkx/hOYxjJ4j8Ife58+Obu5X')
example_ds_sha1 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
'18673 3 1 71b71d4f3e11bbd71b4eff12cde69f7f9215bbe7')
example_ds_sha256 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
'18673 3 2 eb8344cbbf07c9d3d3d6c81d10c76653e28d8611a65e639ef8f716e4e4e5d913')
class DNSSECValidatorTestCase(unittest.TestCase):
def testAbsoluteRSAGood(self):
dns.dnssec.validate(abs_soa, abs_soa_rrsig, abs_keys, None, when)
def testAbsoluteRSABad(self):
def bad():
dns.dnssec.validate(abs_other_soa, abs_soa_rrsig, abs_keys, None,
when)
self.failUnlessRaises(dns.dnssec.ValidationFailure, bad)
def testRelativeRSAGood(self):
dns.dnssec.validate(rel_soa, rel_soa_rrsig, rel_keys,
abs_dnspython_org, when)
def testRelativeRSABad(self):
def bad():
dns.dnssec.validate(rel_other_soa, rel_soa_rrsig, rel_keys,
abs_dnspython_org, when)
self.failUnlessRaises(dns.dnssec.ValidationFailure, bad)
def testMakeSHA256DS(self):
ds = dns.dnssec.make_ds(abs_dnspython_org, sep_key, 'SHA256')
self.failUnless(ds == good_ds)
def testAbsoluteDSAGood(self):
dns.dnssec.validate(abs_dsa_soa, abs_dsa_soa_rrsig, abs_dsa_keys, None,
when2)
def testAbsoluteDSABad(self):
def bad():
dns.dnssec.validate(abs_other_dsa_soa, abs_dsa_soa_rrsig,
abs_dsa_keys, None, when2)
self.failUnlessRaises(dns.dnssec.ValidationFailure, bad)
def testMakeExampleSHA1DS(self):
ds = dns.dnssec.make_ds(abs_example, example_sep_key, 'SHA1')
self.failUnless(ds == example_ds_sha1)
def testMakeExampleSHA256DS(self):
ds = dns.dnssec.make_ds(abs_example, example_sep_key, 'SHA256')
self.failUnless(ds == example_ds_sha256)
if __name__ == '__main__':
import_ok = False
try:
import Crypto.Util.number
import_ok = True
    except ImportError:
pass
if import_ok:
unittest.main()
else:
print 'skipping DNSSEC tests because pycrypto is not installed'
| gpl-3.0 |
macs03/demo-cms | cms/lib/python2.7/site-packages/cms/plugin_rendering.py | 4 | 8482 | # -*- coding: utf-8 -*-
from django.template import Template, Context
from django.template.loader import render_to_string
from django.utils import six
from django.utils.safestring import mark_safe
from cms.models.placeholdermodel import Placeholder
from cms.plugin_processors import (plugin_meta_context_processor, mark_safe_plugin_processor)
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import iterload_objects
from cms.utils.placeholder import get_placeholder_conf, restore_sekizai_context
# these are always called before all other plugin context processors
from sekizai.helpers import Watcher
DEFAULT_PLUGIN_CONTEXT_PROCESSORS = (
plugin_meta_context_processor,
)
# these are always called after all other plugin processors
DEFAULT_PLUGIN_PROCESSORS = (
mark_safe_plugin_processor,
)
class PluginContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CMS_PLUGIN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, dict, instance, placeholder, processors=None, current_app=None):
super(PluginContext, self).__init__(dict, current_app=current_app)
if not processors:
processors = []
for processor in DEFAULT_PLUGIN_CONTEXT_PROCESSORS:
self.update(processor(instance, placeholder, self))
for processor in iterload_objects(get_cms_setting('PLUGIN_CONTEXT_PROCESSORS')):
self.update(processor(instance, placeholder, self))
for processor in processors:
self.update(processor(instance, placeholder, self))
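# A minimal sketch of a custom plugin context processor: each processor is a
# callable taking (instance, placeholder, context) and returning a dict that is
# merged into the PluginContext above. The dotted path to such a function would
# be listed in CMS_PLUGIN_CONTEXT_PROCESSORS; the key added below is illustrative.
def _example_plugin_context_processor(instance, placeholder, context):
    return {'plugin_type': instance.plugin_type}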
def render_plugin(context, instance, placeholder, template, processors=None, current_app=None):
"""
    Renders a single plugin and applies the post processors to its rendered
content.
"""
if not processors:
processors = []
if isinstance(template, six.string_types):
content = render_to_string(template, context_instance=context)
elif isinstance(template, Template):
content = template.render(context)
else:
content = ''
for processor in iterload_objects(get_cms_setting('PLUGIN_PROCESSORS')):
content = processor(instance, placeholder, content, context)
for processor in processors:
content = processor(instance, placeholder, content, context)
for processor in DEFAULT_PLUGIN_PROCESSORS:
content = processor(instance, placeholder, content, context)
return content
def render_plugins(plugins, context, placeholder, processors=None):
"""
Renders a collection of plugins with the given context, using the appropriate processors
for a given placeholder name, and returns a list containing a "rendered content" string
for each plugin.
This is the main plugin rendering utility function, use this function rather than
Plugin.render_plugin().
"""
out = []
total = len(plugins)
for index, plugin in enumerate(plugins):
plugin._render_meta.total = total
plugin._render_meta.index = index
context.push()
out.append(plugin.render_plugin(context, placeholder, processors=processors))
context.pop()
return out
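# A minimal sketch of how render_plugins is typically consumed (mirroring the
# call inside render_placeholder below): the list of rendered strings it returns
# is joined into the final placeholder markup.
#     rendered = render_plugins(plugins, context, placeholder, processors)
#     markup = mark_safe("".join(rendered))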
def render_placeholder(placeholder, context_to_copy,
name_fallback="Placeholder", lang=None, default=None, editable=True,
use_cache=True):
"""
Renders plugins for a placeholder on the given page using shallow copies of the
given context, and returns a string containing the rendered output.
Set editable = False to disable front-end editing for this placeholder
during rendering. This is primarily used for the "as" variant of the
render_placeholder tag.
"""
if not placeholder:
return
from cms.utils.plugins import get_plugins
context = context_to_copy
context.push()
request = context['request']
if not hasattr(request, 'placeholders'):
request.placeholders = []
if placeholder.has_change_permission(request):
request.placeholders.append(placeholder)
if hasattr(placeholder, 'content_cache'):
return mark_safe(placeholder.content_cache)
page = placeholder.page if placeholder else None
    # This partly duplicates the similar call in `get_plugins`, but it's required
# to have a valid language in this function for `get_fallback_languages` to work
if lang:
save_language = lang
else:
lang = get_language_from_request(request)
save_language = lang
    # Prepend front-end editing toolbar output if applicable
toolbar = getattr(request, 'toolbar', None)
if getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True) and editable:
from cms.middleware.toolbar import toolbar_plugin_processor
processors = (toolbar_plugin_processor,)
edit = True
else:
processors = None
edit = False
from django.core.cache import cache
if get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
cache_key = placeholder.get_cache_key(lang)
if not edit and placeholder and not hasattr(placeholder, 'cache_checked'):
cached_value = cache.get(cache_key)
            if cached_value is not None:
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
if page:
template = page.template
else:
template = None
plugins = [plugin for plugin in get_plugins(request, placeholder, template, lang=lang)]
# Add extra context as defined in settings, but do not overwrite existing context variables,
# since settings are general and database/template are specific
# TODO this should actually happen as a plugin context processor, but these currently overwrite
# existing context -- maybe change this order?
slot = getattr(placeholder, 'slot', None)
extra_context = {}
if slot:
extra_context = get_placeholder_conf("extra_context", slot, template, {})
for key, value in extra_context.items():
if key not in context:
context[key] = value
content = []
watcher = Watcher(context)
content.extend(render_plugins(plugins, context, placeholder, processors))
toolbar_content = ''
if edit and editable:
if not hasattr(request.toolbar, 'placeholders'):
request.toolbar.placeholders = {}
if placeholder.pk not in request.toolbar.placeholders:
request.toolbar.placeholders[placeholder.pk] = placeholder
toolbar_content = mark_safe(render_placeholder_toolbar(placeholder, context, name_fallback, save_language))
if content:
content = mark_safe("".join(content))
elif default:
#should be nodelist from a template
content = mark_safe(default.render(context_to_copy))
else:
content = ''
context['content'] = content
context['placeholder'] = toolbar_content
context['edit'] = edit
result = render_to_string("cms/toolbar/content.html", context)
changes = watcher.get_changes()
if placeholder and not edit and placeholder.cache_placeholder and get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
cache.set(cache_key, {'content': result, 'sekizai': changes}, get_cms_setting('CACHE_DURATIONS')['content'])
context.pop()
return result
def render_placeholder_toolbar(placeholder, context, name_fallback, save_language):
from cms.plugin_pool import plugin_pool
request = context['request']
page = placeholder.page if placeholder else None
if not page:
page = getattr(request, 'current_page', None)
if page:
if name_fallback and not placeholder:
placeholder = Placeholder.objects.create(slot=name_fallback)
page.placeholders.add(placeholder)
placeholder.page = page
if placeholder:
slot = placeholder.slot
else:
slot = None
context.push()
# to restrict child-only plugins from draggables..
context['allowed_plugins'] = [cls.__name__ for cls in plugin_pool.get_all_plugins(slot, page)]
context['placeholder'] = placeholder
context['language'] = save_language
context['page'] = page
toolbar = render_to_string("cms/toolbar/placeholder.html", context)
context.pop()
return toolbar
| mit |
hogarthj/ansible | lib/ansible/modules/system/beadm.py | 56 | 11657 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: beadm
short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems.
description:
- Create, delete or activate ZFS boot environments.
- Mount and unmount ZFS boot environments.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS boot environment name.
aliases: [ "be" ]
required: True
snapshot:
description:
- If specified, the new boot environment will be cloned from the given
snapshot or inactive boot environment.
required: false
default: false
description:
description:
- Associate a description with a new boot environment. This option is
available only on Solarish platforms.
required: false
default: false
options:
description:
- Create the datasets for new BE with specific ZFS properties. Multiple
options can be specified. This option is available only on
Solarish platforms.
required: false
default: false
mountpoint:
description:
- Path where to mount the ZFS boot environment
required: false
default: false
state:
description:
- Create or delete ZFS boot environment.
required: false
default: "present"
choices: [ "present", "absent", "activated", "mounted", "unmounted" ]
force:
description:
- Specifies if the unmount should be forced.
required: false
default: false
choices: [ "true", "false" ]
'''
EXAMPLES = '''
- name: Create ZFS boot environment
beadm:
name: upgrade-be
state: present
- name: Create ZFS boot environment from existing inactive boot environment
beadm:
name: upgrade-be
snapshot: be@old
state: present
- name: Create ZFS boot environment with compression enabled and description "upgrade"
beadm:
name: upgrade-be
options: "compression=on"
description: upgrade
state: present
- name: Delete ZFS boot environment
beadm:
name: old-be
state: absent
- name: Mount ZFS boot environment on /tmp/be
beadm:
name: BE
mountpoint: /tmp/be
state: mounted
- name: Unmount ZFS boot environment
beadm:
name: BE
state: unmounted
- name: Activate ZFS boot environment
beadm:
name: upgrade-be
state: activated
'''
RETURN = '''
name:
description: BE name
returned: always
type: string
sample: pre-upgrade
snapshot:
description: ZFS snapshot to create BE from
returned: always
type: string
sample: rpool/ROOT/oi-hipster@fresh
description:
description: BE description
returned: always
type: string
sample: Upgrade from 9.0 to 10.0
options:
description: BE additional options
returned: always
type: string
sample: compression=on
mountpoint:
description: BE mountpoint
returned: always
type: string
sample: /mnt/be
state:
description: state of the target
returned: always
type: string
sample: present
force:
description: if forced action is wanted
returned: always
type: boolean
sample: False
'''
import os
from ansible.module_utils.basic import AnsibleModule
class BE(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.snapshot = module.params['snapshot']
self.description = module.params['description']
self.options = module.params['options']
self.mountpoint = module.params['mountpoint']
self.state = module.params['state']
self.force = module.params['force']
self.is_freebsd = os.uname()[0] == 'FreeBSD'
def _beadm_list(self):
cmd = [self.module.get_bin_path('beadm')]
cmd.append('list')
cmd.append('-H')
if not self.is_freebsd:
cmd.append(self.name)
return self.module.run_command(cmd)
def _find_be_by_name(self, out):
for line in out.splitlines():
if line.split('\t')[0] == self.name:
return line
return None
def exists(self):
(rc, out, _) = self._beadm_list()
if rc == 0:
if self.is_freebsd:
if self._find_be_by_name(out):
return True
else:
return True
else:
return False
def is_activated(self):
(rc, out, _) = self._beadm_list()
if rc == 0:
if self.is_freebsd:
line = self._find_be_by_name(out)
if line is not None and 'R' in line.split('\t')[1]:
return True
else:
if 'R' in out.split(';')[2]:
return True
return False
def activate_be(self):
cmd = [self.module.get_bin_path('beadm')]
cmd.append('activate')
cmd.append(self.name)
return self.module.run_command(cmd)
def create_be(self):
cmd = [self.module.get_bin_path('beadm')]
cmd.append('create')
if self.snapshot:
cmd.append('-e')
cmd.append(self.snapshot)
if not self.is_freebsd:
if self.description:
cmd.append('-d')
cmd.append(self.description)
if self.options:
cmd.append('-o')
cmd.append(self.options)
cmd.append(self.name)
return self.module.run_command(cmd)
def destroy_be(self):
cmd = [self.module.get_bin_path('beadm')]
cmd.append('destroy')
cmd.append('-F')
cmd.append(self.name)
return self.module.run_command(cmd)
def is_mounted(self):
(rc, out, _) = self._beadm_list()
if rc == 0:
if self.is_freebsd:
line = self._find_be_by_name(out)
# On FreeBSD, we exclude currently mounted BE on /, as it is
# special and can be activated even if it is mounted. That is not
# possible with non-root BEs.
                if line.split('\t')[2] != '-' and \
                        line.split('\t')[2] != '/':
return True
else:
if out.split(';')[3]:
return True
return False
def mount_be(self):
cmd = [self.module.get_bin_path('beadm')]
cmd.append('mount')
cmd.append(self.name)
if self.mountpoint:
cmd.append(self.mountpoint)
return self.module.run_command(cmd)
def unmount_be(self):
cmd = [self.module.get_bin_path('beadm')]
cmd.append('unmount')
if self.force:
cmd.append('-f')
cmd.append(self.name)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['be'], type='str'),
snapshot=dict(type='str'),
description=dict(type='str'),
options=dict(type='str'),
mountpoint=dict(default=False, type='path'),
state=dict(
default='present',
choices=['present', 'absent', 'activated',
'mounted', 'unmounted']),
force=dict(default=False, type='bool'),
),
supports_check_mode=True
)
be = BE(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = be.name
result['state'] = be.state
if be.snapshot:
result['snapshot'] = be.snapshot
if be.description:
result['description'] = be.description
if be.options:
result['options'] = be.options
if be.mountpoint:
result['mountpoint'] = be.mountpoint
if be.state == 'absent':
# beadm on FreeBSD and Solarish systems differs in delete behaviour in
# that we are not allowed to delete activated BE on FreeBSD while on
# Solarish systems we cannot delete BE if it is mounted. We add mount
# check for both platforms as BE should be explicitly unmounted before
# being deleted. On FreeBSD, we also check if the BE is activated.
if be.exists():
if not be.is_mounted():
if module.check_mode:
module.exit_json(changed=True)
if be.is_freebsd:
if be.is_activated():
module.fail_json(msg='Unable to remove active BE!')
(rc, out, err) = be.destroy_be()
if rc != 0:
module.fail_json(msg='Error while destroying BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
else:
module.fail_json(msg='Unable to remove BE as it is mounted!')
elif be.state == 'present':
if not be.exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = be.create_be()
if rc != 0:
module.fail_json(msg='Error while creating BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
elif be.state == 'activated':
if not be.is_activated():
if module.check_mode:
module.exit_json(changed=True)
# On FreeBSD, beadm is unable to activate mounted BEs, so we add
# an explicit check for that case.
if be.is_freebsd:
if be.is_mounted():
module.fail_json(msg='Unable to activate mounted BE!')
(rc, out, err) = be.activate_be()
if rc != 0:
module.fail_json(msg='Error while activating BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
elif be.state == 'mounted':
if not be.is_mounted():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = be.mount_be()
if rc != 0:
module.fail_json(msg='Error while mounting BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
elif be.state == 'unmounted':
if be.is_mounted():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = be.unmount_be()
if rc != 0:
module.fail_json(msg='Error while unmounting BE: "%s"' % err,
name=be.name,
stderr=err,
rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
darktears/chromium-crosswalk | tools/cr/cr/base/context.py | 103 | 6668 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Application context management for the cr tool.
Contains all the support code to enable the shared context used by the cr tool.
This includes the configuration variables and command line handling.
"""
import argparse
import os
import cr
class _DumpVisitor(cr.visitor.ExportVisitor):
"""A visitor that prints all variables in a config hierarchy."""
def __init__(self, with_source):
super(_DumpVisitor, self).__init__({})
self.to_dump = {}
self.with_source = with_source
def StartNode(self):
if self.with_source:
self._DumpNow()
super(_DumpVisitor, self).StartNode()
def EndNode(self):
if self.with_source or not self.stack:
self._DumpNow()
super(_DumpVisitor, self).EndNode()
if not self.stack:
self._DumpNow()
def Visit(self, key, value):
super(_DumpVisitor, self).Visit(key, value)
if key in self.store:
str_value = str(self.store[key])
if str_value != str(os.environ.get(key, None)):
self.to_dump[key] = str_value
def _DumpNow(self):
if self.to_dump:
if self.with_source:
print 'From', self.Where()
for key in sorted(self.to_dump.keys()):
print ' ', key, '=', self.to_dump[key]
self.to_dump = {}
class _ShowHelp(argparse.Action):
"""An argparse action to print the help text.
This is like the built in help text printing action, except it knows to do
nothing when we are just doing the early speculative parse of the args.
"""
def __call__(self, parser, namespace, values, option_string=None):
if cr.context.speculative:
return
command = cr.Command.GetActivePlugin()
if command:
command.parser.print_help()
else:
parser.print_help()
exit(1)
class _ArgumentParser(argparse.ArgumentParser):
"""An extension of an ArgumentParser to enable speculative parsing.
It supports doing an early parse that never produces errors or output, to do
early collection of arguments that may affect what other arguments are
allowed.
"""
def error(self, message):
if cr.context.speculative:
return
super(_ArgumentParser, self).error(message)
def parse_args(self):
if cr.context.speculative:
result = self.parse_known_args()
if result:
return result[0]
return None
return super(_ArgumentParser, self).parse_args()
def parse_known_args(self, args=None, namespace=None):
result = super(_ArgumentParser, self).parse_known_args(args, namespace)
if result is None:
return namespace, None
return result
# The context stack
_stack = []
class _ContextData:
pass
class Context(cr.config.Config):
"""The base context holder for the cr system.
This holds the common context shared throughout cr.
Mostly this is stored in the Config structure of variables.
"""
def __init__(self, name='Context'):
super(Context, self).__init__(name)
self._data = _ContextData()
def CreateData(self, description='', epilog=''):
self._data.args = None
self._data.arguments = cr.config.Config('ARGS')
self._data.derived = cr.config.Config('DERIVED')
self.AddChildren(*cr.config.GLOBALS)
self.AddChildren(
cr.config.Config('ENVIRONMENT', literal=True, export=True).Set(
{k: self.ParseValue(v) for k, v in os.environ.items()}),
self._data.arguments,
self._data.derived,
)
# Build the command line argument parser
self._data.parser = _ArgumentParser(add_help=False, description=description,
epilog=epilog)
self._data.subparsers = self.parser.add_subparsers()
# Add the global arguments
self.AddCommonArguments(self._data.parser)
self._data.gclient = {}
@property
def data(self):
return self._data
def __enter__(self):
""" To support using 'with cr.base.context.Create():'"""
_stack.append(self)
cr.context = self
return self
def __exit__(self, *_):
_stack.pop()
if _stack:
cr.context = _stack[-1]
return False
def AddSubParser(self, source):
parser = source.AddArguments(self._data.subparsers)
@classmethod
def AddCommonArguments(cls, parser):
"""Adds the command line arguments common to all commands in cr."""
parser.add_argument(
'-h', '--help',
action=_ShowHelp, nargs=0,
help='show the help message and exit.'
)
parser.add_argument(
'--dry-run', dest='CR_DRY_RUN',
action='store_true', default=None,
help="""
Don't execute commands, just print them. Implies verbose.
Overrides CR_DRY_RUN
"""
)
parser.add_argument(
'-v', '--verbose', dest='CR_VERBOSE',
action='count', default=None,
help="""
Print information about commands being performed.
Repeating multiple times increases the verbosity level.
Overrides CR_VERBOSE
"""
)
@property
def args(self):
return self._data.args
@property
def arguments(self):
return self._data.arguments
@property
def speculative(self):
return self._data.speculative
@property
def derived(self):
return self._data.derived
@property
def parser(self):
return self._data.parser
@property
def remains(self):
remains = getattr(self._data.args, '_remains', None)
if remains and remains[0] == '--':
remains = remains[1:]
return remains
@property
def verbose(self):
if self.autocompleting:
return False
return self.Find('CR_VERBOSE') or self.dry_run
@property
def dry_run(self):
if self.autocompleting:
return True
return self.Find('CR_DRY_RUN')
@property
def autocompleting(self):
return 'COMP_WORD' in os.environ
@property
def gclient(self):
if not self._data.gclient:
self._data.gclient = cr.base.client.ReadGClient()
return self._data.gclient
def ParseArgs(self, speculative=False):
cr.plugin.DynamicChoices.only_active = not speculative
self._data.speculative = speculative
self._data.args = self._data.parser.parse_args()
self._data.arguments.Wipe()
if self._data.args:
self._data.arguments.Set(
{k: v for k, v in vars(self._data.args).items() if v is not None})
def DumpValues(self, with_source):
_DumpVisitor(with_source).VisitNode(self)
def Create(description='', epilog=''):
context = Context()
context.CreateData(description=description, epilog=epilog)
return context
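# A minimal usage sketch (see Context.__enter__/__exit__ above): the created
# context is installed as cr.context for the duration of the block; the
# speculative pass is an assumption about typical call order, not required.
#     with cr.base.context.Create(description='cr tool') as context:
#         context.ParseArgs(speculative=True)  # early pass, errors suppressed
#         context.ParseArgs()                  # full pass, errors reported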
| bsd-3-clause |
sushantlp/wrdly | public/bower_components/jvectormap/converter/processor.py | 130 | 20218 | import sys
import json
import csv
import shapely.wkb
import shapely.geometry
import shapely.ops
import codecs
import os
import inspect
import copy
from osgeo import ogr
from osgeo import osr
from booleano.parser import Grammar, EvaluableParseManager, SymbolTable, Bind
from booleano.operations import Variable
class Map:
def __init__(self, name, language):
self.paths = {}
self.name = name
self.language = language
self.width = 0
self.height = 0
self.bbox = []
def addPath(self, path, code, name):
self.paths[code] = {"path": path, "name": name}
def getJSCode(self):
map = {"paths": self.paths, "width": self.width, "height": self.height, "insets": self.insets, "projection": self.projection}
return "jQuery.fn.vectorMap('addMap', '"+self.name+"_"+self.projection['type']+"',"+json.dumps(map)+');'
class Converter:
def __init__(self, config):
args = {
'buffer_distance': -0.4,
'simplify_tolerance': 0.2,
'longitude0': 0,
'projection': 'mill',
'name': 'world',
'width': 900,
'left': 0,
'top': 0,
'language': 'en',
'precision': 2,
'insets': []
}
args.update(config)
self.config = args
self.map = Map(args['name'], args.get('language'))
if args.get('sources'):
self.sources = args['sources']
else:
self.sources = [{
'input_file': args.get('input_file'),
'where': args.get('where'),
'name_field': args.get('name_field'),
'code_field': args.get('code_field'),
'input_file_encoding': args.get('input_file_encoding')
}]
default_source = {
'where': '',
'name_field': 0,
'code_field': 1,
'input_file_encoding': 'iso-8859-1'
}
for index in range(len(self.sources)):
for key in default_source:
if self.sources[index].get(key) is None:
self.sources[index][key] = default_source[key]
self.features = {}
self.width = args.get('width')
self.left = args.get('left')
self.top = args.get('top')
self.minimal_area = args.get('minimal_area')
self.longitude0 = float(args.get('longitude0'))
self.projection = args.get('projection')
self.precision = args.get('precision')
self.buffer_distance = args.get('buffer_distance')
self.simplify_tolerance = args.get('simplify_tolerance')
self.for_each = args.get('for_each')
self.emulate_longitude0 = args.get('emulate_longitude0')
if args.get('emulate_longitude0') is None and (self.projection == 'merc' or self.projection =='mill') and self.longitude0 != 0:
self.emulate_longitude0 = True
if args.get('viewport'):
self.viewport = map(lambda s: float(s), args.get('viewport').split(' '))
else:
self.viewport = False
# spatial reference to convert to
self.spatialRef = osr.SpatialReference()
projString = '+proj='+str(self.projection)+' +a=6381372 +b=6381372 +lat_0=0'
if not self.emulate_longitude0:
projString += ' +lon_0='+str(self.longitude0)
self.spatialRef.ImportFromProj4(projString)
# handle map insets
if args.get('insets'):
self.insets = args.get('insets')
else:
self.insets = []
def convert(self, data_source, output_file):
codes = map(lambda g: g.properties[self.config['code_field']], data_source.geometries)
main_codes = copy.copy(codes)
self.map.insets = []
envelope = []
for inset in self.insets:
insetBbox = self.renderMapInset(data_source, inset['codes'], inset['left'], inset['top'], inset['width'])
insetHeight = (insetBbox[3] - insetBbox[1]) * (inset['width'] / (insetBbox[2] - insetBbox[0]))
self.map.insets.append({
"bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
"left": inset['left'],
"top": inset['top'],
"width": inset['width'],
"height": insetHeight
})
envelope.append(
shapely.geometry.box(
inset['left'], inset['top'], inset['left'] + inset['width'], inset['top'] + insetHeight
)
)
for code in inset['codes']:
main_codes.remove(code)
insetBbox = self.renderMapInset(data_source, main_codes, self.left, self.top, self.width)
insetHeight = (insetBbox[3] - insetBbox[1]) * (self.width / (insetBbox[2] - insetBbox[0]))
envelope.append( shapely.geometry.box( self.left, self.top, self.left + self.width, self.top + insetHeight ) )
mapBbox = shapely.geometry.MultiPolygon( envelope ).bounds
self.map.width = mapBbox[2] + mapBbox[0]
self.map.height = mapBbox[3] + mapBbox[1]
self.map.insets.append({
"bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
"left": self.left,
"top": self.top,
"width": self.width,
"height": insetHeight
})
self.map.projection = {"type": self.projection, "centralMeridian": float(self.longitude0)}
open(output_file, 'w').write( self.map.getJSCode() )
if self.for_each is not None:
for code in codes:
childConfig = copy.deepcopy(self.for_each)
for param in ('input_file', 'output_file', 'where', 'name'):
childConfig[param] = childConfig[param].replace('{{code}}', code.lower())
converter = Converter(childConfig)
converter.convert(childConfig['output_file'])
def renderMapInset(self, data_source, codes, left, top, width):
envelope = []
geometries = filter(lambda g: g.properties[self.config['code_field']] in codes, data_source.geometries)
for geometry in geometries:
envelope.append( geometry.geom.envelope )
bbox = shapely.geometry.MultiPolygon( envelope ).bounds
scale = (bbox[2]-bbox[0]) / width
# generate SVG paths
for geometry in geometries:
geom = geometry.geom
if self.buffer_distance:
geom = geom.buffer(self.buffer_distance*scale, 1)
if geom.is_empty:
continue
if self.simplify_tolerance:
geom = geom.simplify(self.simplify_tolerance*scale, preserve_topology=True)
if isinstance(geom, shapely.geometry.multipolygon.MultiPolygon):
polygons = geom.geoms
else:
polygons = [geom]
path = ''
for polygon in polygons:
rings = []
rings.append(polygon.exterior)
rings.extend(polygon.interiors)
for ring in rings:
for pointIndex in range( len(ring.coords) ):
point = ring.coords[pointIndex]
if pointIndex == 0:
path += 'M'+str( round( (point[0]-bbox[0]) / scale + left, self.precision) )
path += ','+str( round( (bbox[3] - point[1]) / scale + top, self.precision) )
else:
path += 'l' + str( round(point[0]/scale - ring.coords[pointIndex-1][0]/scale, self.precision) )
path += ',' + str( round(ring.coords[pointIndex-1][1]/scale - point[1]/scale, self.precision) )
path += 'Z'
self.map.addPath(path, geometry.properties[self.config['code_field']], geometry.properties[self.config['name_field']])
return bbox
class Geometry:
def __init__(self, geometry, properties):
self.geom = geometry
self.properties = properties
class GeometryProperty(Variable):
operations = set(["equality", "membership"])
def __init__(self, name):
self.name = name
def equals(self, value, context):
return context[self.name] == value
def belongs_to(self, value, context):
return value in context[self.name]
def is_subset(self, value, context):
return set(value).issubset(set(context[self.name]))
def to_python(self, value):
return unicode(value[self.name])
class DataSource:
def __init__(self, config):
default_config = {
"projection": "merc",
"longitude0": 0
}
default_config.update(config)
self.config = default_config
self.spatialRef = osr.SpatialReference()
projString = '+proj='+str(self.config['projection'])+' +a=6381372 +b=6381372 +lat_0=0'
#if 'emulate_longitude0' in self.config and not self.config['emulate_longitude0']:
projString += ' +lon_0='+str(self.config['longitude0'])
self.spatialRef.ImportFromProj4(projString)
def load_data(self):
self.source = ogr.Open( self.config['file_name'], update = 0 )
self.layer = self.source.GetLayer(0)
if 'filter' in self.config and self.config['filter'] is not None:
self.layer.SetAttributeFilter( self.config['filter'].encode('ascii') )
self.layer_dfn = self.layer.GetLayerDefn()
self.fields = []
field_count = self.layer_dfn.GetFieldCount()
for field_index in range(field_count):
field = self.layer_dfn.GetFieldDefn( field_index )
self.fields.append({
'name': field.GetName(),
'type': field.GetType(),
'width': field.GetWidth(),
'precision': field.GetPrecision()
})
self.geometries = []
for feature in self.layer:
geometry = feature.GetGeometryRef()
geometry.TransformTo( self.spatialRef )
geometry = shapely.wkb.loads( geometry.ExportToWkb() )
if not geometry.is_valid:
geometry = geometry.buffer(0)
properties = {}
for field in self.fields:
properties[field['name']] = feature.GetFieldAsString(field['name']).decode('utf-8')
self.geometries.append( Geometry(geometry, properties) )
self.layer.ResetReading()
self.create_grammar()
def create_grammar(self):
root_table = SymbolTable("root",
map( lambda f: Bind(f['name'], GeometryProperty(f['name'])), self.fields )
)
tokens = {
'not': 'not',
'eq': '==',
'ne': '!=',
'belongs_to': 'in',
'is_subset': 'are included in',
'or': "or",
'and': 'and'
}
grammar = Grammar(**tokens)
self.parse_manager = EvaluableParseManager(root_table, grammar)
def output(self, output):
if output.get('format') == 'jvectormap':
self.output_jvm(output)
else:
self.output_ogr(output)
def output_ogr(self, output):
driver = ogr.GetDriverByName( 'ESRI Shapefile' )
if os.path.exists( output['file_name'] ):
driver.DeleteDataSource( output['file_name'] )
source = driver.CreateDataSource( output['file_name'] )
layer = source.CreateLayer( self.layer_dfn.GetName(),
geom_type = self.layer_dfn.GetGeomType(),
srs = self.layer.GetSpatialRef() )
for field in self.fields:
fd = ogr.FieldDefn( str(field['name']), field['type'] )
fd.SetWidth( field['width'] )
if 'precision' in field:
fd.SetPrecision( field['precision'] )
layer.CreateField( fd )
for geometry in self.geometries:
if geometry.geom is not None:
feature = ogr.Feature( feature_def = layer.GetLayerDefn() )
for index, field in enumerate(self.fields):
if field['name'] in geometry.properties:
feature.SetField( index, geometry.properties[field['name']].encode('utf-8') )
else:
feature.SetField( index, '' )
feature.SetGeometryDirectly(
ogr.CreateGeometryFromWkb(
shapely.wkb.dumps(
geometry.geom
)
)
)
layer.CreateFeature( feature )
feature.Destroy()
source.Destroy()
def output_jvm(self, output):
params = copy.deepcopy(output['params'])
params.update({
"projection": self.config["projection"],
"longitude0": self.config["longitude0"]
})
converter = Converter(params)
converter.convert(self, output['file_name'])
class PolygonSimplifier:
def __init__(self, geometries):
self.format = '%.8f %.8f'
self.tolerance = 0.05
self.geometries = geometries
connections = {}
counter = 0
for geom in geometries:
counter += 1
polygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
if polygon.area > 0:
lines = []
lines.append(polygon.exterior)
for line in polygon.interiors:
lines.append(line)
for line in lines:
for i in range(len(line.coords)-1):
indexFrom = i
indexTo = i+1
pointFrom = self.format % line.coords[indexFrom]
pointTo = self.format % line.coords[indexTo]
if pointFrom == pointTo:
continue
if not (pointFrom in connections):
connections[pointFrom] = {}
connections[pointFrom][pointTo] = 1
if not (pointTo in connections):
connections[pointTo] = {}
connections[pointTo][pointFrom] = 1
self.connections = connections
self.simplifiedLines = {}
self.pivotPoints = {}
def simplifyRing(self, ring):
coords = list(ring.coords)[0:-1]
simpleCoords = []
isPivot = False
pointIndex = 0
while not isPivot and pointIndex < len(coords):
pointStr = self.format % coords[pointIndex]
pointIndex += 1
isPivot = ((len(self.connections[pointStr]) > 2) or (pointStr in self.pivotPoints))
pointIndex = pointIndex - 1
if not isPivot:
simpleRing = shapely.geometry.LineString(coords).simplify(self.tolerance)
if len(simpleRing.coords) <= 2:
return None
else:
self.pivotPoints[self.format % coords[0]] = True
self.pivotPoints[self.format % coords[-1]] = True
simpleLineKey = self.format % coords[0]+':'+self.format % coords[1]+':'+self.format % coords[-1]
self.simplifiedLines[simpleLineKey] = simpleRing.coords
return simpleRing
else:
points = coords[pointIndex:len(coords)]
points.extend(coords[0:pointIndex+1])
iFrom = 0
for i in range(1, len(points)):
pointStr = self.format % points[i]
if ((len(self.connections[pointStr]) > 2) or (pointStr in self.pivotPoints)):
line = points[iFrom:i+1]
lineKey = self.format % line[-1]+':'+self.format % line[-2]+':'+self.format % line[0]
if lineKey in self.simplifiedLines:
simpleLine = self.simplifiedLines[lineKey]
simpleLine = list(reversed(simpleLine))
else:
simpleLine = shapely.geometry.LineString(line).simplify(self.tolerance).coords
lineKey = self.format % line[0]+':'+self.format % line[1]+':'+self.format % line[-1]
self.simplifiedLines[lineKey] = simpleLine
simpleCoords.extend( simpleLine[0:-1] )
iFrom = i
if len(simpleCoords) <= 2:
return None
else:
return shapely.geometry.LineString(simpleCoords)
def simplifyPolygon(self, polygon):
simpleExtRing = self.simplifyRing(polygon.exterior)
if simpleExtRing is None:
return None
simpleIntRings = []
for ring in polygon.interiors:
simpleIntRing = self.simplifyRing(ring)
if simpleIntRing is not None:
simpleIntRings.append(simpleIntRing)
return shapely.geometry.Polygon(simpleExtRing, simpleIntRings)
def simplify(self):
results = []
for geom in self.geometries:
polygons = []
simplePolygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
simplePolygon = self.simplifyPolygon(polygon)
if not (simplePolygon is None or simplePolygon._geom is None):
simplePolygons.append(simplePolygon)
if len(simplePolygons) > 0:
results.append(shapely.geometry.MultiPolygon(simplePolygons))
else:
results.append(None)
return results
class Processor:
def __init__(self, config):
self.config = config
def process(self):
self.data_sources = {}
for action in self.config:
getattr(self, action['name'])( action, self.data_sources.get(".") )
def read_data(self, config, data_source):
self.data_sources["."] = DataSource( config )
self.data_sources["."].load_data()
def write_data(self, config, data_source):
data_source.output( config )
def union(self, config, data_source):
groups = {}
geometries = []
for geometry in data_source.geometries:
if geometry.properties[config['by']] in groups:
groups[geometry.properties[config['by']]]['geoms'].append(geometry.geom)
else:
groups[geometry.properties[config['by']]] = {
'geoms': [geometry.geom],
'properties': geometry.properties
}
for key in groups:
geometries.append( Geometry(shapely.ops.cascaded_union( groups[key]['geoms'] ), groups[key]['properties']) )
data_source.geometries = geometries
def merge(self, config, data_source):
new_geometries = []
for rule in config['rules']:
expression = data_source.parse_manager.parse( rule['where'] )
geometries = filter(lambda g: expression(g.properties), data_source.geometries)
geometries = map(lambda g: g.geom, geometries)
new_geometries.append( Geometry(shapely.ops.cascaded_union( geometries ), rule['fields']) )
data_source.fields = config['fields']
data_source.geometries = new_geometries
def join_data(self, config, data_source):
field_names = [f['name'] for f in config['fields']]
if 'data' in config:
data_col = config['data']
else:
data_file = open(config['file_name'], 'rb')
data_col = csv.reader(data_file, delimiter='\t', quotechar='"')
data = {}
for row in data_col:
row_dict = dict(zip(field_names, row))
data[row_dict.pop(config['on'])] = row_dict
for geometry in data_source.geometries:
if geometry.properties[config['on']] in data:
geometry.properties.update( data[geometry.properties[config['on']]] )
field_names = map(lambda f: f['name'], data_source.fields)
data_source.fields = data_source.fields + filter(lambda f: f['name'] not in field_names, config['fields'])
def remove(self, config, data_source):
expression = data_source.parse_manager.parse( config['where'] )
data_source.geometries = filter(lambda g: not expression(g.properties), data_source.geometries)
def remove_fields(self, config, data_source):
data_source.fields = filter(lambda f: f.name not in config['fields'], data_source.fields)
def remove_other_fields(self, config, data_source):
data_source.fields = filter(lambda f: f['name'] in config['fields'], data_source.fields)
def buffer(self, config, data_source):
for geometry in data_source.geometries:
geometry.geom = geometry.geom.buffer(config['distance'], config['resolution'])
def simplify_adjancent_polygons(self, config, data_source):
simple_geometries = PolygonSimplifier( map( lambda g: g.geom, data_source.geometries ) ).simplify()
for i in range(len(data_source.geometries)):
data_source.geometries[i].geom = simple_geometries[i]
def intersect_rect(self, config, data_source):
transform = osr.CoordinateTransformation( data_source.layer.GetSpatialRef(), data_source.spatialRef )
point1 = transform.TransformPoint(config['rect'][0], config['rect'][1])
point2 = transform.TransformPoint(config['rect'][2], config['rect'][3])
rect = shapely.geometry.box(point1[0], point1[1], point2[0], point2[1])
for geometry in data_source.geometries:
geometry.geom = geometry.geom.intersection(rect)
def remove_small_polygons(self, config, data_source):
for geometry in data_source.geometries:
if isinstance(geometry.geom, shapely.geometry.multipolygon.MultiPolygon):
polygons = geometry.geom.geoms
else:
polygons = [geometry.geom]
polygons = filter(lambda p: p.area > config['minimal_area'], polygons)
if len(polygons) > 0:
geometry.geom = shapely.geometry.multipolygon.MultiPolygon(polygons)
args = {}
if len(sys.argv) > 1:
paramsJson = open(sys.argv[1], 'r').read()
else:
paramsJson = sys.stdin.read()
paramsJson = json.loads(paramsJson)
processor = Processor(paramsJson)
processor.process()
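# A minimal sketch of the JSON configuration this script expects on stdin or as
# sys.argv[1]: a list of actions dispatched by name. Only keys actually read by
# read_data/write_data above are shown; the file names and field names are
# illustrative placeholders.
#     [
#       {"name": "read_data", "file_name": "countries.shp",
#        "projection": "mill", "longitude0": 0},
#       {"name": "write_data", "format": "jvectormap", "file_name": "world-mill.js",
#        "params": {"name": "world", "width": 900,
#                   "code_field": "iso_a2", "name_field": "admin"}}
#     ]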
| gpl-3.0 |
saiwing-yeung/scikit-learn | examples/model_selection/plot_learning_curve.py | 33 | 4505 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
fluks/youtube-dl | youtube_dl/extractor/vidme.py | 36 | 2580 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
str_to_int,
)
class VidmeIE(InfoExtractor):
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
_TEST = {
'url': 'https://vid.me/QNB',
'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
'info_dict': {
'id': 'QNB',
'ext': 'mp4',
'title': 'Fishing for piranha - the easy way',
'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
'duration': 119.92,
'timestamp': 1406313244,
'upload_date': '20140725',
'thumbnail': 're:^https?://.*\.jpg',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(r'<source src="([^"]+)"', webpage, 'video URL')
title = self._og_search_title(webpage)
description = self._og_search_description(webpage, default='')
thumbnail = self._og_search_thumbnail(webpage)
timestamp = int_or_none(self._og_search_property('updated_time', webpage, fatal=False))
width = int_or_none(self._og_search_property('video:width', webpage, fatal=False))
height = int_or_none(self._og_search_property('video:height', webpage, fatal=False))
duration = float_or_none(self._html_search_regex(
r'data-duration="([^"]+)"', webpage, 'duration', fatal=False))
view_count = str_to_int(self._html_search_regex(
r'<span class="video_views">\s*([\d,\.]+)\s*plays?', webpage, 'view count', fatal=False))
like_count = str_to_int(self._html_search_regex(
r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">',
webpage, 'like count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
r'class="js-comment-count"[^>]+data-count="([\d,\.\s]+)">',
webpage, 'comment count', fatal=False))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'width': width,
'height': height,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
}
| unlicense |
etzhou/edx-platform | lms/djangoapps/courseware/entrance_exams.py | 34 | 6544 | """
This file contains all entrance exam related utils/logic.
"""
from django.conf import settings
from courseware.access import has_access
from courseware.model_data import FieldDataCache, ScoresClient
from opaque_keys.edx.keys import UsageKey
from student.models import EntranceExamConfiguration
from util.milestones_helpers import get_required_content
from util.module_utils import yield_dynamic_descriptor_descendants
from xmodule.modulestore.django import modulestore
def feature_is_enabled():
"""
Checks to see if the Entrance Exams feature is enabled
Use this operation instead of checking the feature flag all over the place
"""
return settings.FEATURES.get('ENTRANCE_EXAMS', False)
def course_has_entrance_exam(course):
"""
Checks to see if a course is properly configured for an entrance exam
"""
if not feature_is_enabled():
return False
if not course.entrance_exam_enabled:
return False
if not course.entrance_exam_id:
return False
return True
def user_can_skip_entrance_exam(request, user, course):
"""
Checks all of the various override conditions for a user to skip an entrance exam
Begin by short-circuiting if the course does not have an entrance exam
"""
if not course_has_entrance_exam(course):
return True
if not user.is_authenticated():
return False
if has_access(user, 'staff', course):
return True
if EntranceExamConfiguration.user_can_skip_entrance_exam(user, course.id):
return True
if not get_entrance_exam_content(request, course):
return True
return False
def user_has_passed_entrance_exam(request, course):
"""
Checks to see if the user has attained a sufficient score to pass the exam
Begin by short-circuiting if the course does not have an entrance exam
"""
if not course_has_entrance_exam(course):
return True
if not request.user.is_authenticated():
return False
entrance_exam_score = get_entrance_exam_score(request, course)
if entrance_exam_score >= course.entrance_exam_minimum_score_pct:
return True
return False
# pylint: disable=invalid-name
def user_must_complete_entrance_exam(request, user, course):
"""
Some courses can be gated on an Entrance Exam, which is a specially-configured chapter module which
presents users with a problem set which they must complete. This particular workflow determines
whether or not the user is allowed to clear the Entrance Exam gate and access the rest of the course.
"""
# First, let's see if the user is allowed to skip
if user_can_skip_entrance_exam(request, user, course):
return False
# If they can't actually skip the exam, we'll need to see if they've already passed it
if user_has_passed_entrance_exam(request, course):
return False
# Can't skip, haven't passed, must take the exam
return True
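# A minimal sketch of how a caller might gate courseware access on this check
# (the handling of exam_chapter is illustrative, not the actual view logic):
#     if user_must_complete_entrance_exam(request, request.user, course):
#         exam_chapter = get_entrance_exam_content(request, course)
#         # ...route the user to exam_chapter instead of the requested content...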
def _calculate_entrance_exam_score(user, course_descriptor, exam_modules):
"""
Calculates the score (percent) of the entrance exam using the provided modules
"""
student_module_dict = {}
scores_client = ScoresClient(course_descriptor.id, user.id)
locations = [exam_module.location for exam_module in exam_modules]
scores_client.fetch_scores(locations)
    # Iterate over all of the exam modules to get the user's score for each of them
for exam_module in exam_modules:
exam_module_score = scores_client.get(exam_module.location)
if exam_module_score:
student_module_dict[unicode(exam_module.location)] = {
'grade': exam_module_score.correct,
'max_grade': exam_module_score.total
}
exam_percentage = 0
module_percentages = []
ignore_categories = ['course', 'chapter', 'sequential', 'vertical']
for module in exam_modules:
if module.graded and module.category not in ignore_categories:
module_percentage = 0
module_location = unicode(module.location)
if module_location in student_module_dict and student_module_dict[module_location]['max_grade']:
student_module = student_module_dict[module_location]
module_percentage = student_module['grade'] / student_module['max_grade']
module_percentages.append(module_percentage)
if module_percentages:
exam_percentage = sum(module_percentages) / float(len(module_percentages))
return exam_percentage
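# Worked example of the calculation above: two graded exam modules scored 1/2
# and 1/1 give module percentages 0.5 and 1.0, so the exam percentage is
# (0.5 + 1.0) / 2 = 0.75, which user_has_passed_entrance_exam() then compares
# against course.entrance_exam_minimum_score_pct.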
def get_entrance_exam_score(request, course):
"""
Gather the set of modules which comprise the entrance exam
Note that 'request' may not actually be a genuine request, due to the
circular nature of module_render calling entrance_exams and get_module_for_descriptor
being used here. In some use cases, the caller is actually mocking a request, although
in these scenarios the 'user' child object can be trusted and used as expected.
It's a much larger refactoring job to break this legacy mess apart, unfortunately.
"""
exam_key = UsageKey.from_string(course.entrance_exam_id)
exam_descriptor = modulestore().get_item(exam_key)
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor (imported here to avoid circular reference)
"""
from courseware.module_render import get_module_for_descriptor
field_data_cache = FieldDataCache([descriptor], course.id, request.user)
return get_module_for_descriptor(
request.user,
request,
descriptor,
field_data_cache,
course.id,
course=course
)
exam_module_generators = yield_dynamic_descriptor_descendants(
exam_descriptor,
request.user.id,
inner_get_module
)
exam_modules = [module for module in exam_module_generators]
return _calculate_entrance_exam_score(request.user, course, exam_modules)
def get_entrance_exam_content(request, course):
"""
Get the entrance exam content information (ie, chapter module)
"""
required_content = get_required_content(course, request.user)
exam_module = None
for content in required_content:
usage_key = course.id.make_usage_key_from_deprecated_string(content)
module_item = modulestore().get_item(usage_key)
if not module_item.hide_from_toc and module_item.is_entrance_exam:
exam_module = module_item
break
return exam_module
| agpl-3.0 |
chienlieu2017/it_management | odoo/addons/mail/tests/test_invite.py | 20 | 2267 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.mail.tests.common import TestMail
from odoo.tools import mute_logger
class TestInvite(TestMail):
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_invite_email(self):
mail_invite = self.env['mail.wizard.invite'].with_context({
'default_res_model': 'mail.channel',
'default_res_id': self.group_pigs.id
}).sudo(self.user_employee.id).create({
'partner_ids': [(4, self.user_portal.partner_id.id), (4, self.partner_1.id)],
'send_mail': True})
mail_invite.add_followers()
# Test: Pigs followers should contain Admin, Bert
self.assertEqual(self.group_pigs.message_partner_ids,
self.user_portal.partner_id | self.partner_1,
'invite wizard: Pigs followers after invite is incorrect, should be Admin + added follower')
self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'),
self.env['mail.channel'],
'invite wizard: Pigs followers after invite is incorrect, should not have channels')
# Test: (pretend to) send email and check subject, body
self.assertEqual(len(self._mails), 2, 'invite wizard: sent email number incorrect, should be only for Bert')
self.assertEqual(self._mails[0].get('subject'), 'Invitation to follow Discussion channel: Pigs',
'invite wizard: subject of invitation email is incorrect')
self.assertEqual(self._mails[1].get('subject'), 'Invitation to follow Discussion channel: Pigs',
'invite wizard: subject of invitation email is incorrect')
self.assertIn('%s invited you to follow Discussion channel document: Pigs' % self.user_employee.name,
self._mails[0].get('body'),
'invite wizard: body of invitation email is incorrect')
self.assertIn('%s invited you to follow Discussion channel document: Pigs' % self.user_employee.name,
self._mails[1].get('body'),
'invite wizard: body of invitation email is incorrect')
| gpl-3.0 |
harshavardhana/minio-py | examples/list_buckets.py | 3 | 1026 | # -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
# dummy values, please replace them with original values.
from minio import Minio
client = Minio('s3.amazonaws.com',
access_key='YOUR-ACCESSKEYID',
secret_key='YOUR-SECRETACCESSKEY')
buckets = client.list_buckets()
for bucket in buckets:
print(bucket.name, bucket.creation_date)
| apache-2.0 |
farhi-naz/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/ordered_dict.py | 131 | 2984 | # Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This code is obtained from http://code.activestate.com/recipes/576669/
from collections import MutableMapping
class OrderedDict(dict, MutableMapping):
# Methods with direct access to underlying attributes
def __init__(self, *args, **kwds):
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
if not hasattr(self, '_keys'):
self._keys = []
self.update(*args, **kwds)
def clear(self):
del self._keys[:]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __iter__(self):
return iter(self._keys)
def __reversed__(self):
return reversed(self._keys)
def popitem(self):
if not self:
raise KeyError
key = self._keys.pop()
value = dict.pop(self, key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
inst_dict.pop('_keys', None)
return (self.__class__, (items,), inst_dict)
# Methods with indirect access via the above methods
setdefault = MutableMapping.setdefault
update = MutableMapping.update
pop = MutableMapping.pop
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
def __repr__(self):
pairs = ', '.join(map('%r: %r'.__mod__, self.items()))
return '%s({%s})' % (self.__class__.__name__, pairs)
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
| bsd-3-clause |
apporc/cinder | cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py | 2 | 64492 | # Copyright (c) 2014 Andrew Kerr
# Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Yogesh Kshirsagar
# Copyright (c) 2015 Michael Price
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import time
import mock
from oslo_utils import units
import six
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import utils as cinder_utils
from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
eseries_fake
from cinder.volume.drivers.netapp.eseries import client as es_client
from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
from cinder.volume.drivers.netapp.eseries import host_mapper
from cinder.volume.drivers.netapp.eseries import library
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.zonemanager import utils as fczm_utils
def get_fake_volume():
return {
'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
'os_type': 'linux', 'provider_location': 'lun1',
'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'provider_auth': 'provider a b', 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'migration_status': None, 'attach_status':
"detached"
}
@ddt.ddt
class NetAppEseriesLibraryTestCase(test.TestCase):
def setUp(self):
super(NetAppEseriesLibraryTestCase, self).setUp()
kwargs = {'configuration':
eseries_fake.create_configuration_eseries()}
self.library = library.NetAppESeriesLibrary('FAKE', **kwargs)
# Deprecated Option
self.library.configuration.netapp_storage_pools = None
self.library._client = eseries_fake.FakeEseriesClient()
with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                        new=cinder_utils.ZeroIntervalLoopingCall):
self.library.check_for_setup_error()
def test_do_setup(self):
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
self.mock_object(es_client, 'RestClient',
eseries_fake.FakeEseriesClient)
mock_check_flags = self.mock_object(na_utils, 'check_flags')
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
@ddt.data(('optimal', True), ('offline', False), ('needsAttn', True),
('neverContacted', False), ('newKey', True), (None, True))
@ddt.unpack
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_check_storage_system_status(self, status, status_valid):
system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM)
system['status'] = status
status = status.lower() if status is not None else ''
actual_status, actual_valid = (
self.library._check_storage_system_status(system))
self.assertEqual(status, actual_status)
self.assertEqual(status_valid, actual_valid)
@ddt.data(('valid', True), ('invalid', False), ('unknown', False),
('newKey', True), (None, True))
@ddt.unpack
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_check_password_status(self, status, status_valid):
system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM)
system['passwordStatus'] = status
status = status.lower() if status is not None else ''
actual_status, actual_valid = (
self.library._check_password_status(system))
self.assertEqual(status, actual_status)
self.assertEqual(status_valid, actual_valid)
def test_check_storage_system_bad_system(self):
exc_str = "bad_system"
controller_ips = self.library.configuration.netapp_controller_ips
self.library._client.list_storage_system = mock.Mock(
side_effect=exception.NetAppDriverException(message=exc_str))
info_log = self.mock_object(library.LOG, 'info', mock.Mock())
self.assertRaisesRegexp(exception.NetAppDriverException, exc_str,
self.library._check_storage_system)
info_log.assert_called_once_with(mock.ANY, controller_ips)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_check_storage_system(self):
system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM)
self.mock_object(self.library._client, 'list_storage_system',
new_attr=mock.Mock(return_value=system))
update_password = self.mock_object(self.library._client,
'update_stored_system_password')
info_log = self.mock_object(library.LOG, 'info', mock.Mock())
self.library._check_storage_system()
self.assertTrue(update_password.called)
self.assertTrue(info_log.called)
@ddt.data({'status': 'optimal', 'passwordStatus': 'invalid'},
{'status': 'offline', 'passwordStatus': 'valid'})
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_check_storage_system_bad_status(self, system):
self.mock_object(self.library._client, 'list_storage_system',
new_attr=mock.Mock(return_value=system))
self.mock_object(self.library._client, 'update_stored_system_password')
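        # Make time.time() return increasing values (0, 5, ..., 55 seconds)
        # so the status check's retry window elapses immediately.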
        self.mock_object(time, 'time', new_attr=mock.Mock(
side_effect=xrange(0, 60, 5)))
self.assertRaisesRegexp(exception.NetAppDriverException,
'bad.*?status',
self.library._check_storage_system)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
cinder_utils.ZeroIntervalLoopingCall)
def test_check_storage_system_update_password(self):
self.library.configuration.netapp_sa_password = 'password'
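        # Yield a system whose stored password is reported invalid twice
        # before becoming valid, exercising the password-update retry path.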
def get_system_iter():
key = 'passwordStatus'
system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM)
system[key] = 'invalid'
yield system
yield system
system[key] = 'valid'
yield system
self.mock_object(self.library._client, 'list_storage_system',
new_attr=mock.Mock(side_effect=get_system_iter()))
update_password = self.mock_object(self.library._client,
'update_stored_system_password',
new_attr=mock.Mock())
info_log = self.mock_object(library.LOG, 'info', mock.Mock())
self.library._check_storage_system()
update_password.assert_called_once_with(
self.library.configuration.netapp_sa_password)
self.assertTrue(info_log.called)
def test_get_storage_pools_empty_result(self):
"""Verify an exception is raised if no pools are returned."""
self.library.configuration.netapp_pool_name_search_pattern = '$'
def test_get_storage_pools_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid."""
self.library.configuration.netapp_pool_name_search_pattern = '(.*'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_storage_pools)
def test_get_storage_pools_default(self):
"""Verify that all pools are returned if the search option is empty."""
filtered_pools = self.library._get_storage_pools()
self.assertEqual(eseries_fake.STORAGE_POOLS, filtered_pools)
@ddt.data(('[\d]+,a', ['1', '2', 'a', 'b'], ['1', '2', 'a']),
('1 , 3', ['1', '2', '3'], ['1', '3']),
('$,3', ['1', '2', '3'], ['3']),
('[a-zA-Z]+', ['1', 'a', 'B'], ['a', 'B']),
('', ['1', '2'], ['1', '2'])
)
@ddt.unpack
def test_get_storage_pools(self, pool_filter, pool_labels,
expected_pool_labels):
"""Verify that pool filtering via the search_pattern works correctly
:param pool_filter: A regular expression to be used for filtering via
pool labels
:param pool_labels: A list of pool labels
:param expected_pool_labels: The labels from 'pool_labels' that
should be matched by 'pool_filter'
"""
self.library.configuration.netapp_pool_name_search_pattern = (
pool_filter)
pools = [{'label': label} for label in pool_labels]
self.library._client.list_storage_pools = mock.Mock(
return_value=pools)
filtered_pools = self.library._get_storage_pools()
filtered_pool_labels = [pool['label'] for pool in filtered_pools]
self.assertEqual(expected_pool_labels, filtered_pool_labels)
def test_get_volume(self):
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.list_volume = mock.Mock(return_value=volume)
result = self.library._get_volume(fake_volume['id'])
self.assertEqual(1, self.library._client.list_volume.call_count)
self.assertDictMatch(volume, result)
def test_get_volume_bad_input(self):
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.list_volume = mock.Mock(return_value=volume)
self.assertRaises(exception.InvalidInput, self.library._get_volume,
None)
def test_get_volume_bad_uuid(self):
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.list_volume = mock.Mock(return_value=volume)
self.assertRaises(ValueError, self.library._get_volume, '1')
def test_update_ssc_info_no_ssc(self):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'driveMediaType': 'ssd'}]
pools = [{'volumeGroupRef': 'test_vg1', 'label': 'test_vg1',
'raidLevel': 'raid6', 'securityType': 'enabled'}]
self.library._client = mock.Mock()
self.library._client.features.SSC_API_V2 = na_utils.FeatureState(
False, minimum_version="1.53.9000.1")
self.library._client.SSC_VALID_VERSIONS = [(1, 53, 9000, 1),
(1, 53, 9010, 15)]
self.library.configuration.netapp_pool_name_search_pattern = "test_vg1"
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
self.library._client.list_drives = mock.Mock(return_value=drives)
self.library._update_ssc_info()
self.assertEqual(
{'test_vg1': {'netapp_disk_encryption': 'true',
'netapp_disk_type': 'SSD',
'netapp_raid_type': 'raid6'}},
self.library._ssc_stats)
@ddt.data(True, False)
def test_update_ssc_info(self, data_assurance_supported):
self.library._client = mock.Mock()
self.library._client.features.SSC_API_V2 = na_utils.FeatureState(
True, minimum_version="1.53.9000.1")
self.library._client.list_ssc_storage_pools = mock.Mock(
return_value=eseries_fake.SSC_POOLS)
self.library._get_storage_pools = mock.Mock(
return_value=eseries_fake.STORAGE_POOLS)
# Data Assurance is not supported on some storage backends
self.library._is_data_assurance_supported = mock.Mock(
return_value=data_assurance_supported)
self.library._update_ssc_info()
for pool in eseries_fake.SSC_POOLS:
poolId = pool['poolId']
raid_lvl = self.library.SSC_RAID_TYPE_MAPPING.get(
pool['raidLevel'], 'unknown')
if pool['pool']["driveMediaType"] == 'ssd':
disk_type = 'SSD'
else:
disk_type = pool['pool']['drivePhysicalType']
disk_type = (
self.library.SSC_DISK_TYPE_MAPPING.get(
disk_type, 'unknown'))
da_enabled = pool['dataAssuranceCapable'] and (
data_assurance_supported)
thin_provisioned = pool['thinProvisioningCapable']
expected = {
'netapp_disk_encryption':
six.text_type(pool['encrypted']).lower(),
'netapp_eseries_flash_read_cache':
six.text_type(pool['flashCacheCapable']).lower(),
'netapp_thin_provisioned':
six.text_type(thin_provisioned).lower(),
'netapp_eseries_data_assurance':
six.text_type(da_enabled).lower(),
'netapp_eseries_disk_spindle_speed': pool['spindleSpeed'],
'netapp_raid_type': raid_lvl,
'netapp_disk_type': disk_type
}
actual = self.library._ssc_stats[poolId]
self.assertDictMatch(expected, actual)
@ddt.data(('FC', True), ('iSCSI', False))
@ddt.unpack
def test_is_data_assurance_supported(self, backend_storage_protocol,
enabled):
self.mock_object(self.library, 'driver_protocol',
backend_storage_protocol)
actual = self.library._is_data_assurance_supported()
self.assertEqual(enabled, actual)
@ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage')
def test_update_ssc_disk_types(self, disk_type):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'interfaceType': {'driveType': disk_type}}]
pools = [{'volumeGroupRef': 'test_vg1'}]
self.library._client.list_drives = mock.Mock(return_value=drives)
self.library._client.get_storage_pool = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_types(pools)
expected = self.library.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
self.assertEqual({'test_vg1': {'netapp_disk_type': expected}},
ssc_stats)
@ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage')
def test_update_ssc_disk_types_ssd(self, disk_type):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'driveMediaType': 'ssd', 'driveType': disk_type}]
pools = [{'volumeGroupRef': 'test_vg1'}]
self.library._client.list_drives = mock.Mock(return_value=drives)
self.library._client.get_storage_pool = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_types(pools)
self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}},
ssc_stats)
@ddt.data('enabled', 'none', 'capable', 'unknown', '__UNDEFINED',
'garbage')
def test_update_ssc_disk_encryption(self, securityType):
pools = [{'volumeGroupRef': 'test_vg1', 'securityType': securityType}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_encryption(pools)
# Convert the boolean value to a lower-case string value
expected = 'true' if securityType == "enabled" else 'false'
self.assertEqual({'test_vg1': {'netapp_disk_encryption': expected}},
ssc_stats)
def test_update_ssc_disk_encryption_multiple(self):
pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'},
{'volumeGroupRef': 'test_vg2', 'securityType': 'enabled'}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_encryption(pools)
self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'},
'test_vg2': {'netapp_disk_encryption': 'true'}},
ssc_stats)
@ddt.data(True, False)
def test_get_volume_stats(self, refresh):
fake_stats = {'key': 'val'}
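        # Stand-in side effect for _update_volume_stats() that simply
        # populates the cached stats dictionary.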
def populate_stats():
self.library._stats = fake_stats
self.library._update_volume_stats = mock.Mock(
side_effect=populate_stats)
self.library._update_ssc_info = mock.Mock()
self.library._ssc_stats = {self.library.THIN_UQ_SPEC: True}
        actual = self.library.get_volume_stats(refresh=refresh)
        if refresh:
self.library._update_volume_stats.assert_called_once_with()
self.assertEqual(fake_stats, actual)
else:
self.assertEqual(0, self.library._update_volume_stats.call_count)
self.assertEqual(0, self.library._update_ssc_info.call_count)
def test_get_volume_stats_no_ssc(self):
"""Validate that SSC data is collected if not yet populated"""
fake_stats = {'key': 'val'}
def populate_stats():
self.library._stats = fake_stats
self.library._update_volume_stats = mock.Mock(
side_effect=populate_stats)
self.library._update_ssc_info = mock.Mock()
self.library._ssc_stats = None
        actual = self.library.get_volume_stats(refresh=True)
self.library._update_volume_stats.assert_called_once_with()
self.library._update_ssc_info.assert_called_once_with()
self.assertEqual(fake_stats, actual)
def test_update_volume_stats_provisioning(self):
"""Validate pool capacity calculations"""
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[
"volumeGroupRef"]: {self.library.THIN_UQ_SPEC: True}})
self.library.configuration = mock.Mock()
reserved_pct = 5
over_subscription_ratio = 1.0
self.library.configuration.max_over_subscription_ratio = (
over_subscription_ratio)
self.library.configuration.reserved_percentage = reserved_pct
total_gb = int(fake_pool['totalRaidedSpace']) / units.Gi
used_gb = int(fake_pool['usedSpace']) / units.Gi
free_gb = total_gb - used_gb
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
self.assertEqual(fake_pool['label'], pool_stats.get('pool_name'))
self.assertEqual(reserved_pct, pool_stats['reserved_percentage'])
self.assertEqual(over_subscription_ratio,
pool_stats['max_oversubscription_ratio'])
self.assertEqual(total_gb, pool_stats.get('total_capacity_gb'))
self.assertEqual(used_gb, pool_stats.get('provisioned_capacity_gb'))
self.assertEqual(free_gb, pool_stats.get('free_capacity_gb'))
@ddt.data(False, True)
def test_update_volume_stats_thin_provisioning(self, thin_provisioning):
"""Validate that thin provisioning support is correctly reported"""
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[
"volumeGroupRef"]: {self.library.THIN_UQ_SPEC: thin_provisioning}})
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
self.assertEqual(thin_provisioning, pool_stats.get(
'thin_provisioning_support'))
# Should always be True
self.assertTrue(pool_stats.get('thick_provisioning_support'))
def test_update_volume_stats_ssc(self):
"""Ensure that the SSC data is correctly reported in the pool stats"""
ssc = {self.library.THIN_UQ_SPEC: True, 'key': 'val'}
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[
"volumeGroupRef"]: ssc})
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
for key in ssc:
self.assertIn(key, pool_stats)
self.assertEqual(ssc[key], pool_stats[key])
def test_update_volume_stats_no_ssc(self):
"""Ensure that that pool stats are correctly reported without SSC"""
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
self.assertFalse(pool_stats.get('thin_provisioning_support'))
# Should always be True
self.assertTrue(pool_stats.get('thick_provisioning_support'))
def test_terminate_connection_iscsi_no_hosts(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_iscsi,
get_fake_volume(),
connector)
def test_terminate_connection_iscsi_volume_not_mapped(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
volume = copy.deepcopy(eseries_fake.VOLUME)
volume['listOfMappings'] = []
self.library._get_volume = mock.Mock(return_value=volume)
self.assertRaises(eseries_exc.VolumeNotMapped,
self.library.terminate_connection_iscsi,
get_fake_volume(),
connector)
def test_terminate_connection_iscsi_volume_mapped(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
eseries_fake.VOLUME_MAPPING
]
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.library.terminate_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_iscsi_not_mapped_initiator_does_not_exist(
self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[eseries_fake.HOST_2]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_iscsi,
get_fake_volume(),
connector)
def test_initialize_connection_iscsi_volume_not_mapped(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
eseries_fake.VOLUME_MAPPING
]
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.library.initialize_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_iscsi_volume_not_mapped_host_does_not_exist(
self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'create_host_with_ports',
mock.Mock(return_value=eseries_fake.HOST))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
eseries_fake.VOLUME_MAPPING
]
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.library.initialize_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(self.library._client.list_hosts.called)
self.assertTrue(self.library._client.create_host_with_ports.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_iscsi_volume_already_mapped_to_target_host(
self):
"""Should be a no-op"""
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.library.initialize_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_iscsi_volume_mapped_to_another_host(self):
"""Should raise error saying multiattach not enabled"""
connector = {'initiator': eseries_fake.INITIATOR_NAME}
fake_mapping_to_other_host = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[
'hostRef']
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
side_effect=exception.NetAppDriverException))
self.assertRaises(exception.NetAppDriverException,
self.library.initialize_connection_iscsi,
get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
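    # The following test runs once with the raw WWPN and once with the
    # zone-manager formatted form of the same WWPN.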
@ddt.data(eseries_fake.WWPN,
fczm_utils.get_formatted_wwn(eseries_fake.WWPN))
def test_get_host_with_matching_port_wwpn(self, port_id):
port_ids = [port_id]
host = copy.deepcopy(eseries_fake.HOST)
host.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'fc',
'address': eseries_fake.WWPN}]
}
)
host_2 = copy.deepcopy(eseries_fake.HOST_2)
host_2.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'fc',
'address': eseries_fake.WWPN_2}]
}
)
host_list = [host, host_2]
self.mock_object(self.library._client,
'list_hosts',
mock.Mock(return_value=host_list))
actual_host = self.library._get_host_with_matching_port(
port_ids)
self.assertEqual(host, actual_host)
def test_get_host_with_matching_port_iqn(self):
port_ids = [eseries_fake.INITIATOR_NAME]
host = copy.deepcopy(eseries_fake.HOST)
host.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi',
'address': eseries_fake.INITIATOR_NAME}]
}
)
host_2 = copy.deepcopy(eseries_fake.HOST_2)
host_2.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi',
'address': eseries_fake.INITIATOR_NAME_2}]
}
)
host_list = [host, host_2]
self.mock_object(self.library._client,
'list_hosts',
mock.Mock(return_value=host_list))
actual_host = self.library._get_host_with_matching_port(
port_ids)
self.assertEqual(host, actual_host)
def test_terminate_connection_fc_no_hosts(self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_fc,
get_fake_volume(),
connector)
def test_terminate_connection_fc_volume_not_mapped(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
volume = copy.deepcopy(eseries_fake.VOLUME)
volume['listOfMappings'] = []
self.mock_object(self.library, '_get_volume',
mock.Mock(return_value=volume))
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.assertRaises(eseries_exc.VolumeNotMapped,
self.library.terminate_connection_fc,
get_fake_volume(),
connector)
def test_terminate_connection_fc_volume_mapped(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
copy.deepcopy(eseries_fake.VOLUME_MAPPING)
]
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.library.terminate_connection_fc(get_fake_volume(), connector)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_fc_volume_mapped_no_cleanup_zone(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
expected_target_info = {
'driver_volume_type': 'fibre_channel',
'data': {},
}
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
copy.deepcopy(eseries_fake.VOLUME_MAPPING)
]
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.mock_object(self.library._client, 'get_volume_mappings_for_host',
mock.Mock(return_value=[copy.deepcopy
(eseries_fake.
VOLUME_MAPPING)]))
target_info = self.library.terminate_connection_fc(get_fake_volume(),
connector)
self.assertDictEqual(expected_target_info, target_info)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_fc_volume_mapped_cleanup_zone(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
expected_target_info = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_wwn': [eseries_fake.WWPN_2],
'initiator_target_map': {
eseries_fake.WWPN: [eseries_fake.WWPN_2]
},
},
}
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
copy.deepcopy(eseries_fake.VOLUME_MAPPING)
]
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.mock_object(self.library._client, 'get_volume_mappings_for_host',
mock.Mock(return_value=[]))
target_info = self.library.terminate_connection_fc(get_fake_volume(),
connector)
self.assertDictEqual(expected_target_info, target_info)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_fc_not_mapped_host_with_wwpn_does_not_exist(
self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[eseries_fake.HOST_2]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_fc,
get_fake_volume(),
connector)
def test_initialize_connection_fc_volume_not_mapped(self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
expected_target_info = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': 0,
'target_wwn': [eseries_fake.WWPN_2],
'access_mode': 'rw',
'initiator_target_map': {
eseries_fake.WWPN: [eseries_fake.WWPN_2]
},
},
}
target_info = self.library.initialize_connection_fc(get_fake_volume(),
connector)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
self.assertDictEqual(expected_target_info, target_info)
def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist(
self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.library.driver_protocol = 'FC'
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'create_host_with_ports',
mock.Mock(return_value=eseries_fake.HOST))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.initialize_connection_fc(get_fake_volume(), connector)
self.library._client.create_host_with_ports.assert_called_once_with(
mock.ANY, mock.ANY,
[fczm_utils.get_formatted_wwn(eseries_fake.WWPN)],
port_type='fc', group_id=None
)
def test_initialize_connection_fc_volume_already_mapped_to_target_host(
self):
"""Should be a no-op"""
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.initialize_connection_fc(get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_fc_volume_mapped_to_another_host(self):
"""Should raise error saying multiattach not enabled"""
connector = {'wwpns': [eseries_fake.WWPN]}
fake_mapping_to_other_host = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[
'hostRef']
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
side_effect=exception.NetAppDriverException))
self.assertRaises(exception.NetAppDriverException,
self.library.initialize_connection_fc,
get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_fc_no_target_wwpns(self):
"""Should be a no-op"""
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.mock_object(self.library._client, 'list_target_wwpns',
mock.Mock(return_value=[]))
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_fc,
get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_build_initiator_target_map_fc_with_lookup_service(
self):
connector = {'wwpns': [eseries_fake.WWPN, eseries_fake.WWPN_2]}
self.library.lookup_service = mock.Mock()
self.library.lookup_service.get_device_mapping_from_network = (
mock.Mock(return_value=eseries_fake.FC_FABRIC_MAP))
(target_wwpns, initiator_target_map, num_paths) = (
self.library._build_initiator_target_map_fc(connector))
self.assertSetEqual(set(eseries_fake.FC_TARGET_WWPNS),
set(target_wwpns))
self.assertDictEqual(eseries_fake.FC_I_T_MAP, initiator_target_map)
self.assertEqual(4, num_paths)
@ddt.data(('raid0', 'raid0'), ('raid1', 'raid1'), ('raid3', 'raid5'),
('raid5', 'raid5'), ('raid6', 'raid6'), ('raidDiskPool', 'DDP'))
@ddt.unpack
def test_update_ssc_raid_type(self, raid_lvl, raid_lvl_mapping):
pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_raid_type(pools)
self.assertEqual({'test_vg1': {'netapp_raid_type': raid_lvl_mapping}},
ssc_stats)
@ddt.data('raidAll', '__UNDEFINED', 'unknown',
'raidUnsupported', 'garbage')
def test_update_ssc_raid_type_invalid(self, raid_lvl):
pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_raid_type(pools)
self.assertEqual({'test_vg1': {'netapp_raid_type': 'unknown'}},
ssc_stats)
def test_create_asup(self):
self.library._client = mock.Mock()
self.library._client.features.AUTOSUPPORT = na_utils.FeatureState()
self.library._client.api_operating_mode = (
eseries_fake.FAKE_ASUP_DATA['operating-mode'])
self.library._app_version = eseries_fake.FAKE_APP_VERSION
self.mock_object(
self.library._client, 'get_firmware_version',
mock.Mock(return_value=(
eseries_fake.FAKE_ASUP_DATA['system-version'])))
self.mock_object(
self.library._client, 'get_serial_numbers',
mock.Mock(return_value=eseries_fake.FAKE_SERIAL_NUMBERS))
self.mock_object(
self.library._client, 'get_model_name',
mock.Mock(
return_value=eseries_fake.FAKE_CONTROLLERS[0]['modelName']))
self.mock_object(
self.library._client, 'set_counter',
mock.Mock(return_value={'value': 1}))
mock_invoke = self.mock_object(
self.library._client, 'add_autosupport_data')
self.library._create_asup(eseries_fake.FAKE_CINDER_HOST)
mock_invoke.assert_called_with(eseries_fake.FAKE_KEY,
eseries_fake.FAKE_ASUP_DATA)
def test_create_asup_not_supported(self):
self.library._client = mock.Mock()
self.library._client.features.AUTOSUPPORT = na_utils.FeatureState(
supported=False)
mock_invoke = self.mock_object(
self.library._client, 'add_autosupport_data')
self.library._create_asup(eseries_fake.FAKE_CINDER_HOST)
mock_invoke.assert_not_called()
@mock.patch.object(library, 'LOG', mock.Mock())
def test_create_volume_fail_clean(self):
"""Test volume creation fail w/o a partial volume being created.
Test the failed creation of a volume where a partial volume with
the name has not been created, thus no cleanup is required.
"""
        self.library._get_volume = mock.Mock(
            side_effect=exception.VolumeNotFound(message=''))
        self.library._client.create_volume = mock.Mock(
            side_effect=exception.NetAppDriverException)
        self.library._client.delete_volume = mock.Mock()
fake_volume = copy.deepcopy(get_fake_volume())
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume, fake_volume)
self.assertTrue(self.library._get_volume.called)
self.assertFalse(self.library._client.delete_volume.called)
self.assertEqual(1, library.LOG.error.call_count)
@mock.patch.object(library, 'LOG', mock.Mock())
def test_create_volume_fail_dirty(self):
"""Test volume creation fail where a partial volume has been created.
Test scenario where the creation of a volume fails and a partial
        volume is created with the name/id that was supplied to the
original creation call. In this situation the partial volume should
be detected and removed.
"""
fake_volume = copy.deepcopy(get_fake_volume())
self.library._get_volume = mock.Mock(return_value=fake_volume)
self.library._client.list_volume = mock.Mock(return_value=fake_volume)
        self.library._client.create_volume = mock.Mock(
            side_effect=exception.NetAppDriverException)
        self.library._client.delete_volume = mock.Mock()
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume, fake_volume)
self.assertTrue(self.library._get_volume.called)
self.assertTrue(self.library._client.delete_volume.called)
self.library._client.delete_volume.assert_called_once_with(
fake_volume["id"])
self.assertEqual(1, library.LOG.error.call_count)
@mock.patch.object(library, 'LOG', mock.Mock())
def test_create_volume_fail_dirty_fail_delete(self):
"""Volume creation fail with partial volume deletion fails
Test scenario where the creation of a volume fails and a partial
volume is created with the name/id that was supplied by to the
original creation call. The partial volume is detected but when
the cleanup deletetion of that fragment volume is attempted it fails.
"""
fake_volume = copy.deepcopy(get_fake_volume())
self.library._get_volume = mock.Mock(return_value=fake_volume)
self.library._client.list_volume = mock.Mock(return_value=fake_volume)
        self.library._client.create_volume = mock.Mock(
            side_effect=exception.NetAppDriverException)
        self.library._client.delete_volume = mock.Mock(
            side_effect=exception.NetAppDriverException)
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume, fake_volume)
self.assertTrue(self.library._get_volume.called)
self.assertTrue(self.library._client.delete_volume.called)
self.library._client.delete_volume.assert_called_once_with(
fake_volume["id"])
self.assertEqual(2, library.LOG.error.call_count)
@ddt.ddt
class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase):
"""Test driver when netapp_enable_multiattach is enabled.
Test driver behavior when the netapp_enable_multiattach configuration
option is True.
"""
def setUp(self):
super(NetAppEseriesLibraryMultiAttachTestCase, self).setUp()
config = eseries_fake.create_configuration_eseries()
config.netapp_enable_multiattach = True
kwargs = {'configuration': config}
self.library = library.NetAppESeriesLibrary("FAKE", **kwargs)
self.library._client = eseries_fake.FakeEseriesClient()
with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                        new=cinder_utils.ZeroIntervalLoopingCall):
self.library.check_for_setup_error()
def test_do_setup_host_group_already_exists(self):
mock_check_flags = self.mock_object(na_utils, 'check_flags')
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
fake_rest_client = eseries_fake.FakeEseriesClient()
self.mock_object(self.library, '_create_rest_client',
mock.Mock(return_value=fake_rest_client))
mock_create = self.mock_object(fake_rest_client, 'create_host_group')
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertFalse(mock_create.call_count)
def test_do_setup_host_group_does_not_exist(self):
mock_check_flags = self.mock_object(na_utils, 'check_flags')
fake_rest_client = eseries_fake.FakeEseriesClient()
self.mock_object(self.library, '_create_rest_client',
mock.Mock(return_value=fake_rest_client))
mock_get_host_group = self.mock_object(
fake_rest_client, "get_host_group_by_name",
mock.Mock(side_effect=exception.NotFound))
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_get_host_group.call_count)
def test_create_volume(self):
self.library._client.create_volume = mock.Mock(
return_value=eseries_fake.VOLUME)
self.library.create_volume(get_fake_volume())
self.assertTrue(self.library._client.create_volume.call_count)
@ddt.data(('netapp_eseries_flash_read_cache', 'flash_cache', 'true'),
('netapp_eseries_flash_read_cache', 'flash_cache', 'false'),
('netapp_eseries_flash_read_cache', 'flash_cache', None),
('netapp_thin_provisioned', 'thin_provision', 'true'),
('netapp_thin_provisioned', 'thin_provision', 'false'),
('netapp_thin_provisioned', 'thin_provision', None),
('netapp_eseries_data_assurance', 'data_assurance', 'true'),
('netapp_eseries_data_assurance', 'data_assurance', 'false'),
('netapp_eseries_data_assurance', 'data_assurance', None),
('netapp:write_cache', 'write_cache', 'true'),
('netapp:write_cache', 'write_cache', 'false'),
('netapp:write_cache', 'write_cache', None),
('netapp:read_cache', 'read_cache', 'true'),
('netapp:read_cache', 'read_cache', 'false'),
('netapp:read_cache', 'read_cache', None),
('netapp_eseries_flash_read_cache', 'flash_cache', 'True'),
('netapp_eseries_flash_read_cache', 'flash_cache', '1'),
('netapp_eseries_data_assurance', 'data_assurance', ''))
@ddt.unpack
def test_create_volume_with_extra_spec(self, spec, key, value):
fake_volume = get_fake_volume()
extra_specs = {spec: value}
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.create_volume = mock.Mock(
return_value=volume)
# Make this utility method return our extra spec
mocked_spec_method = self.mock_object(na_utils,
'get_volume_extra_specs')
mocked_spec_method.return_value = extra_specs
self.library.create_volume(fake_volume)
self.assertEqual(1, self.library._client.create_volume.call_count)
# Ensure create_volume is called with the correct argument
args, kwargs = self.library._client.create_volume.call_args
self.assertIn(key, kwargs)
        if value is not None:
expected = na_utils.to_bool(value)
else:
expected = value
self.assertEqual(expected, kwargs[key])
def test_create_volume_too_many_volumes(self):
self.library._client.list_volumes = mock.Mock(
return_value=[eseries_fake.VOLUME for __ in
range(utils.MAX_LUNS_PER_HOST_GROUP + 1)])
self.library._client.create_volume = mock.Mock(
return_value=eseries_fake.VOLUME)
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume,
get_fake_volume())
self.assertFalse(self.library._client.create_volume.call_count)
def test_create_volume_from_snapshot(self):
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(self.library._client, "delete_snapshot_volume")
self.library.create_volume_from_snapshot(
get_fake_volume(), fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
def test_create_volume_from_snapshot_create_fails(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(side_effect=exception.NetAppDriverException)
)
self.mock_object(self.library._client, "delete_snapshot_volume")
self.mock_object(self.library._client, "delete_volume")
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume_from_snapshot,
get_fake_volume(),
fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
0, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we were going to copy to is cleaned up
self.library._client.delete_volume.assert_called_once_with(
fake_dest_eseries_volume['volumeRef'])
def test_create_volume_from_snapshot_copy_job_fails(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library._client, "delete_snapshot_volume")
self.mock_object(self.library._client, "delete_volume")
fake_failed_volume_copy_job = copy.deepcopy(
eseries_fake.VOLUME_COPY_JOB)
fake_failed_volume_copy_job['status'] = 'failed'
self.mock_object(self.library._client,
"create_volume_copy_job",
mock.Mock(return_value=fake_failed_volume_copy_job))
self.mock_object(self.library._client,
"list_vol_copy_job",
mock.Mock(return_value=fake_failed_volume_copy_job))
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume_from_snapshot,
get_fake_volume(),
fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we were going to copy to is cleaned up
self.library._client.delete_volume.assert_called_once_with(
fake_dest_eseries_volume['volumeRef'])
def test_create_volume_from_snapshot_fail_to_delete_snapshot_volume(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_dest_eseries_volume['volumeRef'] = 'fake_volume_ref'
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=copy.deepcopy(
eseries_fake.VOLUME)))
self.mock_object(self.library._client, "delete_snapshot_volume",
mock.Mock(side_effect=exception.NetAppDriverException)
)
self.mock_object(self.library._client, "delete_volume")
self.library.create_volume_from_snapshot(
get_fake_volume(), fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we created is not cleaned up
self.assertEqual(0, self.library._client.delete_volume.call_count)
@ddt.data(False, True)
def test_get_pool_operation_progress(self, expect_complete):
"""Validate the operation progress is interpreted correctly"""
pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
if expect_complete:
pool_progress = []
else:
pool_progress = copy.deepcopy(
eseries_fake.FAKE_POOL_ACTION_PROGRESS)
expected_actions = set(action['currentAction'] for action in
pool_progress)
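        # reduce() is a Python 2 builtin here; sum the estimated completion
        # times of all in-progress pool actions.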
expected_eta = reduce(lambda x, y: x + y['estimatedTimeToCompletion'],
pool_progress, 0)
self.library._client.get_pool_operation_progress = mock.Mock(
return_value=pool_progress)
complete, actions, eta = self.library._get_pool_operation_progress(
pool['id'])
self.assertEqual(expect_complete, complete)
self.assertEqual(expected_actions, actions)
self.assertEqual(expected_eta, eta)
@ddt.data(False, True)
def test_get_pool_operation_progress_with_action(self, expect_complete):
"""Validate the operation progress is interpreted correctly"""
expected_action = 'fakeAction'
pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
if expect_complete:
pool_progress = copy.deepcopy(
eseries_fake.FAKE_POOL_ACTION_PROGRESS)
for progress in pool_progress:
progress['currentAction'] = 'none'
else:
pool_progress = copy.deepcopy(
eseries_fake.FAKE_POOL_ACTION_PROGRESS)
pool_progress[0]['currentAction'] = expected_action
expected_actions = set(action['currentAction'] for action in
pool_progress)
expected_eta = reduce(lambda x, y: x + y['estimatedTimeToCompletion'],
pool_progress, 0)
self.library._client.get_pool_operation_progress = mock.Mock(
return_value=pool_progress)
complete, actions, eta = self.library._get_pool_operation_progress(
pool['id'], expected_action)
self.assertEqual(expect_complete, complete)
self.assertEqual(expected_actions, actions)
self.assertEqual(expected_eta, eta)
@mock.patch('eventlet.greenthread.sleep')
def test_extend_volume(self, _mock_sleep):
"""Test volume extend with a thick-provisioned volume"""
def get_copy_progress():
for eta in xrange(5, -1, -1):
action_status = 'none' if eta == 0 else 'remappingDve'
complete = action_status == 'none'
yield complete, action_status, eta
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
new_capacity = 10
volume['objectType'] = 'volume'
self.library._client.expand_volume = mock.Mock()
self.library._get_pool_operation_progress = mock.Mock(
side_effect=get_copy_progress())
self.library._get_volume = mock.Mock(return_value=volume)
self.library.extend_volume(fake_volume, new_capacity)
# Ensure that the extend method waits until the expansion is completed
self.assertEqual(6,
self.library._get_pool_operation_progress.call_count
)
self.library._client.expand_volume.assert_called_with(volume['id'],
new_capacity,
False)
def test_extend_volume_thin(self):
"""Test volume extend with a thin-provisioned volume"""
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
new_capacity = 10
volume['objectType'] = 'thinVolume'
self.library._client.expand_volume = mock.Mock(return_value=volume)
self.library._get_volume_operation_progress = mock.Mock()
self.library._get_volume = mock.Mock(return_value=volume)
self.library.extend_volume(fake_volume, new_capacity)
self.assertFalse(self.library._get_volume_operation_progress.called)
self.library._client.expand_volume.assert_called_with(volume['id'],
new_capacity,
True)
def test_delete_non_existing_volume(self):
volume2 = get_fake_volume()
# Change to a nonexistent id.
volume2['name_id'] = '88888888-4444-4444-4444-cccccccccccc'
self.assertIsNone(self.library.delete_volume(volume2))
def test_map_volume_to_host_volume_not_mapped(self):
"""Map the volume directly to destination host."""
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.map_volume_to_host(get_fake_volume(),
eseries_fake.VOLUME,
eseries_fake.INITIATOR_NAME_2)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_map_volume_to_host_volume_not_mapped_host_does_not_exist(self):
"""Should create the host map directly to the host."""
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'create_host_with_ports',
mock.Mock(
return_value=eseries_fake.HOST_2))
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.map_volume_to_host(get_fake_volume(),
eseries_fake.VOLUME,
eseries_fake.INITIATOR_NAME_2)
self.assertTrue(self.library._client.create_host_with_ports.called)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_map_volume_to_host_volume_already_mapped(self):
"""Should be a no-op."""
self.mock_object(host_mapper, 'map_volume_to_multiple_hosts',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.map_volume_to_host(get_fake_volume(),
eseries_fake.VOLUME,
eseries_fake.INITIATOR_NAME)
self.assertTrue(host_mapper.map_volume_to_multiple_hosts.called)
| apache-2.0 |
dischinator/pyload | module/plugins/accounts/TurbobitNet.py | 3 | 2354 | # -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
from module.plugins.internal.misc import parse_html_form, set_cookie
class TurbobitNet(Account):
__name__ = "TurbobitNet"
__type__ = "account"
__version__ = "0.10"
__status__ = "testing"
__description__ = """TurbobitNet account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]" ),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
LOGIN_FAIL_PATTERN = r'>(?:E-Mail address appears to be invalid\. Please try again|Incorrect login or password)</div>'
def grab_info(self, user, password, data):
html = self.load("http://turbobit.net/")
m = re.search(r'>Turbo access till ([\d.]+)<', html)
if m is not None:
premium = True
validuntil = time.mktime(time.strptime(m.group(1), "%d.%m.%Y"))
else:
premium = False
validuntil = -1
return {'premium': premium, 'trafficleft': -1, 'validuntil': validuntil}
def signin(self, user, password, data):
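        # Force the English site language so the text markers checked below
        # (the logout link and error messages) match reliably.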
set_cookie(self.req.cj, "turbobit.net", "user_lang", "en")
self.data = self.load("http://turbobit.net/login")
if "<a href='/user/logout'" in self.data:
self.skip_login()
action, inputs = parse_html_form('class="form-horizontal login mail"', self.data)
if not inputs:
self.fail_login(_("Login form not found"))
inputs['user[login]'] = user
inputs['user[pass]'] = password
inputs['user[submit]'] = "Sign in"
if inputs.get('user[captcha_type]'):
self.fail_login(_("Logging in with captcha is not supported, please disable catcha in turbobit's account settings"))
self.data = self.load("http://turbobit.net/user/login", post=inputs)
if "<a href='/user/logout'" in self.data:
self.log_debug(_("Login successful"))
elif re.search(self.LOGIN_FAIL_PATTERN, self.data):
self.fail_login()
elif ">Please enter the captcha code.</div>" in self.data:
self.fail_login(_("Logging in with captcha is not supported, please disable catcha in turbobit's account settings"))
else:
self.fail_login(_("Unknown response"))
| gpl-3.0 |
andela-ooladayo/django | tests/reserved_names/tests.py | 405 | 1686 | from __future__ import unicode_literals
import datetime
from django.test import TestCase
from .models import Thing
class ReservedNameTests(TestCase):
def generate(self):
day1 = datetime.date(2005, 1, 1)
Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
day2 = datetime.date(2006, 2, 2)
Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
def test_simple(self):
day1 = datetime.date(2005, 1, 1)
t = Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
self.assertEqual(t.when, 'a')
day2 = datetime.date(2006, 2, 2)
u = Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
self.assertEqual(u.when, 'h')
def test_order_by(self):
self.generate()
things = [t.when for t in Thing.objects.order_by('when')]
self.assertEqual(things, ['a', 'h'])
def test_fields(self):
self.generate()
v = Thing.objects.get(pk='a')
self.assertEqual(v.join, 'b')
self.assertEqual(v.where, datetime.date(year=2005, month=1, day=1))
def test_dates(self):
self.generate()
resp = Thing.objects.dates('where', 'year')
self.assertEqual(list(resp), [
datetime.date(2005, 1, 1),
datetime.date(2006, 1, 1),
])
def test_month_filter(self):
self.generate()
self.assertEqual(Thing.objects.filter(where__month=1)[0].when, 'a')
| bsd-3-clause |
cloudbase/nova-virtualbox | nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py | 26 | 6784 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.contrib import quota_classes
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import quota_classes \
as quota_classes_v21
from nova.api.openstack import extensions
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def quota_set(class_name):
return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'instances': 10,
'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20, 'key_pairs': 100,
'injected_file_path_bytes': 255}}
class QuotaClassSetsTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
super(QuotaClassSetsTestV21, self).setUp()
self.req_admin = fakes.HTTPRequest.blank('', use_admin_context=True)
self.req = fakes.HTTPRequest.blank('')
self._setup()
def _setup(self):
ext_info = plugins.LoadedExtensionInfo()
self.controller = quota_classes_v21.QuotaClassSetsController(
extension_info=ext_info)
def test_format_quota_set(self):
raw_quota_set = {
'instances': 10,
'cores': 20,
'ram': 51200,
'floating_ips': 10,
'fixed_ips': -1,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}
quota_set = self.controller._format_quota_set('test_class',
raw_quota_set)
qs = quota_set['quota_class_set']
self.assertEqual(qs['id'], 'test_class')
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['fixed_ips'], -1)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
self.assertEqual(qs['key_pairs'], 100)
def test_quotas_show_as_admin(self):
res_dict = self.controller.show(self.req_admin, 'test_class')
self.assertEqual(res_dict, quota_set('test_class'))
def test_quotas_show_as_unauthorized_user(self):
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
self.req, 'test_class')
def test_quotas_update_as_admin(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
res_dict = self.controller.update(self.req_admin, 'test_class',
body=body)
self.assertEqual(res_dict, body)
def test_quotas_update_as_user(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'floating_ips': 10,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}}
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
self.req, 'test_class', body=body)
def test_quotas_update_with_empty_body(self):
body = {}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
def test_quotas_update_with_non_integer(self):
body = {'quota_class_set': {'instances': "abc"}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
body = {'quota_class_set': {'instances': 50.5}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
body = {'quota_class_set': {
'instances': u'\u30aa\u30fc\u30d7\u30f3'}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
def test_quotas_update_with_unsupported_quota_class(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'unsupported': 12}}
self.assertRaises(self.validation_error, self.controller.update,
self.req_admin, 'test_class', body=body)
class QuotaClassSetsTestV2(QuotaClassSetsTestV21):
validation_error = webob.exc.HTTPBadRequest
def _setup(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {}
self.controller = quota_classes.QuotaClassSetsController(ext_mgr)
| apache-2.0 |
poussik/vcrpy | tests/integration/test_tornado.py | 2 | 13097 | # -*- coding: utf-8 -*-
'''Test tornado's interaction with vcr'''
import json
import pytest
import vcr
from vcr.errors import CannotOverwriteExistingCassetteException
from assertions import assert_cassette_empty, assert_is_json
tornado = pytest.importorskip("tornado")
http = pytest.importorskip("tornado.httpclient")
# whether the current version of Tornado supports the raise_error argument for
# fetch().
supports_raise_error = tornado.version_info >= (4,)
@pytest.fixture(params=['simple', 'curl', 'default'])
def get_client(request):
if request.param == 'simple':
from tornado import simple_httpclient as simple
return (lambda: simple.SimpleAsyncHTTPClient())
elif request.param == 'curl':
curl = pytest.importorskip("tornado.curl_httpclient")
return (lambda: curl.CurlAsyncHTTPClient())
else:
return (lambda: http.AsyncHTTPClient())
def get(client, url, **kwargs):
fetch_kwargs = {}
if supports_raise_error:
fetch_kwargs['raise_error'] = kwargs.pop('raise_error', True)
return client.fetch(
http.HTTPRequest(url, method='GET', **kwargs),
**fetch_kwargs
)
def post(client, url, data=None, **kwargs):
if data:
kwargs['body'] = json.dumps(data)
return client.fetch(http.HTTPRequest(url, method='POST', **kwargs))
@pytest.fixture(params=["https", "http"])
def scheme(request):
'''Fixture that returns both http and https.'''
return request.param
@pytest.mark.gen_test
def test_status_code(get_client, scheme, tmpdir):
'''Ensure that we can read the status code'''
url = scheme + '://httpbin.org/'
with vcr.use_cassette(str(tmpdir.join('atts.yaml'))):
status_code = (yield get(get_client(), url)).code
with vcr.use_cassette(str(tmpdir.join('atts.yaml'))) as cass:
assert status_code == (yield get(get_client(), url)).code
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_headers(get_client, scheme, tmpdir):
'''Ensure that we can read the headers back'''
url = scheme + '://httpbin.org/'
with vcr.use_cassette(str(tmpdir.join('headers.yaml'))):
headers = (yield get(get_client(), url)).headers
with vcr.use_cassette(str(tmpdir.join('headers.yaml'))) as cass:
assert headers == (yield get(get_client(), url)).headers
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_body(get_client, tmpdir, scheme):
'''Ensure the responses are all identical enough'''
url = scheme + '://httpbin.org/bytes/1024'
with vcr.use_cassette(str(tmpdir.join('body.yaml'))):
content = (yield get(get_client(), url)).body
with vcr.use_cassette(str(tmpdir.join('body.yaml'))) as cass:
assert content == (yield get(get_client(), url)).body
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_effective_url(get_client, scheme, tmpdir):
'''Ensure that the effective_url is captured'''
url = scheme + '://httpbin.org/redirect-to?url=/html'
with vcr.use_cassette(str(tmpdir.join('url.yaml'))):
effective_url = (yield get(get_client(), url)).effective_url
assert effective_url == scheme + '://httpbin.org/html'
with vcr.use_cassette(str(tmpdir.join('url.yaml'))) as cass:
assert effective_url == (yield get(get_client(), url)).effective_url
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_auth(get_client, tmpdir, scheme):
'''Ensure that we can handle basic auth'''
auth = ('user', 'passwd')
url = scheme + '://httpbin.org/basic-auth/user/passwd'
with vcr.use_cassette(str(tmpdir.join('auth.yaml'))):
one = yield get(
get_client(), url, auth_username=auth[0], auth_password=auth[1]
)
with vcr.use_cassette(str(tmpdir.join('auth.yaml'))) as cass:
two = yield get(
get_client(), url, auth_username=auth[0], auth_password=auth[1]
)
assert one.body == two.body
assert one.code == two.code
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_auth_failed(get_client, tmpdir, scheme):
'''Ensure that we can save failed auth statuses'''
auth = ('user', 'wrongwrongwrong')
url = scheme + '://httpbin.org/basic-auth/user/passwd'
with vcr.use_cassette(str(tmpdir.join('auth-failed.yaml'))) as cass:
# Ensure that this is empty to begin with
assert_cassette_empty(cass)
with pytest.raises(http.HTTPError) as exc_info:
yield get(
get_client(),
url,
auth_username=auth[0],
auth_password=auth[1],
)
one = exc_info.value.response
assert exc_info.value.code == 401
with vcr.use_cassette(str(tmpdir.join('auth-failed.yaml'))) as cass:
with pytest.raises(http.HTTPError) as exc_info:
two = yield get(
get_client(),
url,
auth_username=auth[0],
auth_password=auth[1],
)
two = exc_info.value.response
assert exc_info.value.code == 401
assert one.body == two.body
assert one.code == two.code == 401
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_post(get_client, tmpdir, scheme):
'''Ensure that we can post and cache the results'''
data = {'key1': 'value1', 'key2': 'value2'}
url = scheme + '://httpbin.org/post'
with vcr.use_cassette(str(tmpdir.join('requests.yaml'))):
req1 = (yield post(get_client(), url, data)).body
with vcr.use_cassette(str(tmpdir.join('requests.yaml'))) as cass:
req2 = (yield post(get_client(), url, data)).body
assert req1 == req2
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_redirects(get_client, tmpdir, scheme):
'''Ensure that we can handle redirects'''
url = scheme + '://httpbin.org/redirect-to?url=bytes/1024'
with vcr.use_cassette(str(tmpdir.join('requests.yaml'))):
content = (yield get(get_client(), url)).body
with vcr.use_cassette(str(tmpdir.join('requests.yaml'))) as cass:
assert content == (yield get(get_client(), url)).body
assert cass.play_count == 1
@pytest.mark.gen_test
def test_cross_scheme(get_client, tmpdir, scheme):
'''Ensure that requests between schemes are treated separately'''
# First fetch a url under http, and then again under https and then
# ensure that we haven't served anything out of cache, and we have two
# requests / response pairs in the cassette
with vcr.use_cassette(str(tmpdir.join('cross_scheme.yaml'))) as cass:
yield get(get_client(), 'https://httpbin.org/')
yield get(get_client(), 'http://httpbin.org/')
assert cass.play_count == 0
assert len(cass) == 2
# Then repeat the same requests and ensure both were replayed.
with vcr.use_cassette(str(tmpdir.join('cross_scheme.yaml'))) as cass:
yield get(get_client(), 'https://httpbin.org/')
yield get(get_client(), 'http://httpbin.org/')
assert cass.play_count == 2
@pytest.mark.gen_test
def test_gzip(get_client, tmpdir, scheme):
'''
Ensure that httpclient is able to automatically decompress the response
body
'''
url = scheme + '://httpbin.org/gzip'
# use_gzip was renamed to decompress_response in 4.0
kwargs = {}
if tornado.version_info < (4,):
kwargs['use_gzip'] = True
else:
kwargs['decompress_response'] = True
with vcr.use_cassette(str(tmpdir.join('gzip.yaml'))):
response = yield get(get_client(), url, **kwargs)
assert_is_json(response.body)
with vcr.use_cassette(str(tmpdir.join('gzip.yaml'))) as cass:
response = yield get(get_client(), url, **kwargs)
assert_is_json(response.body)
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_https_with_cert_validation_disabled(get_client, tmpdir):
cass_path = str(tmpdir.join('cert_validation_disabled.yaml'))
with vcr.use_cassette(cass_path):
yield get(get_client(), 'https://httpbin.org', validate_cert=False)
with vcr.use_cassette(cass_path) as cass:
yield get(get_client(), 'https://httpbin.org', validate_cert=False)
assert 1 == cass.play_count
@pytest.mark.gen_test
def test_unsupported_features_raises_in_future(get_client, tmpdir):
'''Ensure that the exception for an AsyncHTTPClient feature not being
supported is raised inside the future.'''
def callback(chunk):
assert False, "Did not expect to be called."
with vcr.use_cassette(str(tmpdir.join('invalid.yaml'))):
future = get(
get_client(), 'http://httpbin.org', streaming_callback=callback
)
with pytest.raises(Exception) as excinfo:
yield future
assert "not yet supported by VCR" in str(excinfo)
@pytest.mark.skipif(
not supports_raise_error,
reason='raise_error unavailable in tornado <= 3',
)
@pytest.mark.gen_test
def test_unsupported_features_raise_error_disabled(get_client, tmpdir):
'''Ensure that the exception for an AsyncHTTPClient feature not being
supported is not raised if raise_error=False.'''
def callback(chunk):
assert False, "Did not expect to be called."
with vcr.use_cassette(str(tmpdir.join('invalid.yaml'))):
response = yield get(
get_client(),
'http://httpbin.org',
streaming_callback=callback,
raise_error=False,
)
assert "not yet supported by VCR" in str(response.error)
@pytest.mark.gen_test
def test_cannot_overwrite_cassette_raises_in_future(get_client, tmpdir):
'''Ensure that CannotOverwriteExistingCassetteException is raised inside
the future.'''
with vcr.use_cassette(str(tmpdir.join('overwrite.yaml'))):
yield get(get_client(), 'http://httpbin.org/get')
with vcr.use_cassette(str(tmpdir.join('overwrite.yaml'))):
future = get(get_client(), 'http://httpbin.org/headers')
with pytest.raises(CannotOverwriteExistingCassetteException):
yield future
@pytest.mark.skipif(
not supports_raise_error,
reason='raise_error unavailable in tornado <= 3',
)
@pytest.mark.gen_test
def test_cannot_overwrite_cassette_raise_error_disabled(get_client, tmpdir):
'''Ensure that CannotOverwriteExistingCassetteException is not raised if
raise_error=False in the fetch() call.'''
with vcr.use_cassette(str(tmpdir.join('overwrite.yaml'))):
yield get(
get_client(), 'http://httpbin.org/get', raise_error=False
)
with vcr.use_cassette(str(tmpdir.join('overwrite.yaml'))):
response = yield get(
get_client(), 'http://httpbin.org/headers', raise_error=False
)
assert isinstance(response.error, CannotOverwriteExistingCassetteException)
@pytest.mark.gen_test
@vcr.use_cassette(path_transformer=vcr.default_vcr.ensure_suffix('.yaml'))
def test_tornado_with_decorator_use_cassette(get_client):
response = yield get_client().fetch(
http.HTTPRequest('http://www.google.com/', method='GET')
)
assert response.body.decode('utf-8') == "not actually google"
@pytest.mark.gen_test
@vcr.use_cassette(path_transformer=vcr.default_vcr.ensure_suffix('.yaml'))
def test_tornado_exception_can_be_caught(get_client):
try:
yield get(get_client(), 'http://httpbin.org/status/500')
except http.HTTPError as e:
assert e.code == 500
try:
yield get(get_client(), 'http://httpbin.org/status/404')
except http.HTTPError as e:
assert e.code == 404
@pytest.mark.gen_test
def test_existing_references_get_patched(tmpdir):
from tornado.httpclient import AsyncHTTPClient
with vcr.use_cassette(str(tmpdir.join('data.yaml'))):
client = AsyncHTTPClient()
yield get(client, 'http://httpbin.org/get')
with vcr.use_cassette(str(tmpdir.join('data.yaml'))) as cass:
yield get(client, 'http://httpbin.org/get')
assert cass.play_count == 1
@pytest.mark.gen_test
def test_existing_instances_get_patched(get_client, tmpdir):
'''Ensure that existing instances of AsyncHTTPClient get patched upon
entering VCR context.'''
client = get_client()
with vcr.use_cassette(str(tmpdir.join('data.yaml'))):
yield get(client, 'http://httpbin.org/get')
with vcr.use_cassette(str(tmpdir.join('data.yaml'))) as cass:
yield get(client, 'http://httpbin.org/get')
assert cass.play_count == 1
@pytest.mark.gen_test
def test_request_time_is_set(get_client, tmpdir):
'''Ensures that the request_time on HTTPResponses is set.'''
with vcr.use_cassette(str(tmpdir.join('data.yaml'))):
client = get_client()
response = yield get(client, 'http://httpbin.org/get')
assert response.request_time is not None
with vcr.use_cassette(str(tmpdir.join('data.yaml'))) as cass:
client = get_client()
response = yield get(client, 'http://httpbin.org/get')
assert response.request_time is not None
assert cass.play_count == 1
| mit |
nharraud/b2share | invenio/legacy/bibsort/scripts/bibsort.py | 13 | 2546 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Usage: bibsort [options]
BibSort tool
Options:
-h, --help show this help message and exit
-l, --load-config Loads the configuration from bibsort.conf into the
database
-d, --dump-config Outputs a database dump in the form of a config file
-p, --print-sorting-methods
Prints the available sorting methods
-B, --rebalance Runs the sorting methods given in '--methods' and
rebalances all the buckets.
If no method is specified, the rebalance will be done
for all the methods in the config file.
-S, --update-sorting Runs the sorting methods given in '--methods' for the
recids given in '--recids'.
If no method is specified, the update will be done for
all the methods in the config file.
If no recids are specified, the update will be done
for all the records that have been
modified/inserted since the last run of the sorting.
If you want to run the sorting for all records, you
should use the '-B' option
-M, --methods=METHODS Specify the sorting methods for which the
update_sorting or rebalancing will run
(ex: --methods=method1,method2,method3).
-R, --recids=RECIDS Specify the records for which the update_sorting will
run (ex: --recids=1,2-56,72)
"""
from invenio.base.factory import with_app_context
@with_app_context()
def main():
from invenio.legacy.bibsort.daemon import main as bibsort_main
return bibsort_main()
| gpl-2.0 |
momentum/canteen | canteen_tests/test_adapters/test_abstract.py | 2 | 33257 | # -*- coding: utf-8 -*-
"""
abstract adapter tests
~~~~~~~~~~~~~~~~~~~~~~
tests abstract adapter classes, that enforce/expose
interfaces known to the model engine proper.
:author: Sam Gammon <[email protected]>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
# stdlib
import datetime
# canteen test
from canteen import test
# canteen model API
from canteen import model
from canteen.model.adapter import abstract
## Globals
_target = lambda k: k.flatten(True)[1]
class SampleModel(model.Model):
""" Test model. """
string = basestring, {'required': True}
integer = int, {'repeated': True}
number = int, {'repeated': False}
floating = float, {'required': False}
date = datetime.datetime
class TestGraphPerson(model.Vertex):
""" simple test person object """
name = str, {'indexed': True}
class TestGraphFriends(TestGraphPerson > TestGraphPerson):
""" simple test friends edge """
year_met = int, {'indexed': True}
class TestGraphGift(TestGraphPerson >> TestGraphPerson):
""" simple directed gift edge """
price = float, {'indexed': True}
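# Note: judging from the adapter tests below, an edge class declared with a
# single '>' between vertex types (TestGraphFriends) behaves as an undirected
# edge, while '>>' (TestGraphGift) behaves as a directed edge with distinct
# source and target endpoints.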
class AbstractModelAdapterTests(test.FrameworkTest):
""" Tests `model.adapter.abstract.ModelAdapter` """
__abstract__ = True
subject = abstract.ModelAdapter
def _construct(self):
""" Construct a copy of the local adapter. """
# set to testing mode
if hasattr(self.subject, '__testing__'):
self.subject.__testing__ = True
return self.subject()
def test_abstract(self):
""" Test `ModelAdapter` interface abstractness """
if getattr(self, '__abstract__', False):
with self.assertRaises(TypeError):
self._construct()
else: # pragma: no cover
self._construct()
return getattr(self, '__abstract__', False)
def test_utilities(self):
""" Test `ModelAdapter` internal utilities """
assert hasattr(self.subject, 'acquire')
assert hasattr(self.subject, 'config')
assert hasattr(self.subject, 'logging')
assert hasattr(self.subject, 'serializer')
assert hasattr(self.subject, 'encoder')
assert hasattr(self.subject, 'compressor')
def test_base_interface_compliance(self):
""" Test base `ModelAdapter` interface compliance """
assert hasattr(self.subject, 'get')
assert hasattr(self.subject, 'get_multi')
assert hasattr(self.subject, 'put')
assert hasattr(self.subject, 'delete')
assert hasattr(self.subject, 'allocate_ids')
assert hasattr(self.subject, 'encode_key')
def test_construct(self):
""" Test basic `ModelAdapter` construction """
if self.__abstract__:
with self.assertRaises(TypeError):
self._construct()
else:
x = self._construct()
assert x
assert isinstance(x, self.subject)
def test_invalid_get(self):
""" Test fetching a key that doesn't exist """
if not self.__abstract__:
# get missing entity
model_key = model.Key("Sample", "_____")
entity = model_key.get(adapter=self._construct())
assert entity is None
def test_named_entity_get_put(self):
""" Test putting and getting an entity with a named key """
if not self.__abstract__:
# put entity
m = SampleModel(
key=model.Key(SampleModel.kind(), "NamedEntity"),
string="suphomies",
integer=[4, 5, 6, 7])
m_k = m.put(adapter=self._construct())
entities = []
# simulate getting entity at key level
explicit_key = model.Key(SampleModel.kind(), "NamedEntity")
entity = explicit_key.get()
entities.append(entity)
# simulate getting entity at model level
explicit_entity = SampleModel.get(name="NamedEntity")
entities.append(explicit_entity)
# test urlsafe-d key model-level get()
urlsafed_entity = SampleModel.get(key=explicit_key.urlsafe())
entities.append(urlsafed_entity)
# test raw-d key model-level get()
flattened = explicit_key.flatten(False)
rawd_entity = SampleModel.get(key=flattened[1:])
entities.append(rawd_entity)
for entity in entities:
# make sure things match on key level
assert entity.string == "suphomies"
assert len(entity.integer) == 4
assert entity.key.id == "NamedEntity"
assert entity.key.kind == SampleModel.kind()
def test_id_entity_get_put(self):
""" Test putting and getting an entity with an ID'd key """
if not self.__abstract__:
# put entity
m = SampleModel(string="hello", integer=[1, 2, 3])
m_k = m.put(adapter=self._construct())
# make sure flags match
assert m_k == m.key
assert m_k.__persisted__
assert m.__persisted__
assert (not m.__dirty__)
# simulate getting entity via urlsafe
entity = SampleModel.get(m_k, adapter=self._construct())
# make sure things match
assert entity.string == "hello"
assert len(entity.integer) == 3
assert entity.key.kind == SampleModel.kind()
def test_entity_multiget(self):
""" Test retrieving multiple entities at once via `get_multi` """
if not self.__abstract__:
# put some entities
entities = [
SampleModel(string='hi', integer=[1, 2, 3]),
SampleModel(string='sup', integer=[4, 5, 6]),
SampleModel(string='hola', integer=[7, 8, 9])]
keys = []
for entity in entities:
keys.append(entity.put(adapter=self._construct()))
# retrieve entities in bulk
_results = []
for key, entity in zip(keys,
SampleModel.get_multi(keys, adapter=self._construct())):
assert key.kind == SampleModel.kind()
assert entity, (
"failed to retrieve entity with adapter '%s'" % self._construct())
assert entity.string in ('hi', 'sup', 'hola')
assert len(entity.integer) == 3
_results.append(entity)
assert len(_results) == 3
def test_delete_existing_entity_via_key(self):
""" Test deleting an existing entity via `Key.delete()` """
if not self.__abstract__:
# put entity
m = SampleModel(string="hello", integer=[1, 2, 3])
m_k = m.put(adapter=self._construct())
# delete it
res = m_k.delete(adapter=self._construct())
# make sure it's unknown and gone
assert res
assert not SampleModel.get(m_k, adapter=self._construct())
def test_delete_existing_entity_via_model(self):
""" Test deleting an existing entity via `Model.delete()` """
if not self.__abstract__:
# put entity
m = SampleModel(string="hello", integer=[1, 2, 3])
m_k = m.put(adapter=self._construct())
# delete it
res = m.delete(adapter=self._construct())
# make sure it's unknown and gone
assert res
assert not SampleModel.get(m_k, adapter=self._construct())
def test_delete_invalid_entity(self):
""" Test deleting an invalid entity """
if not self.__abstract__:
# manufacture a key that shouldn't exist...
m_k = model.Key("SampleKind", "____InvalidKey____")
# make sure it's unknown
assert not SampleModel.get(m_k, adapter=self._construct())
# delete it
res = m_k.delete(adapter=self._construct())
# make sure it's unknown, and we couldn't delete it
assert (not res)
assert not SampleModel.get(m_k, adapter=self._construct())
def test_allocate_ids(self):
""" Test allocating one and numerous ID's """
if not self.__abstract__:
# try allocating one ID
next = self._construct().allocate_ids(model.Key, "SampleModel", 1)
assert isinstance(next, int)
# try allocating 10 ID's
next_range = [i for i in self._construct().allocate_ids(*(
model.Key, "Sample", 10))()]
assert len(next_range) == 10
for i in next_range:
assert isinstance(i, int)
class IndexedModelAdapterTests(AbstractModelAdapterTests):
""" Tests `model.adapter.abstract.IndexedModelAdapter` """
__abstract__ = True
subject = abstract.IndexedModelAdapter
def test_indexed_interface_compliance(self):
""" Test `IndexedModelAdapter` interface compliance """
assert hasattr(self.subject, 'write_indexes')
assert hasattr(self.subject, 'clean_indexes')
assert hasattr(self.subject, 'execute_query')
def test_attached_indexer_compliance(self):
""" Test `IndexedModelAdapter.Indexer` for basic functionality """
assert hasattr(self.subject, 'Indexer')
indexer = self.subject.Indexer
# interrogate indexer
assert hasattr(indexer, 'convert_key')
assert hasattr(indexer, 'convert_date')
assert hasattr(indexer, 'convert_time')
assert hasattr(indexer, 'convert_datetime')
def test_indexer_convert_key(self):
""" Test `Indexer.convert_key` """
indexer = self.subject.Indexer
sample_key = model.Key('Sample', 'key')
converted = indexer.convert_key(sample_key)
# interrogate converted key
assert isinstance(converted, tuple)
def test_indexer_convert_date(self):
""" Test `Indexer.convert_date` """
indexer = self.subject.Indexer
sample_date = datetime.date(year=2014, month=7, day=29)
converted = indexer.convert_date(sample_date)
# interrogate converted date
assert isinstance(converted, tuple)
def test_indexer_convert_time(self):
""" Test `Indexer.convert_time` """
indexer = self.subject.Indexer
sample_time = datetime.time(hour=12, minute=30)
converted = indexer.convert_time(sample_time)
# interrogate converted date
assert isinstance(converted, tuple)
def test_indexer_convert_datetime(self):
""" Test `Indexer.convert_datetime` """
indexer = self.subject.Indexer
sample_datetime = datetime.datetime(year=2014,
month=7,
day=29,
hour=12,
minute=30)
converted = indexer.convert_datetime(sample_datetime)
# interrogate converted date
assert isinstance(converted, tuple)
def test_equality_query(self):
""" Test equality queries with `IndexedMemoryAdapter` """
if not self.__abstract__:
# make some models
m = [
SampleModel(string="soop", integer=[1, 2, 3]),
SampleModel(string="soop", integer=[1, 2, 3]),
SampleModel(string="soop", integer=[1, 2, 3])]
for _m in m: _m.put(adapter=self._construct())
# single get
q = SampleModel.query().filter(SampleModel.string == "soop")
result = q.get(adapter=self._construct())
assert result.string == "soop"
# submit query
q = SampleModel.query().filter(SampleModel.string == "soop")
result = q.fetch(limit=50, adapter=self._construct())
for r in result:
assert r.string == "soop"
def test_inequality_query(self):
""" Test inequality queries with `IndexedMemoryAdapter` """
if not self.__abstract__:
# make some models
m = [
SampleModel(string="soop", integer=[1, 2, 3]),
SampleModel(string="soop", integer=[1, 2, 3]),
SampleModel(string="soop", integer=[1, 2, 3]),
SampleModel(string="sploop", integer=[1, 2])]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query().filter(SampleModel.string != "sploop")
result = q.fetch(limit=50, adapter=self._construct())
assert len(result) > 0, (
"got no results for inequality query"
" (got '%s' from adapter '%s')" % (result, self.subject))
for r in result:
assert r.string != "sploop"
def test_range_query(self):
""" Test range queries with `IndexedMemoryAdapter` """
if not self.__abstract__:
now = datetime.datetime.now()
# make some models
m = [
SampleModel(string="soop", date=now + datetime.timedelta(days=1)),
SampleModel(string="soop", date=now + datetime.timedelta(days=2)),
SampleModel(string="soop", date=now + datetime.timedelta(days=3))]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query().filter(SampleModel.date > now)
result = q.fetch(limit=50, adapter=self._construct())
for result_i in result:
assert result_i.date > now
def test_ancestry_query(self):
""" Test ancestry queries with `IndexedMemoryAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'heyo')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child')
child2 = _key('child2')
# make some models
m = [
SampleModel(key=root, string='soop'),
SampleModel(key=child1, string='soop'),
SampleModel(key=child2, string='soop')]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50, adapter=self._construct())
result = q.fetch()
assert len(result) == 2
def test_compound_query(self):
""" Test compound queries with `IndexedMemoryAdapter` """
if not self.__abstract__:
now = datetime.datetime.now()
root = model.Key(SampleModel, 'hi')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
# make some models
m = [
SampleModel(key=root,
string="hithere",
date=now + datetime.timedelta(days=1)),
SampleModel(key=child1,
string="hithere",
date=now + datetime.timedelta(days=2)),
SampleModel(key=child2,
string="hithere",
date=now + datetime.timedelta(days=3)),
SampleModel(key=child3,
string="noway",
date=now + datetime.timedelta(days=4))]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(limit=50)
q.filter(SampleModel.string == "hithere")
q.filter(SampleModel.date > now)
result = q.fetch(adapter=self._construct())
assert len(result) == 3
# submit query
q = SampleModel.query(ancestor=root, limit=50)
q.filter(SampleModel.date > now)
result = q.fetch(adapter=self._construct())
assert len(result) == 3
# submit query
q = SampleModel.query(ancestor=root, limit=50)
q.filter(SampleModel.date > now)
q.filter(SampleModel.string == "hithere")
result = q.fetch(adapter=self._construct())
assert len(result) == 2
def test_ascending_sort_string(self):
""" Test an ASC sort on a string property with `IndexedMemoryAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'sorted-string')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark"),
SampleModel(key=child2, string="blasphemy"),
SampleModel(key=child3, string="xylophone"),
SampleModel(key=child4, string="yompin")]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50).sort(+SampleModel.string)
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, reversed(("aardvark",
"blasphemy",
"xylophone",
"yompin"))):
assert l.string == r
def test_descending_sort_string(self):
""" Test a DSC sort on a string property with `IndexedMemoryAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'sorted-string-2')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark"),
SampleModel(key=child2, string="blasphemy"),
SampleModel(key=child3, string="xylophone"),
SampleModel(key=child4, string="yompin")]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50).sort(-SampleModel.string)
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, ("aardvark", "blasphemy", "xylophone", "yompin")):
assert l.string == r
def test_ascending_sort_integer(self):
""" Test an ASC sort on an integer property with `IndexedMemoryAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'sorted-int')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark", number=5),
SampleModel(key=child2, string="blasphemy", number=6),
SampleModel(key=child3, string="xylophone", number=7),
SampleModel(key=child4, string="yompin", number=8)]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50).sort(+SampleModel.number)
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, (5, 6, 7, 8)):
assert l.number == r
def test_descending_sort_integer(self):
""" Test a DSC sort on an integer property with `IndexedMemoryAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'sorted-int-2')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark", number=5),
SampleModel(key=child2, string="blasphemy", number=6),
SampleModel(key=child3, string="xylophone", number=7),
SampleModel(key=child4, string="yompin", number=8)]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50).sort(-SampleModel.number)
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, reversed((5, 6, 7, 8))):
assert l.number == r
def test_ascending_sort_float(self):
""" Test an ASC sort on a float property with `IndexedMemoryAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'sorted-float')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark", floating=5.5),
SampleModel(key=child2, string="blasphemy", floating=6.5),
SampleModel(key=child3, string="xylophone", floating=7.5),
SampleModel(key=child4, string="yompin", floating=8.5)]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = (
SampleModel.query(ancestor=root, limit=50).sort(
+SampleModel.floating))
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, (5.5, 6.5, 7.5, 8.5)):
assert l.floating == r
def test_descending_sort_float(self):
""" Test a DSC sort on a float property with `IndexedModelAdapter` """
if not self.__abstract__:
root = model.Key(SampleModel, 'sorted-float')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark", floating=5.5),
SampleModel(key=child2, string="blasphemy", floating=6.5),
SampleModel(key=child3, string="xylophone", floating=7.5),
SampleModel(key=child4, string="yompin", floating=8.5)]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = (
SampleModel.query(ancestor=root, limit=50).sort(
-SampleModel.floating))
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, reversed((5.5, 6.5, 7.5, 8.5))):
assert l.floating == r
def test_ascending_sort_datetime(self):
""" Test an ASC sort on a `datetime` property with `IndexedModelAdapter` """
if not self.__abstract__:
now = datetime.datetime.now()
later = lambda n: now + datetime.timedelta(days=n)
root = model.Key(SampleModel, 'sorted-datetime')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark", date=later(1)),
SampleModel(key=child2, string="blasphemy", date=later(2)),
SampleModel(key=child3, string="xylophone", date=later(3)),
SampleModel(key=child4, string="yompin", date=later(4))]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50).sort(+SampleModel.date)
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, (later(1), later(2), later(3), later(4))):
assert l.date == r
def test_descending_sort_datetime(self):
""" Test a DSC sort on a `datetime` property with `IndexedModelAdapter` """
if not self.__abstract__:
now = datetime.datetime.now()
later = lambda n: now + datetime.timedelta(days=n)
root = model.Key(SampleModel, 'sorted-datetime')
_key = lambda x: model.Key(SampleModel, x, parent=root)
child1 = _key('child1')
child2 = _key('child2')
child3 = _key('child3')
child4 = _key('child4')
# make some models
m = [
SampleModel(key=child1, string="aardvark", date=later(1)),
SampleModel(key=child2, string="blasphemy", date=later(2)),
SampleModel(key=child3, string="xylophone", date=later(3)),
SampleModel(key=child4, string="yompin", date=later(4))]
for _m in m: _m.put(adapter=self._construct())
# submit query
q = SampleModel.query(ancestor=root, limit=50).sort(-SampleModel.date)
result = q.fetch(adapter=self._construct())
assert len(result) == 4
for l, r in zip(result, reversed((
later(1), later(2), later(3), later(4)))):
assert l.date == r
class GraphModelAdapterTests(IndexedModelAdapterTests):
""" Tests `model.adapter.abstract.GraphModelAdapter` """
__abstract__ = True
subject = abstract.GraphModelAdapter
def test_make_vertex_nokeyname(self):
""" Test `GraphModelAdapter` `Vertex` put with no keyname """
if not self.test_abstract():
t = TestGraphPerson(name="Steve")
k = t.put(adapter=self.subject())
assert isinstance(t, TestGraphPerson)
assert isinstance(k, model.Key)
assert isinstance(k, model.VertexKey)
assert isinstance(k.id, (int, long))
def test_make_vertex_keyname(self):
""" Test `GraphModelAdapter` `Vertex` put with a keyname """
if not self.test_abstract():
t = TestGraphPerson(key=model.VertexKey(TestGraphPerson, "steve"),
name="Steve")
k = t.put(adapter=self.subject())
assert isinstance(t, TestGraphPerson)
assert isinstance(k, model.Key)
assert isinstance(k, model.VertexKey)
assert isinstance(k.id, basestring)
return t
def test_get_vertex(self):
""" Test `GraphModelAdapter` `Vertex` get """
if not self.test_abstract():
x = self.test_make_vertex_keyname()
pulled = TestGraphPerson.get(x.key, adapter=self.subject())
assert pulled.key == x.key
assert isinstance(pulled.key, model.Key)
assert isinstance(pulled.key, model.VertexKey)
assert isinstance(pulled.key.id, basestring)
def test_make_edge_nokeyname(self):
""" Test `GraphModelAdapter` `Edge` put with no keyname """
if not self.test_abstract():
bob = TestGraphPerson(key=model.VertexKey(TestGraphPerson, "bob"),
name="Bob")
k = bob.put(adapter=self.subject())
assert isinstance(bob, TestGraphPerson)
assert isinstance(k, model.Key)
assert isinstance(k, model.VertexKey)
assert isinstance(k.id, basestring)
steve = self.test_make_vertex_keyname()
f = TestGraphFriends(bob, steve)
ek = f.put(adapter=self.subject())
assert isinstance(ek, model.EdgeKey)
assert isinstance(ek.id, (int, long))
assert isinstance(f, TestGraphFriends)
def test_make_edge_keyname(self):
""" Test `GraphModelAdapter` `Edge` put with a keyname """
if not self.test_abstract():
bob = TestGraphPerson(key=model.VertexKey(TestGraphPerson, "bob"),
name="Bob")
k = bob.put(adapter=self.subject())
assert isinstance(bob, TestGraphPerson)
assert isinstance(k, model.Key)
assert isinstance(k, model.VertexKey)
assert isinstance(k.id, basestring)
steve = self.test_make_vertex_keyname()
_orig_ek = model.EdgeKey(TestGraphFriends, "some-friendship")
f = TestGraphFriends(bob, steve, key=_orig_ek)
ek = f.put(adapter=self.subject())
assert isinstance(ek, model.EdgeKey)
assert isinstance(ek.id, basestring)
assert isinstance(f, TestGraphFriends)
assert ek.id == "some-friendship"
return bob, steve, f
def test_get_edge(self):
""" Test `GraphModelAdapter` `Edge` get """
if not self.test_abstract():
bob, steve, friendship = self.test_make_edge_keyname()
# fetch by key
_f = TestGraphFriends.get(friendship.key, adapter=self.subject())
assert isinstance(_f.key, model.Key)
assert isinstance(_f.key, model.EdgeKey)
assert isinstance(_f.key.id, basestring)
assert _f.key.id == "some-friendship"
assert _f.key.kind == "TestGraphFriends"
def test_vertex_edges(self):
""" Test retrieving `Edges` for a `Vertex` with `GraphModelAdapter` """
if not self.test_abstract():
bob, steve, friendship = self.test_make_edge_keyname()
# friendship edge should appear for both vertexes
_q = bob.edges(keys_only=True).fetch(adapter=self.subject(), limit=10)
assert friendship.key in _q, (
"expected friendship key but got:"
" '%s' with adapter '%s'" % (
[i for i in _q], repr(self.subject())))
assert friendship.key in (
steve.edges(keys_only=True).fetch(adapter=self.subject(), limit=10))
assert "Edges" in repr(steve.edges(keys_only=True))
assert "CONTAINS" in repr(steve.edges(keys_only=True))
def test_vertex_neighbors(self):
""" Test retrieving `Vertex`es for a `Vertex` with `GraphModelAdapter` """
if not self.test_abstract():
bob, steve, friendship = self.test_make_edge_keyname()
# see if we can get bob's friends, which should include steve
_q = bob.neighbors(keys_only=True).fetch(adapter=self.subject(), limit=10)
assert steve.key in _q, (
"failed to find steve's key in bob's neighbors."
" instead, got '%s' for adapter '%s'" % (
[i for i in _q], repr(self.subject())))
# see if we can get steve's friends, which should include bob
assert bob.key in (
steve.neighbors(keys_only=True).fetch(adapter=self.subject(), limit=10))
assert "Neighbors" in repr(steve.neighbors(keys_only=True))
assert "CONTAINS" in repr(steve.neighbors(keys_only=True))
class DirectedGraphAdapterTests(GraphModelAdapterTests):
""" Tests `model.adapter.abstract.DirectedGraphAdapter` """
__abstract__ = True
subject = abstract.DirectedGraphAdapter
def test_make_directed_edge_nokeyname(self):
""" Test saving a directed `Edge` with no keyname """
if not self.test_abstract():
bob = TestGraphPerson(key=model.VertexKey(TestGraphPerson, "bob"),
name="Bob")
k = bob.put(adapter=self.subject())
assert isinstance(bob, TestGraphPerson)
assert isinstance(k, model.Key)
assert isinstance(k, model.VertexKey)
assert isinstance(k.id, basestring)
steve = self.test_make_vertex_keyname()
f = TestGraphGift(bob, steve)
ek = f.put(adapter=self.subject())
assert bob.key == f.source
assert steve.key in f.target
assert isinstance(ek, model.EdgeKey)
assert isinstance(ek.id, (int, long))
assert isinstance(f, TestGraphGift)
def test_make_directed_edge_keyname(self):
""" Test saving a directed `Edge` with a keyname """
if not self.test_abstract():
bob = TestGraphPerson(key=model.VertexKey(TestGraphPerson, "bob"),
name="Bob")
k = bob.put(adapter=self.subject())
assert isinstance(k, model.Key), (
"instead of a key, got back the object: '%s'" % k)
assert isinstance(bob, TestGraphPerson)
assert isinstance(k, model.VertexKey)
assert isinstance(k.id, basestring)
steve = self.test_make_vertex_keyname()
_orig_ek = model.EdgeKey(TestGraphGift, "some-gift")
f = TestGraphGift(bob, steve, key=_orig_ek)
ek = f.put(adapter=self.subject())
assert isinstance(ek, model.EdgeKey)
assert isinstance(ek.id, basestring)
assert isinstance(f, TestGraphGift)
assert ek.id == "some-gift"
assert bob.key == f.source
assert steve.key in f.target
return bob, steve, f
def test_get_directed_edge_nokeyname(self):
""" Test retrieving a directed `Edge` by keyname """
if not self.test_abstract():
bob, steve, gift = self.test_make_directed_edge_keyname()
# fetch by key
_f = TestGraphGift.get(gift.key, adapter=self.subject())
assert isinstance(_f.key, model.Key)
assert isinstance(_f.key, model.EdgeKey)
assert isinstance(_f.key.id, basestring)
assert _f.key.id == "some-gift"
assert _f.key.kind == "TestGraphGift"
def test_edge_heads(self):
""" Test retrieving incoming `Edge`s ending at a particular `Vertex` """
if not self.test_abstract():
bob, steve, gift = self.test_make_directed_edge_keyname()
# friendship edge should appear for both vertexes
_q = bob.edges(tails=False, keys_only=True)\
.fetch(adapter=self.subject(), limit=10)
assert gift.key not in _q, (
"found gift's key among bob's edges heads, but shouldn't have."
" instead, got: '%s' with adapter '%s'" % (
[i for i in _q], repr(self.subject())))
_q = steve.edges(tails=False, keys_only=True)\
.fetch(adapter=self.subject(), limit=10)
assert gift.key in _q, (
"couldn't find gift's key among steve's edges heads."
" instead, got: '%s' with adapter '%s'" % (
[i for i in _q], repr(self.subject())))
def test_edge_tails(self):
""" Test retrieving outbound `Edge`s coming from a particular `Vertex` """
if not self.test_abstract():
bob, steve, gift = self.test_make_directed_edge_keyname()
# friendship edge should appear for both vertexes
_q = bob.edges(tails=True, keys_only=True)\
.fetch(adapter=self.subject(), limit=10)
assert gift.key in _q
_q = steve.edges(tails=True, keys_only=True)\
.fetch(adapter=self.subject(), limit=10)
assert gift.key not in _q
def test_neighbor_heads(self):
""" Test retrieving incoming `Vertex`s ending at a particular `Vertex` """
if not self.test_abstract():
bob, steve, gift = self.test_make_directed_edge_keyname()
# see if we can get steve's friends, which should include bob
_q = steve.neighbors(tails=False, keys_only=True)\
.fetch(adapter=self.subject(), limit=10)
assert bob.key in _q, (
"didn't find bob's key among steve's friends."
" instead, got: '%s' with adapter '%s'" % (
[i for i in _q], repr(self.subject())))
def test_neighbor_tails(self):
""" Test retrieving outbound `Vertex`s coming from a particular `Vertex` """
if not self.test_abstract():
bob, steve, gift = self.test_make_directed_edge_keyname()
# see if we can get bob's friends, which should include steve
assert steve.key in bob.neighbors(tails=True, keys_only=True)\
.fetch(adapter=self.subject(), limit=10)
| mit |
zzacharo/inspire-next | tests/integration/workflows/test_arxiv_workflow.py | 1 | 6964 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Tests for arXiv workflows."""
from __future__ import absolute_import, division, print_function
import mock
import re
import requests_mock
from invenio_db import db
from invenio_workflows import (
ObjectStatus,
WorkflowEngine,
start,
workflow_object_class,
)
from calls import (
already_harvested_on_legacy_record,
do_accept_core,
do_webcoll_callback,
do_robotupload_callback,
generate_record
)
from mocks import (
fake_download_file,
fake_beard_api_request,
fake_magpie_api_request,
)
from utils import get_halted_workflow
@mock.patch(
'inspirehep.modules.workflows.utils.download_file_to_workflow',
side_effect=fake_download_file,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.beard.json_api_request',
side_effect=fake_beard_api_request,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.magpie.json_api_request',
side_effect=fake_magpie_api_request,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.refextract.extract_references_from_file',
return_value=[],
)
def test_harvesting_arxiv_workflow_manual_rejected(
mocked_refextract_extract_refs,
mocked_api_request_magpie,
mocked_api_request_beard,
mocked_download,
small_app,
):
"""Test a full harvesting workflow."""
record = generate_record()
extra_config = {
"BEARD_API_URL": "http://example.com/beard",
"MAGPIE_API_URL": "http://example.com/magpie",
}
workflow_uuid = None
with small_app.app_context():
workflow_uuid, eng, obj = get_halted_workflow(
app=small_app,
extra_config=extra_config,
record=record,
)
# Now let's resolve it as accepted and continue
# FIXME Should be accept, but record validation prevents us.
obj.remove_action()
obj.extra_data["approved"] = False
# obj.extra_data["core"] = True
obj.save()
db.session.commit()
eng = WorkflowEngine.from_uuid(workflow_uuid)
obj = eng.processed_objects[0]
obj_id = obj.id
obj.continue_workflow()
obj = workflow_object_class.get(obj_id)
# It was rejected
assert obj.status == ObjectStatus.COMPLETED
@mock.patch(
'inspirehep.modules.workflows.utils.download_file_to_workflow',
side_effect=fake_download_file,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.beard.json_api_request',
side_effect=fake_beard_api_request,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.magpie.json_api_request',
side_effect=fake_magpie_api_request,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.refextract.extract_references_from_file',
return_value=[],
)
def test_harvesting_arxiv_workflow_already_on_legacy(
mocked_refextract_extract_refs,
mocked_api_request_magpie,
mocked_api_request_beard,
mocked_download,
small_app
):
"""Test a full harvesting workflow."""
record, categories = already_harvested_on_legacy_record()
extra_config = {
"BEARD_API_URL": "http://example.com/beard",
"MAGPIE_API_URL": "http://example.com/magpie",
'ARXIV_CATEGORIES_ALREADY_HARVESTED_ON_LEGACY': categories,
}
with small_app.app_context():
with mock.patch.dict(small_app.config, extra_config):
workflow_uuid = start('article', [record])
eng = WorkflowEngine.from_uuid(workflow_uuid)
obj = eng.processed_objects[0]
assert obj.status == ObjectStatus.COMPLETED
assert 'already-ingested' in obj.extra_data
assert obj.extra_data['already-ingested']
@mock.patch(
'inspirehep.modules.workflows.tasks.arxiv.download_file_to_workflow',
side_effect=fake_download_file,
)
@mock.patch(
'inspirehep.modules.workflows.utils.download_file_to_workflow',
side_effect=fake_download_file,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.beard.json_api_request',
side_effect=fake_beard_api_request,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.magpie.json_api_request',
side_effect=fake_magpie_api_request,
)
@mock.patch(
'inspirehep.modules.workflows.tasks.matching.match',
return_value=iter([]),
)
@mock.patch(
'inspirehep.modules.workflows.tasks.refextract.extract_references_from_file',
return_value=[],
)
def test_harvesting_arxiv_workflow_manual_accepted(
mocked_refextract_extract_refs,
mocked_matching_match,
mocked_api_request_magpie,
mocked_api_request_beard,
mocked_download_utils,
mocked_download_arxiv,
workflow_app,
):
"""Test a full harvesting workflow."""
record = generate_record()
with requests_mock.Mocker() as requests_mocker:
requests_mocker.register_uri(
requests_mock.ANY,
re.compile('.*(indexer|localhost).*'),
real_http=True,
)
requests_mocker.register_uri(
'POST',
re.compile(
'https?://localhost:1234.*',
),
text=u'[INFO]',
status_code=200,
)
workflow_uuid, eng, obj = get_halted_workflow(
app=workflow_app,
extra_config={'PRODUCTION_MODE': False},
record=record,
)
do_accept_core(
app=workflow_app,
workflow_id=obj.id,
)
eng = WorkflowEngine.from_uuid(workflow_uuid)
obj = eng.processed_objects[0]
assert obj.status == ObjectStatus.WAITING
response = do_robotupload_callback(
app=workflow_app,
workflow_id=obj.id,
recids=[12345],
)
assert response.status_code == 200
obj = workflow_object_class.get(obj.id)
assert obj.status == ObjectStatus.WAITING
response = do_webcoll_callback(app=workflow_app, recids=[12345])
assert response.status_code == 200
eng = WorkflowEngine.from_uuid(workflow_uuid)
obj = eng.processed_objects[0]
# It was accepted
assert obj.status == ObjectStatus.COMPLETED
| gpl-3.0 |
Intel-tensorflow/tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py | 14 | 9997 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_kronecker as kronecker
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _kronecker_dense(factors):
"""Convert a list of factors, into a dense Kronecker product."""
product = factors[0]
for factor in factors[1:]:
product = product[..., array_ops.newaxis, :, array_ops.newaxis]
factor_to_mul = factor[..., array_ops.newaxis, :, array_ops.newaxis, :]
product *= factor_to_mul
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
return product
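# Shape sketch (illustrative only, not part of the original test module): for
# factors whose trailing dimensions are [2, 2] and [3, 3], the dense result
# has trailing dimensions [2 * 3, 2 * 3] == [6, 6], e.g.
#   _kronecker_dense([linalg.eye(2), linalg.eye(3)])  # shape (6, 6)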
class KroneckerDenseTest(test.TestCase):
"""Test of `_kronecker_dense` function."""
def test_kronecker_dense_matrix(self):
x = ops.convert_to_tensor([[2., 3.], [1., 2.]], dtype=dtypes.float32)
y = ops.convert_to_tensor([[1., 2.], [5., -1.]], dtype=dtypes.float32)
# From explicitly writing out the kronecker product of x and y.
z = ops.convert_to_tensor([
[2., 4., 3., 6.],
[10., -2., 15., -3.],
[1., 2., 2., 4.],
[5., -1., 10., -2.]], dtype=dtypes.float32)
# From explicitly writing out the kronecker product of y and x.
w = ops.convert_to_tensor([
[2., 3., 4., 6.],
[1., 2., 2., 4.],
[10., 15., -2., -3.],
[5., 10., -1., -2.]], dtype=dtypes.float32)
self.assertAllClose(
self.evaluate(_kronecker_dense([x, y])), self.evaluate(z))
self.assertAllClose(
self.evaluate(_kronecker_dense([y, x])), self.evaluate(w))
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorKroneckerTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((1, 1), factors=[(1, 1), (1, 1)]),
shape_info((8, 8), factors=[(2, 2), (2, 2), (2, 2)]),
shape_info((12, 12), factors=[(2, 2), (3, 3), (2, 2)]),
shape_info((1, 3, 3), factors=[(1, 1), (1, 3, 3)]),
shape_info((3, 6, 6), factors=[(3, 1, 1), (1, 2, 2), (1, 3, 3)]),
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Kronecker products constructed below will be from symmetric
# positive-definite matrices.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
expected_factors = build_info.__dict__["factors"]
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True)
for block_shape in expected_factors
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(m, shape=None) for m in matrices]
operator = kronecker.LinearOperatorKronecker(
[linalg.LinearOperatorFullMatrix(
l,
is_square=True,
is_self_adjoint=True,
is_positive_definite=True)
for l in lin_op_matrices])
matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)
kronecker_dense = _kronecker_dense(matrices)
if not use_placeholder:
kronecker_dense.set_shape(shape)
return operator, kronecker_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1 and 1.
# The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = kronecker.LinearOperatorKronecker(
[linalg.LinearOperatorFullMatrix(matrix),
linalg.LinearOperatorFullMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = kronecker.LinearOperatorKronecker(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegex(ValueError, "always non-singular"):
kronecker.LinearOperatorKronecker(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")
operator = kronecker.LinearOperatorKronecker([operator_1, operator_2])
self.assertEqual("left_x_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegex(TypeError, "same dtype"):
kronecker.LinearOperatorKronecker(operators)
def test_empty_or_one_operators_raises(self):
with self.assertRaisesRegex(ValueError, ">=1 operators"):
kronecker.LinearOperatorKronecker([])
def test_kronecker_adjoint_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
],
is_non_singular=True,
)
adjoint = operator.adjoint()
self.assertIsInstance(
adjoint,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(adjoint.operators))
def test_kronecker_cholesky_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
],
is_positive_definite=True,
is_self_adjoint=True,
)
cholesky_factor = operator.cholesky()
self.assertIsInstance(
cholesky_factor,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(cholesky_factor.operators))
self.assertIsInstance(
cholesky_factor.operators[0],
lower_triangular.LinearOperatorLowerTriangular)
self.assertIsInstance(
cholesky_factor.operators[1],
lower_triangular.LinearOperatorLowerTriangular)
def test_kronecker_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix, is_non_singular=True),
],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
kronecker.LinearOperatorKronecker)
self.assertEqual(2, len(inverse.operators))
def test_tape_safe(self):
matrix_1 = variables_module.Variable([[1., 0.], [0., 1.]])
matrix_2 = variables_module.Variable([[2., 0.], [0., 2.]])
operator = kronecker.LinearOperatorKronecker(
[
linalg.LinearOperatorFullMatrix(
matrix_1, is_non_singular=True),
linalg.LinearOperatorFullMatrix(
matrix_2, is_non_singular=True),
],
is_non_singular=True,
)
self.check_tape_safe(operator)
if __name__ == "__main__":
linear_operator_test_util.add_tests(SquareLinearOperatorKroneckerTest)
test.main()
| apache-2.0 |
Jorge-Rodriguez/ansible | lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py | 59 | 8530 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: profitbricks_nic
short_description: Create or Remove a NIC.
description:
     - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
datacenter:
description:
- The datacenter in which to operate.
required: true
server:
description:
- The server name or ID.
required: true
name:
description:
- The name or ID of the NIC. This is only required on deletes, but not on create.
required: true
lan:
description:
- The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
required: true
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the operation to complete before returning
required: false
default: "yes"
type: bool
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- Indicate desired state of the resource
required: false
default: 'present'
choices: ["present", "absent"]
requirements: [ "profitbricks" ]
author: Matt Baldwin (@baldwinSPC) <[email protected]>
'''
EXAMPLES = '''
# Create a NIC
- profitbricks_nic:
datacenter: Tardis One
server: node002
lan: 2
wait_timeout: 500
state: present
# Remove a NIC
- profitbricks_nic:
datacenter: Tardis One
server: node002
name: 7341c2454f
wait_timeout: 500
state: absent
'''
import re
import uuid
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, NIC
except ImportError:
HAS_PB_SDK = False
from ansible.module_utils.basic import AnsibleModule
uuid_match = re.compile(
r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
            raise Exception(
                'Request ' + msg + ' "' + str(
                    promise['requestId']) + '" failed to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
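# The `promise` handled above is the dict the ProfitBricks SDK returns for an
# asynchronous request; the only key this helper relies on is 'requestId'.
# Illustrative call (values are made up):
#
#   _wait_for_completion(profitbricks, {'requestId': '0123abcd'}, 600, "create_nic")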
def create_nic(module, profitbricks):
"""
Creates a NIC.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
    True if the NIC was created, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
lan = module.params.get('lan')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
try:
n = NIC(
name=name,
lan=lan
)
nic_response = profitbricks.create_nic(datacenter, server, n)
if wait:
_wait_for_completion(profitbricks, nic_response,
wait_timeout, "create_nic")
return nic_response
except Exception as e:
module.fail_json(msg="failed to create the NIC: %s" % str(e))
def delete_nic(module, profitbricks):
"""
Removes a NIC
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the NIC was removed, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
name = module.params.get('name')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
server_found = False
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server_found = True
server = s['id']
break
if not server_found:
return False
# Locate UUID for NIC
nic_found = False
if not (uuid_match.match(name)):
nic_list = profitbricks.list_nics(datacenter, server)
for n in nic_list['items']:
if name == n['properties']['name']:
nic_found = True
name = n['id']
break
if not nic_found:
return False
try:
nic_response = profitbricks.delete_nic(datacenter, server, name)
return nic_response
except Exception as e:
module.fail_json(msg="failed to remove the NIC: %s" % str(e))
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
name=dict(default=str(uuid.uuid4()).replace('-', '')[:10]),
lan=dict(),
subscription_user=dict(),
subscription_password=dict(no_log=True),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required')
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required')
try:
(changed) = delete_nic(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set nic state: %s' % str(e))
elif state == 'present':
if not module.params.get('lan'):
module.fail_json(msg='lan parameter is required')
try:
(nic_dict) = create_nic(module, profitbricks)
module.exit_json(nics=nic_dict)
except Exception as e:
module.fail_json(msg='failed to set nic state: %s' % str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
yancharkin/games_nebula_goglib_scripts | konung_legend_of_the_north/settings.py | 1 | 10548 | import sys, os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GLib
import gettext
import imp
try:
from ConfigParser import ConfigParser as ConfigParser
except:
from configparser import ConfigParser as ConfigParser
nebula_dir = os.getenv('NEBULA_DIR')
modules_dir = nebula_dir + '/modules'
set_visuals = imp.load_source('set_visuals', modules_dir + '/set_visuals.py')
gettext.bindtextdomain('games_nebula', nebula_dir + '/locale')
gettext.textdomain('games_nebula')
_ = gettext.gettext
current_dir = sys.path[0]
download_dir = os.getenv('DOWNLOAD_DIR')
install_dir = os.getenv('INSTALL_DIR')
game_dir = install_dir + '/konung_legend_of_the_north/game'
patch_path = download_dir + '/_distr/konung_legend_of_the_north/konung_widescreen_v2_co.7z'
link_patch = 'http://www.wsgf.org/dr/konung-legends-north/en'
os.system('mkdir -p "' + download_dir + '/_distr/konung_legend_of_the_north"')
is_english_version = os.path.exists(game_dir + '/MUSIC')
class GUI:
def __init__(self):
self.config_load()
if is_english_version:
self.create_chooser_window()
else:
if os.path.exists(patch_path):
if not os.path.exists(game_dir + '/res_patch'):
os.system('7z x -aoa -o"' + game_dir + '/res_patch" "' + patch_path + '"')
self.create_set_res_window()
self.set_res_window.show_all()
else:
self.create_download_window()
self.download_window.show_all()
def config_load(self):
config_file = current_dir + '/settings.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
if not config_parser.has_section('Settings'):
config_parser.add_section('Settings')
if is_english_version:
if not config_parser.has_option('Settings', 'exe'):
self.exe = 'KONUNG.EXE'
config_parser.set('Settings', 'exe', str(self.exe))
else:
self.exe = config_parser.get('Settings', 'exe')
else:
if not config_parser.has_option('Settings', 'resolution'):
self.resolution = 0
config_parser.set('Settings', 'resolution', str(self.resolution))
else:
self.resolution = config_parser.getint('Settings', 'resolution')
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
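    # For reference, config_load() leaves a settings.ini that looks roughly
    # like one of the following (values shown are the defaults set above):
    #
    #   [Settings]          ; English version
    #   exe = KONUNG.EXE
    #
    #   [Settings]          ; other versions (resolution patch)
    #   resolution = 0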
def create_chooser_window(self):
self.chooser_window = Gtk.Window(
title = _("Konung: Legends of the North"),
type = Gtk.WindowType.TOPLEVEL,
window_position = Gtk.WindowPosition.CENTER_ALWAYS,
resizable = False,
default_width = 360
)
self.chooser_window.connect('delete-event', self.quit_app)
frame_launch = Gtk.Frame(
label = _("Launch options"),
label_xalign = 0.5,
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10
)
box_launch = Gtk.Box(
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10,
spacing = 10,
orientation = Gtk.Orientation.VERTICAL
)
radiobutton_single = Gtk.RadioButton(
label = _("Singleplayer"),
name = 'KONUNG.EXE'
)
radiobutton_lan = Gtk.RadioButton(
label = _("LAN multiplayer"),
name = 'LAN_KONG.EXE'
)
radiobutton_lan.join_group(radiobutton_single)
radiobutton_tutorial = Gtk.RadioButton(
label = _("Tutorial"),
name = 'TUTORIAL.EXE'
)
radiobutton_tutorial.join_group(radiobutton_single)
button_save = Gtk.Button(
label = _("Save and quit")
)
button_save.connect('clicked', self.cb_button_save)
box_launch.pack_start(radiobutton_single, True, True, 0)
box_launch.pack_start(radiobutton_lan, True, True, 0)
box_launch.pack_start(radiobutton_tutorial, True, True, 0)
box_launch.pack_start(button_save, True, True, 0)
frame_launch.add(box_launch)
for radiobutton in box_launch.get_children():
if radiobutton.get_name() == self.exe:
radiobutton.set_active(True)
radiobutton_single.connect('clicked', self.cb_radiobuttons)
radiobutton_lan.connect('clicked', self.cb_radiobuttons)
radiobutton_tutorial.connect('clicked', self.cb_radiobuttons)
self.chooser_window.add(frame_launch)
self.chooser_window.show_all()
def create_download_window(self):
self.download_window = Gtk.Window(
title = _("Konung: Legends of the North"),
type = Gtk.WindowType.TOPLEVEL,
window_position = Gtk.WindowPosition.CENTER_ALWAYS,
resizable = False,
default_width = 360
)
self.download_window.connect('delete-event', self.quit_app)
box = Gtk.Box(
orientation = Gtk.Orientation.VERTICAL,
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10,
spacing = 10
)
linkbutton_download = Gtk.LinkButton(
label = _("Download resolution patch"),
uri = link_patch
)
linkbutton_put = Gtk.LinkButton(
label = _("Put it here"),
uri = 'file://' + download_dir + '/_distr/konung_legend_of_the_north'
)
button_install = Gtk.Button(label=_("Install"))
button_install.connect('clicked', self.cb_button_install)
box.pack_start(linkbutton_download, True, True, 0)
box.pack_start(linkbutton_put, True, True, 0)
box.pack_start(button_install, True, True, 0)
self.download_window.add(box)
def create_set_res_window(self):
self.set_res_window = Gtk.Window(
title = _("Konung: Legends of the North"),
type = Gtk.WindowType.TOPLEVEL,
window_position = Gtk.WindowPosition.CENTER_ALWAYS,
resizable = False,
default_width = 360
)
self.set_res_window.connect('delete-event', self.quit_app)
grid = Gtk.Grid(
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10,
row_spacing = 10,
column_spacing = 10,
column_homogeneous = True,
)
label_custom_res = Gtk.Label(
label = _("Custom resolution:")
)
self.combobox_resolution = Gtk.ComboBoxText()
resolutions_list = sorted(os.listdir(game_dir + '/res_patch'))
for resolution in resolutions_list:
self.combobox_resolution.append_text(resolution)
self.combobox_resolution.set_active(self.resolution)
self.combobox_resolution.connect('changed', self.cb_combobox_resolution)
button_save = Gtk.Button(
label = _("Save and quit"),
)
button_save.connect('clicked', self.cb_button_save)
grid.attach(label_custom_res, 0, 0, 1, 1)
grid.attach(self.combobox_resolution, 1, 0, 1, 1)
grid.attach(button_save, 0, 2, 2, 1)
self.set_res_window.add(grid)
def modify_start_file(self):
new_launch_command = \
'python "$NEBULA_DIR/launcher_wine.py" konung_legend_of_the_north "' + self.exe + '"'
start_file = open(current_dir + '/start.sh', 'r')
start_file_content = start_file.readlines()
start_file.close()
for i in range(len(start_file_content)):
if '.exe' in start_file_content[i].lower():
start_file_content[i] = new_launch_command
start_file = open(current_dir + '/start.sh', 'w')
start_file.writelines(start_file_content)
start_file.close()
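    # After modify_start_file() runs, the .exe line in start.sh reads, e.g.
    # (assuming the singleplayer executable was selected):
    #
    #   python "$NEBULA_DIR/launcher_wine.py" konung_legend_of_the_north "KONUNG.EXE"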
def cb_button_install(self, button):
if not os.path.exists(patch_path):
self.download_window.hide()
message_dialog = Gtk.MessageDialog(
self.download_window,
0,
Gtk.MessageType.ERROR,
Gtk.ButtonsType.OK,
_("Error")
)
message_dialog.format_secondary_text(_("Patch not found in download directory."))
content_area = message_dialog.get_content_area()
content_area.set_property('margin-left', 10)
content_area.set_property('margin-right', 10)
content_area.set_property('margin-top', 10)
content_area.set_property('margin-bottom', 10)
message_dialog.run()
message_dialog.destroy()
self.download_window.show()
else:
os.system('7z x -aoa -o"' + game_dir + '/res_patch" "' + patch_path + '"')
while Gtk.events_pending():
Gtk.main_iteration()
self.download_window.hide()
self.create_set_res_window()
self.set_res_window.show_all()
def cb_combobox_resolution(self, combobox):
self.resolution = combobox.get_active()
def cb_button_save(self, button):
config_file = current_dir + '/settings.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
if is_english_version:
self.modify_start_file()
config_parser.set('Settings', 'exe', str(self.exe))
else:
if not os.path.exists(game_dir + '/konung.exe.original'):
os.system('mv "' + game_dir + '/konung.exe" "' + game_dir + '/konung.exe.original"')
selected_resolution = self.combobox_resolution.get_active_text()
os.system('cp "' + game_dir + '/res_patch/' + selected_resolution + '"/* "' + game_dir + '"')
config_parser.set('Settings', 'resolution', str(self.resolution))
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
Gtk.main_quit()
def cb_radiobuttons(self, radiobutton):
self.exe = radiobutton.get_name()
def quit_app(self, window, event):
Gtk.main_quit()
def main():
import sys
app = GUI()
Gtk.main()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
nrego/westpa | lib/examples/nacl_openmm/openmm_propagator.py | 4 | 8843 | from __future__ import division, print_function; __metaclass__ = type
import os
import errno
import random
import time
import numpy as np
import west
from west.propagators import WESTPropagator
from west import Segment
from west.states import BasisState, InitialState
import simtk.openmm.openmm as openmm
import simtk.unit as units
import logging
log = logging.getLogger(__name__)
log.debug('loading module %r' % __name__)
pcoord_len = 11
pcoord_dtype = np.float32
class OpenMMPropagator(WESTPropagator):
def __init__(self, rc=None):
super(OpenMMPropagator, self).__init__(rc)
self.pcoord_len = pcoord_len
self.pcoord_dtype = pcoord_dtype
self.pcoord_ndim = 1
self.basis_coordinates = np.array([[5.0, 0.0, 0.0], [-5.0, 0.0, 0.0]], dtype=pcoord_dtype)
# Default platform properties
self.platform_properties = {'OpenCLPrecision': 'mixed',
'OpenCLPlatformIndex': '0',
'OpenCLDeviceIndex': '0',
'CudaPrecision': 'mixed',
'CudaDeviceIndex': '0'}
config = self.rc.config
# Validate configuration
for key in [('west', 'openmm', 'system', 'file'),
('west', 'openmm', 'integrator', 'file'),
('west', 'openmm', 'integrator', 'steps_per_tau'),
('west', 'openmm', 'integrator', 'steps_per_write'),
('west', 'openmm', 'platform', 'name'),
('west', 'data', 'data_refs', 'initial_state')]:
config.require(key)
self.initial_state_ref_template = config['west','data','data_refs','initial_state']
system_xml_file = config['west', 'openmm', 'system', 'file']
self.integrator_xml_file = config['west', 'openmm', 'integrator', 'file']
self.steps_per_tau = config['west', 'openmm', 'integrator', 'steps_per_tau']
self.steps_per_write = config['west', 'openmm', 'integrator', 'steps_per_write']
self.nblocks = (self.steps_per_tau // self.steps_per_write) + 1
platform_name = config['west', 'openmm', 'platform', 'name'] or 'Reference'
config_platform_properties = config['west', 'openmm', 'platform', 'properties'] or {}
# Set up OpenMM
with open(system_xml_file, 'r') as f:
# NOTE: calling the system self.system causes a namespace collision in the propagator
self.mmsystem = openmm.XmlSerializer.deserialize(f.read())
with open(self.integrator_xml_file, 'r') as f:
integrator = openmm.XmlSerializer.deserialize(f.read())
self.platform = openmm.Platform.getPlatformByName(platform_name)
self.platform_properties.update(config_platform_properties)
self.temperature = integrator.getTemperature()
@staticmethod
def dist(x, y):
return np.sqrt(np.sum((x-y)**2))
@staticmethod
def makepath(template, template_args=None,
expanduser=True, expandvars=True, abspath=False, realpath=False):
template_args = template_args or {}
path = template.format(**template_args)
if expandvars: path = os.path.expandvars(path)
if expanduser: path = os.path.expanduser(path)
if realpath: path = os.path.realpath(path)
if abspath: path = os.path.abspath(path)
path = os.path.normpath(path)
return path
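    # Illustrative use of makepath(); the template string and attribute below
    # are made up, the real template comes from the
    # 'west/data/data_refs/initial_state' setting in west.cfg:
    #
    #   ref = OpenMMPropagator.makepath(
    #       '$WEST_SIM_ROOT/istates/{initial_state.state_id}.txt',
    #       {'initial_state': initial_state})
    #
    # str.format is applied first (filling '{initial_state.state_id}'), then
    # $WEST_SIM_ROOT is expanded from the environment.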
@staticmethod
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_pcoord(self, state):
if isinstance(state, BasisState):
coords = self.basis_coordinates.copy()
elif isinstance(state, InitialState):
template_args = {'initial_state': state}
istate_data_ref = self.makepath(self.initial_state_ref_template, template_args)
coords = np.loadtxt(istate_data_ref)
else:
raise TypeError('state must be BasisState or InitialState')
state.pcoord = self.dist(coords[0,:], coords[1,:])
def propagate(self, segments):
platform_properties = self.platform_properties.copy()
try:
process_id = os.environ['WM_PROCESS_INDEX']
platform_properties['OpenCLDeviceIndex'] = process_id
platform_properties['CudaDeviceIndex'] = process_id
except KeyError:
pass
with open(self.integrator_xml_file, 'r') as f:
integrator = openmm.XmlSerializer.deserialize(f.read())
integrator.setRandomNumberSeed(random.randint(0, 2**16))
context = openmm.Context(self.mmsystem, integrator, self.platform, platform_properties)
for segment in segments:
starttime = time.time()
# Set up arrays to hold trajectory data for pcoords, coordinates and velocities
pcoords = np.empty((self.nblocks, 1))
pcoords[0] = segment.pcoord[0]
coordinates = np.empty((self.nblocks, self.mmsystem.getNumParticles(), 3))
velocities = np.empty((self.nblocks, self.mmsystem.getNumParticles(), 3))
# Get initial coordinates and velocities from restarts or initial state
if segment.initpoint_type == Segment.SEG_INITPOINT_CONTINUES:
# Get restart data
assert 'restart_coord' in segment.data
assert 'restart_veloc' in segment.data
coordinates[0] = segment.data['restart_coord']
velocities[0] = segment.data['restart_veloc']
initial_coords = units.Quantity(segment.data['restart_coord'], units.nanometer)
initial_velocs = units.Quantity(segment.data['restart_veloc'], units.nanometer / units.picosecond)
context.setPositions(initial_coords)
context.setVelocities(initial_velocs)
del segment.data['restart_coord']
del segment.data['restart_veloc']
elif segment.initpoint_type == Segment.SEG_INITPOINT_NEWTRAJ:
initial_state = self.initial_states[segment.initial_state_id]
assert initial_state.istate_type == InitialState.ISTATE_TYPE_GENERATED
                # Load coordinates corresponding to the initial state
new_template_args = {'initial_state': initial_state}
istate_data_ref = self.makepath(self.initial_state_ref_template, new_template_args)
initial_coords = units.Quantity(np.loadtxt(istate_data_ref), units.angstrom)
# Set up context for this segment
context.setPositions(initial_coords)
context.setVelocitiesToTemperature(self.temperature)
state = context.getState(getPositions=True, getVelocities=True)
coordinates[0] = state.getPositions(asNumpy=True)
velocities[0] = state.getVelocities(asNumpy=True)
# Run dynamics
for istep in xrange(1, self.nblocks):
integrator.step(self.steps_per_write)
state = context.getState(getPositions=True, getVelocities=True)
coordinates[istep] = state.getPositions(asNumpy=True)
velocities[istep] = state.getVelocities(asNumpy=True)
pcoords[istep] = 10.0 * self.dist(coordinates[istep,0,:], coordinates[istep,1,:])
# Finalize segment trajectory
segment.pcoord = pcoords[...].astype(pcoord_dtype)
segment.data['coord'] = coordinates[...]
segment.data['veloc'] = velocities[...]
segment.status = Segment.SEG_STATUS_COMPLETE
segment.walltime = time.time() - starttime
return segments
def gen_istate(self, basis_state, initial_state):
'''Generate a new initial state from the given basis state.'''
initial_coords = self.basis_coordinates.copy()
initial_coords[0,0] = random.randrange(5, 16)
new_template_args = {'initial_state': initial_state}
istate_data_ref = self.makepath(self.initial_state_ref_template, new_template_args)
self.mkdir_p(os.path.dirname(istate_data_ref))
# Save coordinates of initial state as a text file
# NOTE: this is ok for this example, but should be optimized for large systems
np.savetxt(istate_data_ref, initial_coords)
# Calculate pcoord for generated initial state
pcoord = self.dist(initial_coords[0,:], initial_coords[1,:])
initial_state.pcoord = np.array([pcoord], dtype=pcoord_dtype)
initial_state.istate_status = initial_state.ISTATE_STATUS_PREPARED
return initial_state
| gpl-3.0 |
capturePointer/or-tools | examples/python/p_median.py | 34 | 3462 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
P-median problem in Google CP Solver.
Model and data from the OPL Manual, which describes the problem:
'''
The P-Median problem is a well known problem in Operations Research.
The problem can be stated very simply, like this: given a set of customers
with known amounts of demand, a set of candidate locations for warehouses,
and the distance between each pair of customer-warehouse, choose P
warehouses to open that minimize the demand-weighted distance of serving
all customers from those P warehouses.
'''
Compare with the following models:
* MiniZinc: http://hakank.org/minizinc/p_median.mzn
* Comet: http://hakank.org/comet/p_median.co
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
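# In the model built in main() below, ship[c, w] == 1 iff customer c is served
# from warehouse w and open[w] marks warehouse w as opened, giving:
#
#   minimize   sum_c sum_w demand[c] * distance[c][w] * ship[c, w]
#   subject to sum_w ship[c, w] == 1   for every customer c
#              sum(open) == p
#              ship[c, w] <= open[w]   for every c, w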
def main():
# Create the solver.
solver = pywrapcp.Solver('P-median problem')
#
# data
#
p = 2
num_customers = 4
customers = range(num_customers)
Albert, Bob, Chris, Daniel = customers
num_warehouses = 3
warehouses = range(num_warehouses)
Santa_Clara, San_Jose, Berkeley = warehouses
demand = [100, 80, 80, 70]
distance = [
[2, 10, 50],
[2, 10, 52],
[50, 60, 3],
[40, 60, 1]
]
#
# declare variables
#
  open = [solver.IntVar(warehouses, 'open[%i]' % w)
for w in warehouses]
ship = {}
for c in customers:
for w in warehouses:
ship[c, w] = solver.IntVar(0, 1, 'ship[%i,%i]' % (c, w))
ship_flat = [ship[c, w]
for c in customers
for w in warehouses]
z = solver.IntVar(0, 1000, 'z')
#
# constraints
#
z_sum = solver.Sum([demand[c] * distance[c][w] * ship[c, w]
for c in customers
for w in warehouses])
solver.Add(z == z_sum)
for c in customers:
s = solver.Sum([ship[c, w]
for w in warehouses])
solver.Add(s == 1)
solver.Add(solver.Sum(open) == p)
for c in customers:
for w in warehouses:
solver.Add(ship[c, w] <= open[w])
# objective
objective = solver.Minimize(z, 1)
#
# solution and search
#
db = solver.Phase(open + ship_flat,
solver.INT_VAR_DEFAULT,
solver.INT_VALUE_DEFAULT)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print 'z:', z.Value()
print 'open:', [open[w].Value() for w in warehouses]
for c in customers:
for w in warehouses:
print ship[c, w].Value(),
print
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime(), 'ms'
if __name__ == '__main__':
main()
| apache-2.0 |
rsalmaso/django-allauth | allauth/socialaccount/providers/gitlab/tests.py | 2 | 1656 | # -*- coding: utf-8 -*-
from allauth.socialaccount.providers.gitlab.provider import GitLabProvider
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
class GitLabTests(OAuth2TestsMixin, TestCase):
provider_id = GitLabProvider.id
def get_mocked_response(self):
return MockedResponse(
200,
"""
{
"avatar_url": "https://secure.gravatar.com/avatar/123",
"bio": null,
"can_create_group": true,
"can_create_project": true,
"color_scheme_id": 5,
"confirmed_at": "2015-03-02T16:53:58.370Z",
"created_at": "2015-03-02T16:53:58.885Z",
"current_sign_in_at": "2018-06-12T18:44:49.985Z",
"email": "[email protected]",
"external": false,
"id": 2,
"identities": [],
"last_activity_on": "2018-06-11",
"last_sign_in_at": "2018-05-31T14:59:44.527Z",
"linkedin": "",
"location": null,
"name": "Mr Bob",
"organization": null,
"projects_limit": 10,
"shared_runners_minutes_limit": 2000,
"skype": "",
"state": "active",
"theme_id": 6,
"twitter": "mrbob",
"two_factor_enabled": true,
"username": "mr.bob",
"web_url": "https://gitlab.example.com/u/mr.bob",
"website_url": ""
}
""",
)
| mit |
shaunbrady/boto | tests/integration/configservice/test_configservice.py | 93 | 2081 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.configservice.exceptions import NoSuchConfigurationRecorderException
from tests.compat import unittest
class TestConfigService(unittest.TestCase):
def setUp(self):
self.configservice = boto.connect_configservice()
def test_describe_configuration_recorders(self):
response = self.configservice.describe_configuration_recorders()
self.assertIn('ConfigurationRecorders', response)
def test_handle_no_such_configuration_recorder(self):
with self.assertRaises(NoSuchConfigurationRecorderException):
self.configservice.describe_configuration_recorders(
configuration_recorder_names=['non-existant-recorder'])
def test_connect_to_non_us_east_1(self):
self.configservice = boto.configservice.connect_to_region('us-west-2')
response = self.configservice.describe_configuration_recorders()
self.assertIn('ConfigurationRecorders', response)
| mit |
dmlc/mxnet | example/nce-loss/lstm_net.py | 26 | 4926 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
from collections import namedtuple
import mxnet as mx
from nce import nce_loss
LSTMState = namedtuple("LSTMState", ["c", "h"])
LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias",
"h2h_weight", "h2h_bias"])
def _lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0.):
"""LSTM Cell symbol"""
if dropout > 0.:
indata = mx.sym.Dropout(data=indata, p=dropout)
i2h = mx.sym.FullyConnected(data=indata,
weight=param.i2h_weight,
bias=param.i2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_i2h" % (seqidx, layeridx))
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.h2h_weight,
bias=param.h2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
name="t%d_l%d_slice" % (seqidx, layeridx))
in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
return LSTMState(c=next_c, h=next_h)
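# For reference, the cell above is the standard LSTM update; with the four
# slices of `gates` taken in order (input, candidate, forget, output):
#
#   i = sigmoid(W_i x + U_i h_prev)
#   g = tanh(W_g x + U_g h_prev)        # "in_transform"
#   f = sigmoid(W_f x + U_f h_prev)
#   o = sigmoid(W_o x + U_o h_prev)
#   c = f * c_prev + i * g
#   h = o * tanh(c)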
def get_lstm_net(vocab_size, seq_len, num_lstm_layer, num_hidden):
param_cells = []
last_states = []
for i in range(num_lstm_layer):
param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable("l%d_i2h_weight" % i),
i2h_bias=mx.sym.Variable("l%d_i2h_bias" % i),
h2h_weight=mx.sym.Variable("l%d_h2h_weight" % i),
h2h_bias=mx.sym.Variable("l%d_h2h_bias" % i)))
state = LSTMState(c=mx.sym.Variable("l%d_init_c" % i),
h=mx.sym.Variable("l%d_init_h" % i))
last_states.append(state)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
label_weight = mx.sym.Variable('label_weight')
embed_weight = mx.sym.Variable('embed_weight')
label_embed_weight = mx.sym.Variable('label_embed_weight')
data_embed = mx.sym.Embedding(data=data, input_dim=vocab_size,
weight=embed_weight,
output_dim=100, name='data_embed')
datavec = mx.sym.SliceChannel(data=data_embed,
num_outputs=seq_len,
squeeze_axis=True, name='data_slice')
labelvec = mx.sym.SliceChannel(data=label,
num_outputs=seq_len,
squeeze_axis=True, name='label_slice')
labelweightvec = mx.sym.SliceChannel(data=label_weight,
num_outputs=seq_len,
squeeze_axis=True, name='label_weight_slice')
probs = []
for seqidx in range(seq_len):
hidden = datavec[seqidx]
for i in range(num_lstm_layer):
next_state = _lstm(num_hidden, indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=seqidx, layeridx=i)
hidden = next_state.h
last_states[i] = next_state
probs.append(nce_loss(data=hidden,
label=labelvec[seqidx],
label_weight=labelweightvec[seqidx],
embed_weight=label_embed_weight,
vocab_size=vocab_size,
num_hidden=100))
return mx.sym.Group(probs)
| apache-2.0 |
abonaca/gary | gary/potential/custom.py | 1 | 4179 | # coding: utf-8
""" Potential used in Price-Whelan et al. (in prep.) TODO """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Third-party
from astropy.constants import G
import astropy.units as u
import numpy as np
# Project
# from .cpotential import CCompositePotential
from .core import CompositePotential
from .cbuiltin import HernquistPotential, MiyamotoNagaiPotential, \
LeeSutoTriaxialNFWPotential, SphericalNFWPotential, LogarithmicPotential
from ..units import galactic
__all__ = ['PW14Potential', 'LM10Potential', 'TriaxialMWPotential']
class PW14Potential(CompositePotential):
def __init__(self, units=galactic, disk=dict(), bulge=dict(), halo=dict()):
default_disk = dict(m=6.5E10, a=6.5, b=0.26)
default_bulge = dict(m=2E10, c=0.3)
default_halo = dict(a=1.4, b=1., c=0.6, v_c=0.247, r_s=30.,
phi=np.pi/2., theta=np.pi/2., psi=np.pi/2.)
for k,v in default_disk.items():
if k not in disk:
disk[k] = v
for k,v in default_bulge.items():
if k not in bulge:
bulge[k] = v
for k,v in default_halo.items():
if k not in halo:
halo[k] = v
kwargs = dict()
kwargs["disk"] = MiyamotoNagaiPotential(units=units, **disk)
kwargs["bulge"] = HernquistPotential(units=units, **bulge)
if halo['a'] == 1 and halo['b'] == 1 and halo['c'] == 1:
kwargs["halo"] = SphericalNFWPotential(units=units,
v_c=halo['v_c'],
r_s=halo['r_s'])
else:
kwargs["halo"] = LeeSutoTriaxialNFWPotential(units=units, **halo)
super(PW14Potential,self).__init__(**kwargs)
class LM10Potential(CompositePotential):
def __init__(self, units=galactic, disk=dict(), bulge=dict(), halo=dict()):
default_disk = dict(m=1E11, a=6.5, b=0.26)
default_bulge = dict(m=3.4E10, c=0.7)
default_halo = dict(q1=1.38, q2=1., q3=1.36, r_h=12.,
phi=(97*u.degree).to(u.radian).value,
v_c=np.sqrt(2)*(121.858*u.km/u.s).to(u.kpc/u.Myr).value)
for k,v in default_disk.items():
if k not in disk:
disk[k] = v
for k,v in default_bulge.items():
if k not in bulge:
bulge[k] = v
for k,v in default_halo.items():
if k not in halo:
halo[k] = v
kwargs = dict()
kwargs["disk"] = MiyamotoNagaiPotential(units=units, **disk)
kwargs["bulge"] = HernquistPotential(units=units, **bulge)
kwargs["halo"] = LogarithmicPotential(units=units, **halo)
super(LM10Potential,self).__init__(**kwargs)
class TriaxialMWPotential(CompositePotential):
def __init__(self, units=galactic,
disk=dict(), bulge=dict(), halo=dict()):
""" Axis ratio values taken from Jing & Suto (2002). Other
parameters come from a by-eye fit to Bovy's MW2014Potential.
Choice of v_c sets circular velocity at Sun to 220 km/s
"""
default_disk = dict(m=7E10, a=3.5, b=0.14)
default_bulge = dict(m=1E10, c=1.1)
default_halo = dict(a=1., b=0.75, c=0.55,
v_c=0.239225, r_s=30.,
phi=0., theta=0., psi=0.)
for k,v in default_disk.items():
if k not in disk:
disk[k] = v
for k,v in default_bulge.items():
if k not in bulge:
bulge[k] = v
for k,v in default_halo.items():
if k not in halo:
halo[k] = v
kwargs = dict()
kwargs["disk"] = MiyamotoNagaiPotential(units=units, **disk)
kwargs["bulge"] = HernquistPotential(units=units, **bulge)
kwargs["halo"] = LeeSutoTriaxialNFWPotential(units=units, **halo)
super(TriaxialMWPotential,self).__init__(**kwargs)
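# A minimal usage sketch (keyword values are the defaults shown above; units
# come from gary.units.galactic):
#
#   pot = TriaxialMWPotential()
#   pot = PW14Potential(halo=dict(a=1., b=1., c=1.))  # falls back to a spherical NFW halo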
stuff = """
def busey():
import webbrowser
webbrowser.open("http://i.imgur.com/KNoyPwW.jpg")
"""
| mit |
bregman-arie/ansible | lib/ansible/plugins/filter/network.py | 37 | 11389 | #
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import os
import traceback
from collections import Mapping
from xml.etree.ElementTree import fromstring
from ansible.module_utils.network.common.utils import Template
from ansible.module_utils.six import iteritems, string_types
from ansible.errors import AnsibleError
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
import textfsm
HAS_TEXTFSM = True
except ImportError:
HAS_TEXTFSM = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def re_matchall(regex, value):
objects = list()
for match in re.findall(regex.pattern, value, re.M):
obj = {}
if regex.groupindex:
for name, index in iteritems(regex.groupindex):
if len(regex.groupindex) == 1:
obj[name] = match
else:
obj[name] = match[index - 1]
objects.append(obj)
return objects
def re_search(regex, value):
obj = {}
    match = re.search(regex.pattern, value, re.M)
if match:
items = list(match.groups())
if regex.groupindex:
for name, index in iteritems(regex.groupindex):
obj[name] = items[index - 1]
return obj
def parse_cli(output, tmpl):
if not isinstance(output, string_types):
        raise AnsibleError("parse_cli input should be a string, but was given an input of %s" % (type(output)))
if not os.path.exists(tmpl):
raise AnsibleError('unable to locate parse_cli template: %s' % tmpl)
try:
template = Template()
except ImportError as exc:
raise AnsibleError(str(exc))
spec = yaml.safe_load(open(tmpl).read())
obj = {}
for name, attrs in iteritems(spec['keys']):
value = attrs['value']
try:
variables = spec.get('vars', {})
value = template(value, variables)
except:
pass
if 'start_block' in attrs and 'end_block' in attrs:
start_block = re.compile(attrs['start_block'])
end_block = re.compile(attrs['end_block'])
blocks = list()
lines = None
block_started = False
for line in output.split('\n'):
match_start = start_block.match(line)
match_end = end_block.match(line)
if match_start:
lines = list()
lines.append(line)
block_started = True
elif match_end:
if lines:
lines.append(line)
blocks.append('\n'.join(lines))
block_started = False
elif block_started:
if lines:
lines.append(line)
regex_items = [re.compile(r) for r in attrs['items']]
objects = list()
for block in blocks:
if isinstance(value, Mapping) and 'key' not in value:
items = list()
for regex in regex_items:
match = regex.search(block)
if match:
item_values = match.groupdict()
item_values['match'] = list(match.groups())
items.append(item_values)
else:
items.append(None)
obj = {}
for k, v in iteritems(value):
try:
obj[k] = template(v, {'item': items}, fail_on_undefined=False)
except:
obj[k] = None
objects.append(obj)
elif isinstance(value, Mapping):
items = list()
for regex in regex_items:
match = regex.search(block)
if match:
item_values = match.groupdict()
item_values['match'] = list(match.groups())
items.append(item_values)
else:
items.append(None)
key = template(value['key'], {'item': items})
values = dict([(k, template(v, {'item': items})) for k, v in iteritems(value['values'])])
objects.append({key: values})
return objects
elif 'items' in attrs:
regexp = re.compile(attrs['items'])
when = attrs.get('when')
conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
if isinstance(value, Mapping) and 'key' not in value:
values = list()
for item in re_matchall(regexp, output):
entry = {}
for item_key, item_value in iteritems(value):
entry[item_key] = template(item_value, {'item': item})
if when:
if template(conditional, {'item': entry}):
values.append(entry)
else:
values.append(entry)
obj[name] = values
elif isinstance(value, Mapping):
values = dict()
for item in re_matchall(regexp, output):
entry = {}
for item_key, item_value in iteritems(value['values']):
entry[item_key] = template(item_value, {'item': item})
key = template(value['key'], {'item': item})
if when:
if template(conditional, {'item': {'key': key, 'value': entry}}):
values[key] = entry
else:
values[key] = entry
obj[name] = values
else:
item = re_search(regexp, output)
obj[name] = template(value, {'item': item})
else:
obj[name] = value
return obj
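# A minimal spec file consumed by parse_cli() might look like this
# (illustrative only; the regex and key names are made up):
#
#   ---
#   keys:
#     interfaces:
#       value:
#         name: "{{ item.name }}"
#         state: "{{ item.state }}"
#       items: "^(?P<name>\\S+) is (?P<state>up|down)"
#
# which yields a list of {name: ..., state: ...} dicts under 'interfaces'.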
def parse_cli_textfsm(value, template):
if not HAS_TEXTFSM:
raise AnsibleError('parse_cli_textfsm filter requires TextFSM library to be installed')
if not isinstance(value, string_types):
        raise AnsibleError("parse_cli_textfsm input should be a string, but was given an input of %s" % (type(value)))
if not os.path.exists(template):
raise AnsibleError('unable to locate parse_cli_textfsm template: %s' % template)
try:
template = open(template)
except IOError as exc:
raise AnsibleError(str(exc))
re_table = textfsm.TextFSM(template)
fsm_results = re_table.ParseText(value)
results = list()
for item in fsm_results:
results.append(dict(zip(re_table.header, item)))
return results
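# Typical playbook usage of the filter above (a sketch; the registered
# variable and template path are illustrative):
#
#   - set_fact:
#       intf_facts: "{{ show_int.stdout[0] | parse_cli_textfsm('templates/show_interfaces.textfsm') }}"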
def _extract_param(template, root, attrs, value):
key = None
when = attrs.get('when')
conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
param_to_xpath_map = attrs['items']
if isinstance(value, Mapping):
key = value.get('key', None)
if key:
value = value['values']
entries = dict() if key else list()
for element in root.findall(attrs['top']):
entry = dict()
item_dict = dict()
for param, param_xpath in iteritems(param_to_xpath_map):
fields = None
try:
fields = element.findall(param_xpath)
except:
display.warning("Failed to evaluate value of '%s' with XPath '%s'.\nUnexpected error: %s." % (param, param_xpath, traceback.format_exc()))
tags = param_xpath.split('/')
            # If the xpath ends with an attribute selector, assign the matched
            # element's attribute dict (or a list of them) to the param value;
            # otherwise assign the matched element's text.
if len(tags) and tags[-1].endswith(']'):
if fields:
if len(fields) > 1:
item_dict[param] = [field.attrib for field in fields]
else:
item_dict[param] = fields[0].attrib
else:
item_dict[param] = {}
else:
if fields:
if len(fields) > 1:
item_dict[param] = [field.text for field in fields]
else:
item_dict[param] = fields[0].text
else:
item_dict[param] = None
if isinstance(value, Mapping):
for item_key, item_value in iteritems(value):
entry[item_key] = template(item_value, {'item': item_dict})
else:
entry = template(value, {'item': item_dict})
if key:
expanded_key = template(key, {'item': item_dict})
if when:
if template(conditional, {'item': {'key': expanded_key, 'value': entry}}):
entries[expanded_key] = entry
else:
entries[expanded_key] = entry
else:
if when:
if template(conditional, {'item': entry}):
entries.append(entry)
else:
entries.append(entry)
return entries
def parse_xml(output, tmpl):
if not os.path.exists(tmpl):
        raise AnsibleError('unable to locate parse_xml template: %s' % tmpl)
if not isinstance(output, string_types):
        raise AnsibleError('parse_xml works on string input, but was given an input of %s' % type(output))
root = fromstring(output)
try:
template = Template()
except ImportError as exc:
raise AnsibleError(str(exc))
spec = yaml.safe_load(open(tmpl).read())
obj = {}
for name, attrs in iteritems(spec['keys']):
value = attrs['value']
try:
variables = spec.get('vars', {})
value = template(value, variables)
except:
pass
if 'items' in attrs:
obj[name] = _extract_param(template, root, attrs, value)
else:
obj[name] = value
return obj
class FilterModule(object):
"""Filters for working with output from network devices"""
filter_map = {
'parse_cli': parse_cli,
'parse_cli_textfsm': parse_cli_textfsm,
'parse_xml': parse_xml
}
def filters(self):
return self.filter_map
| gpl-3.0 |
serl/hls-bba-testbed | vlc_test.py | 1 | 4096 | import sys, itertools
from pylibs.test import Test, Player, BwChange, DelayChange, TcpDump
from pylibs.generic import PlainObject
server_url = 'http://192.168.100.10:3000/static'
bigbuckbunny8_url = server_url + '/bbb8/play_size.m3u8' # rates: 350k 470k 630k 845k 1130k 1520k 2040k 2750k, duration (s): ~600
settings = PlainObject()
settings.video_label = 'bbb8'
settings.video_url = bigbuckbunny8_url
settings.kill_after = 700
settings.rtt = ('20ms', '80ms')
settings.rtt_zfill = 4
settings.buffer_size = ('25%', '50%', '100%', '600%') #won't be given directly to tc_helper, but will pass through get_bdp_fraction
settings.buffer_size_zfill = 4
settings.fairshare = range(600, 3001, 300)
settings.aqm = ('droptail', 'ared', 'codel')
settings.clients = (1, 2, 3)
settings.algorithms = ('classic-119', 'bba2', 'bba3')
settings.curl = ('yes', 'bandwidth')
def get_curl_label(curl):
if curl == 'yes':
return 'keepalive'
if curl == 'bandwidth':
return 'keepalive_est'
return 'close'
def get_algocurl_tuples(algorithms, curl_tuple):
tuples = []
for curl in curl_tuple:
for algo in algorithms:
if algo.startswith('bba') and curl == 'yes':
tuples.append((algo, 'bandwidth'))
continue
tuples.append((algo, curl))
return set(tuples)
from pylibs.test import delay_convert, router_buffer_convert
def get_bdp_fraction(rtt, bandwidth, fraction): #in packets
rtt_ms = delay_convert(rtt)
if rtt_ms == 20:
bandwidth = '50mbit'
elif rtt_ms == 80:
bandwidth = '10mbit'
return router_buffer_convert(fraction, bandwidth, rtt)
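# Worked example (assuming router_buffer_convert counts 1500-byte packets,
# which is not verified here): rtt='20ms' pins the reference bandwidth to
# 50mbit, so the bandwidth-delay product is 50e6 b/s * 0.020 s = 1e6 bits,
# i.e. ~125000 bytes or ~83 packets; buffer_size '100%' then maps to roughly
# 83 packets and '25%' to roughly 21.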
def add_tcpdump(t):
t.add_event(TcpDump(host='bandwidth', iface='eth1'))
t.add_event(TcpDump(host='bandwidth', iface='eth2'))
if __name__ == "__main__":
for (buffer_size, rtt, (algo, curl), n_clients, aqm) in itertools.product(settings.buffer_size, settings.rtt, get_algocurl_tuples(settings.algorithms, settings.curl), settings.clients, settings.aqm):
if (aqm == 'ared' or (algo == 'classic-119' and curl == 'yes')) and n_clients > 1:
continue
rtt_label = str(rtt).zfill(settings.rtt_zfill)
buffer_size_label = str(buffer_size).zfill(settings.buffer_size_zfill)
#constant
collection = 'constant_{}_{}_{}_{}BDP_{}'.format(n_clients, settings.video_label, rtt_label, buffer_size_label, aqm)
num = 1
for bw_kbits in settings.fairshare:
bw = str(bw_kbits*n_clients)+'kbit'
bwchange = BwChange(bw=bw, buffer_size=get_bdp_fraction(rtt, bw, buffer_size), rtt=rtt)
t = Test(name='c{:02d}_{}_{}_{}_{}_{}'.format(num, n_clients, settings.video_label, bw, algo, get_curl_label(curl)), collection=collection, init_bw=bwchange, packet_delay=rtt, aqm_algorithm=aqm)
t.add_event(TcpDump(host='bandwidth', iface='eth1'))
t.add_event(TcpDump(host='bandwidth', iface='eth2'))
for client_id in range(n_clients):
player = Player(delay=1, host='client{}'.format(client_id), algo=algo, curl=curl, url=settings.video_url, kill_after=settings.kill_after)
t.add_event(player)
t.generate_schedule()
num += 1
#variable
bandwidths_coll = [ # actually: fairshare - it will be multiplied by the number of clients
{0: 4000000, 120: 1000000, 240: 4000000, 360: 600000, 480: 4000000},
{0: 4000000, 100: 2800000, 200: 1500000, 300: 1000000, 400: 600000, 500: 4000000},
]
collection = 'variable_{}_{}_{}_{}BDP_{}'.format(n_clients, settings.video_label, rtt_label, buffer_size_label, aqm)
num = 1
for bandwidths in bandwidths_coll:
t = Test(name='v{:02d}_{}_{}_{}_{}'.format(num, n_clients, settings.video_label, algo, get_curl_label(curl)), collection=collection, packet_delay=rtt, aqm_algorithm=aqm)
for d, bw in bandwidths.iteritems():
t.add_event(BwChange(delay=d, bw=bw*n_clients, buffer_size=get_bdp_fraction(rtt, bw, buffer_size), rtt=rtt))
t.add_event(TcpDump(host='bandwidth', iface='eth1'))
t.add_event(TcpDump(host='bandwidth', iface='eth2'))
for client_id in range(n_clients):
player = Player(delay=1, host='client{}'.format(client_id), algo=algo, curl=curl, url=settings.video_url, kill_after=settings.kill_after)
t.add_event(player)
t.generate_schedule()
num += 1
| mit |
bitmazk/django-event-rsvp | event_rsvp/migrations/0006_auto__chg_field_event_max_seats_per_guest.py | 1 | 7401 | # flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Event.max_seats_per_guest'
db.alter_column('event_rsvp_event', 'max_seats_per_guest', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
def backwards(self, orm):
# Changing field 'Event.max_seats_per_guest'
db.alter_column('event_rsvp_event', 'max_seats_per_guest', self.gf('django.db.models.fields.PositiveIntegerField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'event_rsvp.event': {
'Meta': {'object_name': 'Event'},
'allow_anonymous_rsvp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'available_seats': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 9, 0, 0)'}),
'hide_available_seats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_seats_per_guest': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'required_fields': ('event_rsvp.models.MultiSelectField', [], {'max_length': '250', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 8, 0, 0)'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'venue': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'event_rsvp.guest': {
'Meta': {'object_name': 'Guest'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'guests'", 'to': "orm['event_rsvp.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_attending': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['event_rsvp'] | mit |
xxd3vin/spp-sdk | opt/Python27/Lib/encodings/iso8859_6.py | 593 | 11089 | """ Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-6',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u060c' # 0xAC -> ARABIC COMMA
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u061b' # 0xBB -> ARABIC SEMICOLON
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u061f' # 0xBF -> ARABIC QUESTION MARK
u'\ufffe'
u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
u'\u0628' # 0xC8 -> ARABIC LETTER BEH
u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xCA -> ARABIC LETTER TEH
u'\u062b' # 0xCB -> ARABIC LETTER THEH
u'\u062c' # 0xCC -> ARABIC LETTER JEEM
u'\u062d' # 0xCD -> ARABIC LETTER HAH
u'\u062e' # 0xCE -> ARABIC LETTER KHAH
u'\u062f' # 0xCF -> ARABIC LETTER DAL
u'\u0630' # 0xD0 -> ARABIC LETTER THAL
u'\u0631' # 0xD1 -> ARABIC LETTER REH
u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
u'\u0635' # 0xD5 -> ARABIC LETTER SAD
u'\u0636' # 0xD6 -> ARABIC LETTER DAD
u'\u0637' # 0xD7 -> ARABIC LETTER TAH
u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
u'\u0639' # 0xD9 -> ARABIC LETTER AIN
u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0640' # 0xE0 -> ARABIC TATWEEL
u'\u0641' # 0xE1 -> ARABIC LETTER FEH
u'\u0642' # 0xE2 -> ARABIC LETTER QAF
u'\u0643' # 0xE3 -> ARABIC LETTER KAF
u'\u0644' # 0xE4 -> ARABIC LETTER LAM
u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
u'\u0646' # 0xE6 -> ARABIC LETTER NOON
u'\u0647' # 0xE7 -> ARABIC LETTER HEH
u'\u0648' # 0xE8 -> ARABIC LETTER WAW
u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEA -> ARABIC LETTER YEH
u'\u064b' # 0xEB -> ARABIC FATHATAN
u'\u064c' # 0xEC -> ARABIC DAMMATAN
u'\u064d' # 0xED -> ARABIC KASRATAN
u'\u064e' # 0xEE -> ARABIC FATHA
u'\u064f' # 0xEF -> ARABIC DAMMA
u'\u0650' # 0xF0 -> ARABIC KASRA
u'\u0651' # 0xF1 -> ARABIC SHADDA
u'\u0652' # 0xF2 -> ARABIC SUKUN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
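# --- Hedged usage sketch (not part of the generated codec) -----------------
# Illustrates how the Codec class above decodes/encodes through the two
# tables; the sample byte string is an assumption picked from the decoding
# table (a few Arabic letters), not anything mandated by the codec itself.
if __name__ == '__main__':
    codec = Codec()
    sample = b'\xc7\xe4\xd3\xe4\xc7\xe5'      # bytes chosen from the table above
    text, consumed = codec.decode(sample)
    assert consumed == len(sample)
    encoded, _ = codec.encode(text)
    assert encoded == sample                  # round-trip through both tables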
| mit |
jinie/sublime-wakatime | packages/wakatime/packages/pygments_py3/pygments/lexers/c_like.py | 72 | 16179 | # -*- coding: utf-8 -*-
"""
pygments.lexers.c_like
~~~~~~~~~~~~~~~~~~~~~~
Lexers for other C-like languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers import _mql_builtins
__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer',
'CudaLexer', 'SwigLexer', 'MqlLexer']
class PikeLexer(CppLexer):
"""
For `Pike <http://pike.lysator.liu.se/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pike'
aliases = ['pike']
filenames = ['*.pike', '*.pmod']
mimetypes = ['text/x-pike']
tokens = {
'statements': [
(words((
'catch', 'new', 'private', 'protected', 'public', 'gauge',
'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from',
'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern',
'inline', 'proto', 'break', 'continue', 'if', 'else', 'for',
'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null',
'__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__',
'__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__',
'__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__',
'__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'),
Keyword),
(r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
r'array|multiset|program|function|lambda|mixed|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'[~!%^&*+=|?:<>/@-]', Operator),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
class NesCLexer(CLexer):
"""
For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
directives.
.. versionadded:: 2.0
"""
name = 'nesC'
aliases = ['nesc']
filenames = ['*.nc']
mimetypes = ['text/x-nescsrc']
tokens = {
'statements': [
(words((
'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component',
'components', 'configuration', 'event', 'extends', 'generic',
'implementation', 'includes', 'interface', 'module', 'new', 'norace',
'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'),
Keyword),
(words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t',
'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t',
'nx_uint64_t'), suffix=r'\b'),
Keyword.Type),
inherit,
],
}
class ClayLexer(RegexLexer):
"""
For `Clay <http://claylabs.com/clay/>`_ source.
.. versionadded:: 2.0
"""
name = 'Clay'
filenames = ['*.clay']
aliases = ['clay']
mimetypes = ['text/x-clay']
tokens = {
'root': [
(r'\s', Text),
(r'//.*?$', Comment.Singleline),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\b(public|private|import|as|record|variant|instance'
r'|define|overload|default|external|alias'
r'|rvalue|ref|forward|inline|noinline|forceinline'
r'|enum|var|and|or|not|if|else|goto|return|while'
r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
r'|finally|onerror|staticassert|eval|when|newtype'
r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
r')\b', Keyword),
(r'[~!%^&*+=|:<>/-]', Operator),
(r'[#(){}\[\],;.]', Punctuation),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'\d+[LlUu]*', Number.Integer),
(r'\b(true|false)\b', Name.Builtin),
(r'(?i)[a-z_?][\w?]*', Name),
(r'"""', String, 'tdqs'),
(r'"', String, 'dqs'),
],
'strings': [
(r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
(r'.', String),
],
'nl': [
(r'\n', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings'),
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl'),
],
}
class ECLexer(CLexer):
"""
For eC source code with preprocessor directives.
.. versionadded:: 1.5
"""
name = 'eC'
aliases = ['ec']
filenames = ['*.ec', '*.eh']
mimetypes = ['text/x-echdr', 'text/x-ecsrc']
tokens = {
'statements': [
(words((
'virtual', 'class', 'private', 'public', 'property', 'import',
'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get',
'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass',
'__on_register_module', 'namespace', 'using', 'typed_object',
'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers',
'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset',
'class_default_property', 'property_category', 'class_data',
'class_property', 'thisclass', 'dbtable', 'dbindex',
'database_open', 'dbfield'), suffix=r'\b'), Keyword),
(words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte',
'unichar', 'int64'), suffix=r'\b'),
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(null|value|this)\b', Name.Builtin),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
.. versionadded:: 1.1
"""
name = 'Vala'
aliases = ['vala', 'vapi']
filenames = ['*.vala', '*.vapi']
mimetypes = ['text/x-vala']
tokens = {
'whitespace': [
(r'^\s*#if\s+0', Comment.Preproc, 'if0'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'[L@]?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'(?s)""".*?"""', String), # verbatim strings
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation)),
# TODO: "correctly" parse complex code attributes
(r'(\[)(CCode|(?:Integer|Floating)Type)',
bygroups(Punctuation, Name.Decorator)),
(r'[()\[\],.]', Punctuation),
(words((
'as', 'base', 'break', 'case', 'catch', 'construct', 'continue',
'default', 'delete', 'do', 'else', 'enum', 'finally', 'for',
'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params',
'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try',
'typeof', 'while', 'yield'), suffix=r'\b'),
Keyword),
(words((
'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern',
'inline', 'internal', 'override', 'owned', 'private', 'protected',
'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned',
'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'),
Keyword.Declaration),
(r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
'namespace'),
(r'(class|errordomain|interface|struct)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(\.)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
# void is an actual keyword, others are in glib-2.0.vapi
(words((
'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16',
'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string',
'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'ulong', 'unichar', 'ushort'), suffix=r'\b'),
Keyword.Type),
(r'(true|false|null)\b', Name.Builtin),
('[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
],
}
class CudaLexer(CLexer):
"""
For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
source.
.. versionadded:: 1.6
"""
name = 'CUDA'
filenames = ['*.cu', '*.cuh']
aliases = ['cuda', 'cu']
mimetypes = ['text/x-cuda']
function_qualifiers = set(('__device__', '__global__', '__host__',
'__noinline__', '__forceinline__'))
variable_qualifiers = set(('__device__', '__constant__', '__shared__',
'__restrict__'))
vector_types = set(('char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
'ulonglong2', 'float1', 'float2', 'float3', 'float4',
'double1', 'double2', 'dim3'))
variables = set(('gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'))
functions = set(('__threadfence_block', '__threadfence', '__threadfence_system',
'__syncthreads', '__syncthreads_count', '__syncthreads_and',
'__syncthreads_or'))
execution_confs = set(('<<<', '>>>'))
def get_tokens_unprocessed(self, text):
for index, token, value in CLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self.variable_qualifiers:
token = Keyword.Type
elif value in self.vector_types:
token = Keyword.Type
elif value in self.variables:
token = Name.Builtin
elif value in self.execution_confs:
token = Keyword.Pseudo
elif value in self.function_qualifiers:
token = Keyword.Reserved
elif value in self.functions:
token = Name.Function
yield index, token, value
class SwigLexer(CppLexer):
"""
For `SWIG <http://www.swig.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'SWIG'
aliases = ['swig']
filenames = ['*.swg', '*.i']
mimetypes = ['text/swig']
priority = 0.04 # Lower than C/C++ and Objective C/C++
tokens = {
'statements': [
# SWIG directives
(r'(%[a-z_][a-z0-9_]*)', Name.Function),
# Special variables
('\$\**\&?\w+', Name),
# Stringification / additional preprocessor directives
(r'##*[a-zA-Z_]\w*', Comment.Preproc),
inherit,
],
}
# This is a far from complete set of SWIG directives
swig_directives = set((
# Most common directives
'%apply', '%define', '%director', '%enddef', '%exception', '%extend',
'%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
'%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
'%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
# Less common directives
'%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
'%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
'%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
'%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
'%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
'%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
'%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
'%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
'%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
'%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
'%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
'%trackobjects', '%types', '%unrefobject', '%varargs', '%warn',
'%warnfilter'))
def analyse_text(text):
rv = 0
# Search for SWIG directives, which are conventionally at the beginning of
# a line. The probability of them being within a line is low, so let another
# lexer win in this case.
matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
for m in matches:
if m in SwigLexer.swig_directives:
rv = 0.98
break
else:
rv = 0.91 # Fraction higher than MatlabLexer
return rv
class MqlLexer(CppLexer):
"""
For `MQL4 <http://docs.mql4.com/>`_ and
`MQL5 <http://www.mql5.com/en/docs>`_ source code.
.. versionadded:: 2.0
"""
name = 'MQL'
aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
filenames = ['*.mq4', '*.mq5', '*.mqh']
mimetypes = ['text/x-mql']
tokens = {
'statements': [
(words(_mql_builtins.keywords, suffix=r'\b'), Keyword),
(words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type),
(words(_mql_builtins.types, suffix=r'\b'), Name.Function),
(words(_mql_builtins.constants, suffix=r'\b'), Name.Constant),
(words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'),
Name.Constant),
inherit,
],
}
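# --- Hedged usage sketch (illustration only, not part of the lexer set) -----
# Shows one way to drive a lexer defined above through pygments' highlight();
# the tiny Clay-like snippet and the TerminalFormatter choice are assumptions
# made for the demo, not requirements of these lexers.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'main() { println("hello"); }'
    print(highlight(sample, ClayLexer(), TerminalFormatter()))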
| bsd-3-clause |
mozilla/addons-server | services/utils.py | 4 | 3245 | import logging
import logging.config
import os
import posixpath
import re
import sys
import MySQLdb as mysql
import sqlalchemy.pool as pool
from urllib.parse import urlencode
from services.settings import settings
import olympia.core.logger
# This is not DRY: it's a copy of amo.helpers.user_media_path, to avoid an
# import (which would trigger an import loop).
# See bug 1055654.
def user_media_path(what):
"""Make it possible to override storage paths in settings.
By default, all storage paths are in the MEDIA_ROOT.
This is backwards compatible.
"""
default = os.path.join(settings.MEDIA_ROOT, what)
key = '{0}_PATH'.format(what.upper())
return getattr(settings, key, default)
# This is not DRY: it's a copy of amo.helpers.user_media_url, to avoid an
# import (which should be avoided, according to the comments above, and which
# triggers an import loop).
# See bug 1055654.
def user_media_url(what):
"""
Generate default media url, and make possible to override it from
settings.
"""
default = '%s%s/' % (settings.MEDIA_URL, what)
key = '{0}_URL'.format(what.upper().replace('-', '_'))
return getattr(settings, key, default)
version_re = re.compile(
r"""(?P<major>\d+) # major (x in x.y)
\.(?P<minor1>\d+) # minor1 (y in x.y)
\.?(?P<minor2>\d+|\*)? # minor2 (z in x.y.z)
\.?(?P<minor3>\d+|\*)? # minor3 (w in x.y.z.w)
(?P<alpha>[a|b]?) # alpha/beta
(?P<alpha_ver>\d*) # alpha/beta version
(?P<pre>pre)? # pre release
(?P<pre_ver>\d)? # pre release version
""",
re.VERBOSE,
)
def get_cdn_url(id, row):
host = user_media_url('addons')
url = posixpath.join(host, str(id), row['filename'])
params = urlencode({'filehash': row['hash']})
return '{0}?{1}'.format(url, params)
def getconn():
db = settings.SERVICES_DATABASE
return mysql.connect(
host=db['HOST'],
user=db['USER'],
passwd=db['PASSWORD'],
db=db['NAME'],
charset=db['OPTIONS']['charset'],
)
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5, recycle=300)
def log_configure():
"""You have to call this to explicitly configure logging."""
cfg = {
'version': 1,
'filters': {},
'handlers': {
'mozlog': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'json',
},
},
'formatters': {
'json': {
'()': olympia.core.logger.JsonFormatter,
'logger_name': 'http_app_addons',
},
},
}
logging.config.dictConfig(cfg)
def log_exception(data):
# Note: although this logs exceptions, it logs at the info level so that
# on prod, we log at the error level and result in no logs on prod.
typ, value, discard = sys.exc_info()
error_log = olympia.core.logger.getLogger('z.update')
error_log.exception('Type: %s, %s. Data: %s' % (typ, value, data))
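# --- Hedged usage sketch (illustrative only) ---------------------------------
# Exercises the version_re defined above; the version string is a made-up
# example, not production data.
if __name__ == '__main__':
    match = version_re.match('3.6.0b2')
    if match:
        print(match.group('major'), match.group('minor1'),
              match.group('minor2'), match.group('alpha') + match.group('alpha_ver'))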
| bsd-3-clause |
jlegendary/youtube-dl | youtube_dl/extractor/kankan.py | 124 | 1738 | from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
_md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
class KankanIE(InfoExtractor):
_VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
_TEST = {
'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
'md5': '29aca1e47ae68fc28804aca89f29507e',
'info_dict': {
'id': '48863',
'ext': 'flv',
'title': 'Ready To Go',
},
'skip': 'Only available from China',
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
gcid = gcids[-1]
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
video_info_page = self._download_webpage(
info_url, video_id, 'Downloading video url info')
ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip')
path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path')
param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1')
param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2')
key = _md5('xl_mp43651' + param1 + param2)
video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)
return {
'id': video_id,
'title': title,
'url': video_url,
}
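# --- Hedged usage sketch (illustration only, not part of the extractor) ------
# Mirrors the key derivation used in _real_extract above; the param values are
# placeholder assumptions standing in for the values scraped from the page.
if __name__ == '__main__':
    param1, param2 = '123', '456'
    key = _md5('xl_mp43651' + param1 + param2)
    print('key=%s&key1=%s' % (key, param2))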
| unlicense |
liavkoren/djangoDev | django/contrib/gis/sitemaps/kml.py | 49 | 2544 | from django.apps import apps
from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
from django.contrib.gis.db.models.fields import GeometryField
from django.db import models
class KMLSitemap(Sitemap):
"""
A minimal hook to produce KML sitemaps.
"""
geo_format = 'kml'
def __init__(self, locations=None):
# If no locations specified, then we try to build for
# every model in installed applications.
self.locations = self._build_kml_sources(locations)
def _build_kml_sources(self, sources):
"""
Goes through the given sources and returns a 3-tuple of
the application label, module name, and field name of every
GeometryField encountered in the sources.
        If no sources are provided, all installed models are used.
"""
kml_sources = []
if sources is None:
sources = apps.get_models()
for source in sources:
if isinstance(source, models.base.ModelBase):
for field in source._meta.fields:
if isinstance(field, GeometryField):
kml_sources.append((source._meta.app_label,
source._meta.model_name,
field.name))
elif isinstance(source, (list, tuple)):
if len(source) != 3:
raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
kml_sources.append(source)
else:
raise TypeError('KML Sources must be a model or a 3-tuple.')
return kml_sources
def get_urls(self, page=1, site=None, protocol=None):
"""
        This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
for url in urls:
url['geo_format'] = self.geo_format
return urls
def items(self):
return self.locations
def location(self, obj):
return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
kwargs={'label': obj[0],
'model': obj[1],
'field_name': obj[2],
}
)
class KMZSitemap(KMLSitemap):
geo_format = 'kmz'
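# --- Hedged usage sketch (illustrative only; the 3-tuple below is invented) --
# Passing explicit (app_label, module_name, field_name) tuples avoids the
# model scan in _build_kml_sources; assumes a compatible Django is importable.
if __name__ == '__main__':
    sitemap = KMLSitemap([('myapp', 'city', 'point')])
    print(sitemap.items())   # -> [('myapp', 'city', 'point')]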
| bsd-3-clause |
johankaito/fufuka | microblog/old-flask/lib/python2.7/site-packages/werkzeug/test.py | 116 | 34255 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects.) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [BytesIO(), 0, False]
if use_tempfile:
def write_binary(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write_binary = _closure[0].write
def write(string):
write_binary(string.encode(charset))
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in iterlists(values):
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write_binary(chunk)
else:
if not isinstance(value, string_types):
value = str(value)
else:
value = to_bytes(value, charset)
write('\r\n\r\n')
write_binary(value)
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in iterlists(data):
for value in values:
yield key, value
else:
for key, values in iteritems(data):
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
path_s = make_literal_wrapper(path)
if query_string is None and path_s('?') in path:
path, query_string = path.split(path_s('?'), 1)
self.charset = charset
self.path = iri_to_uri(path)
if base_url is not None:
base_url = url_fix(iri_to_uri(base_url, charset), charset)
self.base_url = base_url
if isinstance(query_string, (bytes, text_type)):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
if content_type is not None:
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, text_type):
data = data.encode(self.charset)
if isinstance(data, bytes):
self.input_stream = BytesIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return url_unparse((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
scheme = 'http'
netloc = 'localhost'
script_root = ''
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self._files:
return 'multipart/form-data'
elif self._form:
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
try:
self.close()
except Exception:
pass
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
#py2v3 review
values = url_encode(self.form, charset=self.charset)
values = values.encode('ascii')
content_length = len(values)
input_stream = BytesIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
qs = wsgi_encoding_dance(self.query_string)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': qs,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_wsgi_list():
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
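# --- Hedged usage sketch (illustration only; the field names are assumptions) -
# Builds a POST environ carrying form data plus an uploaded file, shows the
# auto-detected content type, and wraps the environ in a request object.
if __name__ == '__main__':
    _builder = EnvironBuilder(path='/upload', method='POST',
                              data={'name': u'demo',
                                    'attachment': (BytesIO(b'abc'), 'demo.txt')})
    print(_builder.content_type)             # multipart/form-data (auto-detected)
    _request = _builder.get_request()
    print(_request.method, _request.path)
    _builder.close()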
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
the :cls:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def run_wsgi_app(self, environ, buffered=False):
"""Runs the wrapped WSGI app with the given environment."""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(self, response, new_location, environ, buffered=False):
"""Resolves a single redirect and triggers the request again
directly on this redirect client.
"""
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
status_code = int(response[1].split(None, 1)[0])
if status_code == 307:
method = environ['REQUEST_METHOD']
else:
method = 'GET'
# For redirect handling we temporarily disable the response
# wrapper. This is not threadsafe but not a real concern
# since the test client must not be shared anyways.
old_response_wrapper = self.response_wrapper
self.response_wrapper = None
try:
return self.open(path=script_root, base_url=base_url,
query_string=qs, as_tuple=True,
buffered=buffered, method=method)
finally:
self.response_wrapper = old_response_wrapper
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
response = self.run_wsgi_app(environ, buffered=buffered)
# handle redirects
redirect_chain = []
while 1:
status_code = int(response[1].split(None, 1)[0])
if status_code not in (301, 302, 303, 305, 307) \
or not follow_redirects:
break
new_location = response[2]['location']
method = 'GET'
if status_code == 307:
method = environ['REQUEST_METHOD']
new_redirect_entry = (new_location, status_code)
if new_redirect_entry in redirect_chain:
raise ClientRedirectError('loop detected')
redirect_chain.append(new_redirect_entry)
environ, response = self.resolve_redirect(response, new_location,
environ,
buffered=buffered)
if self.response_wrapper is not None:
response = self.response_wrapper(*response)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def options(self, *args, **kw):
"""Like open but method is enforced to OPTIONS."""
kw['method'] = 'OPTIONS'
return self.open(*args, **kw)
def trace(self, *args, **kw):
"""Like open but method is enforced to TRACE."""
kw['method'] = 'TRACE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
localhost:80) or a full path to the request with scheme, netloc port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
reraise(*exc_info)
response[:] = [status, headers]
return buffer.append
app_rv = app(environ, start_response)
close_func = getattr(app_rv, 'close', None)
app_iter = iter(app_rv)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have a response, chain
# the already received data with the already collected data and wrap it in
# a new `ClosingIterator` if we need to restore a `close` callable from the
# original return value.
else:
while not response:
buffer.append(next(app_iter))
if buffer:
app_iter = chain(buffer, app_iter)
if close_func is not None and app_iter is not app_rv:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], Headers(response[1])
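# --- Hedged usage sketch (illustration only; demo_app is an assumption) ------
# Drives the helpers above end to end: create_environ -> run_wsgi_app, then the
# same app through Client.get(), which returns the raw (app_iter, status,
# headers) tuple because no response_wrapper is configured.
if __name__ == '__main__':
    def demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello ' + environ['PATH_INFO'].encode('latin1')]
    app_iter, status, headers = run_wsgi_app(demo_app, create_environ('/world'))
    print(status, b''.join(app_iter))
    app_iter, status, headers = Client(demo_app).get('/again')
    print(status, b''.join(app_iter))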
| apache-2.0 |
yvaucher/l10n-italy | __unported__/l10n_it_ricevute_bancarie/wizard/__init__.py | 12 | 1330 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: [email protected]
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard_emissione_riba
import riba_file_export
import wizard_accreditation
import wizard_unsolved
| agpl-3.0 |
hammerlab/mhctools | test/test_mhc_formats.py | 1 | 10284 | from mhctools.parsing import (
parse_netmhcpan28_stdout,
parse_netmhcpan3_stdout,
parse_netmhc3_stdout,
parse_netmhc4_stdout,
)
def test_netmhc3_stdout():
"""
Test parsing of NetMHC output of predictions of HLA-A*02:01
and HLA-A*02:03 for three epitopes:
- CFTWNQMNL
- SLYNTVATL
- SLYNTVATF
"""
netmhc_output = """
NetMHC version 3.4. 9mer predictions using Artificial Neural Networks - Direct. Allele HLA-A02:01.
Strong binder threshold 50 nM. Weak binder threshold score 500 nM
----------------------------------------------------------------------------------------------------
pos peptide logscore affinity(nM) Bind Level Protein Name Allele
----------------------------------------------------------------------------------------------------
0 CFTWNQMNL 0.085 19899 seq4 HLA-A02:01
--------------------------------------------------------------------------------------------------
0 SLYNTVATL 0.579 94 WB seq5 HLA-A02:01
--------------------------------------------------------------------------------------------------
0 SLYNTVATF 0.289 2186 seq6 HLA-A02:01
--------------------------------------------------------------------------------------------------
NetMHC version 3.4. 9mer predictions using Artificial Neural Networks - Direct. Allele HLA-A02:03.
Strong binder threshold 50 nM. Weak binder threshold score 500 nM
----------------------------------------------------------------------------------------------------
pos peptide logscore affinity(nM) Bind Level Protein Name Allele
----------------------------------------------------------------------------------------------------
0 CFTWNQMNL 0.113 14800 seq4 HLA-A02:03
--------------------------------------------------------------------------------------------------
0 SLYNTVATL 0.730 18 SB seq5 HLA-A02:03
--------------------------------------------------------------------------------------------------
0 SLYNTVATF 0.493 239 WB seq6 HLA-A02:03
--------------------------------------------------------------------------------------------------
"""
n_sequences = 3
n_alleles = 2
n_expected = n_alleles * n_sequences
binding_predictions = parse_netmhc3_stdout(netmhc_output)
assert len(binding_predictions) == n_expected, \
"Wrong number of binding predictions: %d (expected %d)" % (
len(binding_predictions), n_expected)
for entry in binding_predictions:
# make sure both allele's tables get parsed
assert entry.allele in ('HLA-A*02:01', 'HLA-A*02:03'), entry
# expect the HIV epitope SLYNTVATL to be a predicted binder for both
# alleles
if entry.peptide == "SLYNTVATL":
assert entry.value < 100, entry
def test_netmhc4_stdout():
netmhc_output = """
# NetMHC version 4.0
# Read 132 elements on pairlist /Users/tavi/drive/work/repos/cancer/n-b/netMHC-4.0/Darwin_x86_64/data/allelelist
# Input is in PEPTIDE format
# Rank Threshold for Strong binding peptides 0.500
# Rank Threshold for Weak binding peptides 2.000
-----------------------------------------------------------------------------------
pos HLA peptide Core Offset I_pos I_len D_pos D_len iCore Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel
-----------------------------------------------------------------------------------
0 HLA-A0201 AAAAAWYLWEV AAAWYLWEV 0 0 0 1 2 AAAAAWYLWEV SEQ_A 0.349 1147.39 4.50
0 HLA-A0201 AEFGPWQTV AEFGPWQTV 0 0 0 0 0 AEFGPWQTV SEQ_B 0.129 12361.73 18.00
-----------------------------------------------------------------------------------
Protein PEPLIST. Allele HLA-A0201. Number of high binders 0. Number of weak binders 0. Number of peptides 10
-----------------------------------------------------------------------------------
# Rank Threshold for Strong binding peptides 0.500
# Rank Threshold for Weak binding peptides 2.000
-----------------------------------------------------------------------------------
pos HLA peptide Core Offset I_pos I_len D_pos D_len iCore Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel
-----------------------------------------------------------------------------------
0 HLA-A0202 AEFGPWQTV AEFGPWQTV 0 0 0 0 0 AEFGPWQTV SEQ_C 0.136 11437.51 23.00
219 HLA-A0202 QLLRDNLTL QLLRDNLTL 0 0 0 0 0 QLLRDNLTL SEQ_D 0.527 167.10 1.50 <= WB
-----------------------------------------------------------------------------------
Protein PEPLIST. Allele HLA-A0202. Number of high binders 0. Number of weak binders 0. Number of peptides 10
-----------------------------------------------------------------------------------
"""
n_sequences = 2
n_alleles = 2
n_expected = n_sequences * n_alleles
binding_predictions = parse_netmhc4_stdout(netmhc_output)
assert len(binding_predictions) == n_expected, \
"Wrong number of binding predictions: %d (expected %d)" % (
len(binding_predictions), n_expected)
for entry in binding_predictions:
# make sure both allele's tables get parsed
assert entry.allele in ('HLA-A*02:01', 'HLA-A*02:02'), entry
assert 0 < entry.value < 50000, entry
        # expect the epitope AEFGPWQTV to bind only weakly (IC50 well above
        # 10,000 nM) for both alleles
if entry.peptide == "AEFGPWQTV":
assert entry.value > 10000, entry
def test_mhcpan28_stdout():
netmhcpan28_output = """
# Affinity Threshold for Strong binding peptides 50.000',
# Affinity Threshold for Weak binding peptides 500.000',
# Rank Threshold for Strong binding peptides 0.500',
# Rank Threshold for Weak binding peptides 2.000',
---------------------------------------------------x
pos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel
----------------------------------------------------------------------------
0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00
1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00
2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00
3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00
4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00
5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00
6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00
7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00
8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00
9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00
10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00
11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB
"""
binding_predictions = parse_netmhcpan28_stdout(netmhcpan28_output)
assert len(binding_predictions) == 12, \
"Expected 12 binding predictions but got %d" % (len(binding_predictions),)
for entry in binding_predictions:
assert entry.allele == 'HLA-A*02:03', \
"Expected entry %s to have allele 'HLA-A*02:03'" % (entry,)
if entry.peptide == "HIIIASSSL":
            # check the parsed IC50 and percentile rank for this peptide
assert entry.value == 189.74, entry
assert entry.percentile_rank == 4.00, entry
def test_mhcpan3_stdout():
netmhcpan3_output = """
# Rank Threshold for Strong binding peptides 0.500
# Rank Threshold for Weak binding peptides 2.000
-----------------------------------------------------------------------------------
Pos HLA Peptide Core Of Gp Gl Ip Il Icore Identity Score Aff(nM) %Rank BindLevel
-----------------------------------------------------------------------------------
1 HLA-B*18:01 QQQQQYFP QQQQQYFP- 0 0 0 8 1 QQQQQYFP id0 0.06456 24866.4 17.00
2 HLA-B*18:01 QQQQYFPE QQQQYFPE- 0 0 0 8 1 QQQQYFPE id0 0.06446 24892.8 17.00
3 HLA-B*18:01 QQQYFPEI QQ-QYFPEI 0 0 0 2 1 QQQYFPEI id0 0.06108 25819.2 18.00
4 HLA-B*18:01 QQYFPEIT QQYFPEIT- 0 0 0 8 1 QQYFPEIT id0 0.04229 31642.1 29.00
5 HLA-B*18:01 QYFPEITH -QYFPEITH 0 0 0 0 1 QYFPEITH id0 0.05316 28130.5 22.00
6 HLA-B*18:01 YFPEITHI Y-FPEITHI 0 0 0 1 1 YFPEITHI id0 0.02576 37836.9 50.00
7 HLA-B*18:01 FPEITHII FP-EITHII 0 0 0 2 1 FPEITHII id0 0.06199 25566.2 18.00
8 HLA-B*18:01 PEITHIIA PEITHIIA- 0 0 0 8 1 PEITHIIA id0 0.06692 24239.3 16.00
9 HLA-B*18:01 EITHIIAS -EITHIIAS 0 0 0 0 1 EITHIIAS id0 0.09323 18234.7 10.00
10 HLA-B*18:01 ITHIIASS ITHIIASS- 0 0 0 8 1 ITHIIASS id0 0.01784 41223.5 70.00
11 HLA-B*18:01 THIIASSS THIIASSS- 0 0 0 8 1 THIIASSS id0 0.03335 34856.1 38.00
12 HLA-B*18:01 HIIASSSL -HIIASSSL 0 0 0 0 1 HIIASSSL id0 0.03049 35949.6 42.00
"""
binding_predictions = parse_netmhcpan3_stdout(netmhcpan3_output)
assert len(binding_predictions) == 12
for entry in binding_predictions:
assert entry.allele == 'HLA-B*18:01', \
"Expected entry %s to have allele 'HLA-B*18:01'" % (entry,)
if entry.peptide == "EITHIIAS":
            # check the parsed IC50 and percentile rank for this peptide
assert entry.value == 18234.7, entry
assert entry.percentile_rank == 10.00, entry
| apache-2.0 |
acenario/Payable | lib/python2.7/site-packages/setuptools/command/upload_docs.py | 332 | 6811 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from base64 import standard_b64encode
from pkg_resources import iter_entry_points
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
return s
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
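    # Run Sphinx's build_sphinx first (when available and no explicit
    # upload_dir was given) so there is documentation to upload.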
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema "+schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error:
e = sys.exc_info()[1]
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-'*75, r.read(), '-'*75)
| mit |
moonso/genmod | genmod/utils/get_features.py | 1 | 2303 | from __future__ import (print_function)
import logging
from genmod.utils import INTERESTING_SO_TERMS, EXONIC_SO_TERMS
def check_vep_annotation(variant):
"""
Return a set with the genes that vep has annotated this variant with.
Vep annotates all variants but we are only interested in the exonic ones.
The terms are specified in INTERESTING_SO_TERMS
Arguments:
variant (dict): A variant dictionary
Returns:
annotation (set): A set with genes
"""
annotation = set()
# vep_info is a dictionary with genes as key and annotation as values
##TODO use extract_vcf to get the annotation here
vep_info = variant.get('vep_info',{})
for allele in vep_info:
for vep_annotation in variant['vep_info'][allele]:
for consequence in vep_annotation.get('Consequence', {}).split('&'):
# These are the SO terms that indicate that the variant
# belongs to a gene
if consequence in INTERESTING_SO_TERMS:
annotation.add(vep_annotation.get('Gene', ''))
return annotation
def get_annotation(variant, annotation_key="Annotation", vep=False):
"""
Return the features that a variant belongs to.
Arguments:
variant (dict): A variant dictionary
annotation_key (str): The name of the info field to search
vep (bool): If variants are annotated with vep
Returns:
annotations (set): A set with annotated features
"""
logger = logging.getLogger(__name__)
##TODO use extract_vcf to get the annotation here
annotation = set()
variant_id = variant.get('variant_id', '')
logger.debug("Checking variant annotation for {0}".format(variant_id))
# If the variant has already been annotated by genmod we do not need to
# check again
if vep:
logger.debug("Using vep annotation.")
annotation = check_vep_annotation(variant)
else:
info_dict = variant.get('info_dict', {})
if info_dict.get(annotation_key, None):
annotation = set(info_dict[annotation_key].split(','))
logger.debug("Annotations found for {0}: {1}".format(
variant_id, ','.join(annotation)
))
return annotation
| mit |
wangxiangyu/horizon | openstack_dashboard/dashboards/admin/defaults/tabs.py | 82 | 1417 | # Copyright 2013 Kylin, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.admin.defaults import tables
class DefaultQuotasTab(tabs.TableTab):
table_classes = (tables.QuotasTable,)
name = _("Default Quotas")
slug = "quotas"
template_name = ("horizon/common/_detail_table.html")
def get_quotas_data(self):
request = self.tab_group.request
try:
data = quotas.get_default_quota_data(request)
except Exception:
data = []
exceptions.handle(self.request, _('Unable to get quota info.'))
return data
class DefaultsTabs(tabs.TabGroup):
slug = "defaults"
tabs = (DefaultQuotasTab,)
sticky = True
| apache-2.0 |
USU-Robosub/Gilligan | rosWorkspace/Brain/src/utils.py | 2 | 4579 | import rospy
import smach
from Robosub.msg import HighLevelControl
from SubImageRecognition.msg import ImgRecObject
def move(direction, motion_type, value):
move.msg.Direction = direction
move.msg.MotionType = motion_type
move.msg.Value = value
move.pub.publish(move.msg)
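# Function attributes double as module-level state: one reusable message and a
# single publisher shared by every call to move().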
move.msg = HighLevelControl()
move.pub = rospy.Publisher('/High_Level_Motion', HighLevelControl)
move.COMMAND = 'Command'
move.FORWARD = 'Forward'
move.MANUAL = 'Manual'
move.STRAFE = 'Strafe'
move.TURN = 'Turn'
move.DIVE = 'Depth'
def turn(value):
move(move.TURN, move.COMMAND, value)
def forward(value):
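    # clear any prior turn command first, presumably so forward motion is straight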
move(move.TURN, move.COMMAND, 0)
move(move.FORWARD, move.COMMAND, value)
def strafe(value):
move(move.STRAFE, move.COMMAND, value)
def dive(value):
move(move.DIVE, move.COMMAND, value)
class ScanNarrow(smach.State):
def __init__(self, img_rec_topic, scan_degrees_start = 15, reverse_speed = -.5, scan_gains = 1, scan_duration = 1):
super(ScanNarrow, self).__init__(outcomes=['succeeded', 'timed_out', 'preempted'])
self._img_rec_topic = img_rec_topic
self._scan_degrees_start = scan_degrees_start * scan_gains
self._reverse_gains = reverse_speed
self._reverse_duration = reverse_speed * -4
self._scan_duration = scan_duration
self._reverse_speed = reverse_speed
def reset(self):
self.scan_degrees = self._scan_degrees_start
self.obj_found = 0
self.scan_count = 0
def execute(self, userdata):
self.reset()
sub = rospy.Subscriber(self._img_rec_topic, ImgRecObject, self.obj_found_cb)
while not self.obj_found:
#Back up a little bit
forward(self._reverse_speed)
rospy.sleep(self._reverse_duration)
forward(0.0)
#Scan to the left
turn(-self.scan_degrees)
for i in range(self._scan_duration):
if self.preempt_requested():
self.service_preempt()
self.reset()
return 'preempted'
rospy.sleep(1)
#Scan to the right
turn(self.scan_degrees * 2)
for i in range(self._scan_duration * 2):
if self.preempt_requested():
self.service_preempt()
self.reset()
return 'preempted'
rospy.sleep(1)
#return to center (probably)
turn(-self.scan_degrees)
for i in range(self._scan_duration):
if self.preempt_requested():
self.service_preempt()
self.reset()
return 'preempted'
rospy.sleep(1)
self.scan_count += 1
self.reset()
return 'succeeded'
def obj_found_cb(self, msg):
self.obj_found = True
## I started working on this to add a timeout but gave up for now -Chris
#class MonitorState(smach.State):
# def __init__(self, topic, msg_type, cond_cb, max_checks=-1):
# smach.State.__init__(self,outcomes=['valid','invalid','preempted'])
# self._topic = topic
# self._msg_type = msg_type
# self._cond_cb = cond_cb
# self._max_checks = max_checks
# self._n_checks = 0
# self._trigger_cond = threading.Condition()
# def execute(self, ud):
# self._n_checks = 0
# self._sub = rospy.Subscriber(self._topic, self._msg_type, self._cb, callback_args=[ud])
# with self._trigger_cond:
# self._trigger_cond.wait()
# self._sub.unregister()
# if self.preempt_requested():
# self.service_preempt()
# return 'preempted'
# if self._max_checks > 0 and self._n_checks >= self._max_checks:
# return 'valid'
# return 'invalid'
# def _cb(self, msg, ud):
# self._n_checks += 1
# try:
# if (self._max_checks > 0 and self._n_checks >= self._max_checks) or not self._cond_cb(ud, msg):
# self._wake()
# except:
# rospy.logerr("Error thrown while executing condition callback %s" % str(self._cond_cb))
# self._wake()
# def request_preempt(self):
# smach.State.request_preempt(self)
# self._wake()
# def _timeout_cb(self):
# self._wake()
# def _wake(self):
# with self._trigger_cond:
# self._trigger_cond.notify()
| apache-2.0 |
dhomeier/astropy | astropy/units/__init__.py | 8 | 1312 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<https://github.com/pynbody/pynbody>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
# Lots of things to import - go from more basic to advanced, so that
# whatever advanced ones need generally has been imported already;
# this helps prevent circular imports and makes it easier to understand
# where most time is spent (e.g., using python -X importtime).
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from . import photometric
from . import misc
from .function import units as function_units
from .si import *
from .astrophys import *
from .photometric import *
from .cgs import *
from .physical import *
from .function.units import *
from .misc import *
from .equivalencies import *
from .function.core import *
from .function.logarithmic import *
from .decorators import *
del bases
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys, function_units, misc, photometric])
| bsd-3-clause |
HopkinsIDD/EpiForecastStatMech | epi_forecast_stat_mech/iterative_estimator.py | 1 | 10955 | # Lint as: python3
"""An iterative epi_forecast_stat_mech.estimator_base.Estimator."""
import collections
import functools
import pickle
from epi_forecast_stat_mech import data_model
from epi_forecast_stat_mech import estimator_base
from epi_forecast_stat_mech import mask_time
from epi_forecast_stat_mech import optim_lib
from epi_forecast_stat_mech.mechanistic_models import mechanistic_models
from epi_forecast_stat_mech.mechanistic_models import predict_lib
from epi_forecast_stat_mech.statistical_models import probability as stat_prob
import jax
import jax.numpy as jnp
from matplotlib import pyplot as plt
import numpy as np
import sklearn
import sklearn.inspection
def np_float(x):
return np.asarray(x, dtype=np.float64)
def jnp_float(x):
return jnp.asarray(x, dtype=jnp.float32)
def load(file_in):
return pickle.load(file_in)
class IterativeEstimator(estimator_base.Estimator):
def save(self, file_out):
pickle.dump(self, file_out)
def __init__(self,
stat_estimators=None,
mech_model=None,
hat_interpolation_alpha=0.5,
iter_max=100,
gradient_steps=10000,
learning_rate=1E-4,
verbose=1,
time_mask_fn=functools.partial(mask_time.make_mask, min_value=1),
rng=None
):
"""Construct an IterativeEstimator.
Args:
stat_estimators: A dictionary with an sklearn regressor for each
encoded_param_name of the mech_model. Using a defaultdict is a
convenient way to impose a default type of regressor, e.g.:
collections.defaultdict(lambda: sklearn.ensemble.RandomForestRegressor(
...)). The fitted estimators will be saved in this dictionary, which
is also saved on the instance.
mech_model: An instance of a MechanisticModel.
hat_interpolation_alpha: float between 0. and 1. representing how much
to move toward the newly estimated mech_params_hat in each loop.
iter_max: positive integer. How many iterative loops to perform.
gradient_steps: The number of adam steps to perform per iter.
learning_rate: The learning rate (positive float; default 1E-4).
verbose: (Integer >= 0, default 1) Verbosity:
0: Quiet.
1: Reports every 1k steps.
2: Also report initial value and gradient.
time_mask_fn: A function that returns a np.array that can be used to mask
part of the new_infections curve.
rng: A jax.random.PRNGKey.
"""
if stat_estimators is None:
stat_estimators = collections.defaultdict(
lambda: sklearn.ensemble.RandomForestRegressor(n_estimators=50))
self.stat_estimators = stat_estimators
self.hat_interpolation_alpha = hat_interpolation_alpha
self.iter_max = iter_max
self.gradient_steps = gradient_steps
self.learning_rate = learning_rate
self.verbose = verbose
if mech_model is None:
mech_model = mechanistic_models.ViboudChowellModel()
self.mech_model = mech_model
self.encoded_param_names = self.mech_model.encoded_param_names
self.mech_bottom_scale = self.mech_model.bottom_scale
self.out_dim = len(self.encoded_param_names)
self.time_mask_fn = time_mask_fn
if rng is None:
rng = jax.random.PRNGKey(0)
self.rng = rng
def _unflatten(self, x):
return jnp.reshape(x, (-1, self.out_dim))
def fit(self, data):
data_model.validate_data_for_fit(data)
self.data = data
num_locations = data.sizes['location']
self.epidemics = epidemics = mechanistic_models.pack_epidemics_record_tuple(
data)
self.time_mask = time_mask = self.time_mask_fn(data)
self.v_df = v_df = _get_static_covariate_df(data)
def mech_plus_stat_errors(mech_params_stack, mech_params_hat_stack=None):
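      # Negative joint log-probability used as the optimization loss: the
      # mechanistic likelihood over the time-masked epidemics plus a Gaussian
      # penalty pulling mech params toward the current regression estimates.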
mech_log_prior = jnp.sum(
jax.vmap(self.mech_model.log_prior)(mech_params_stack))
mech_log_lik_terms_raw = jax.vmap(self.mech_model.log_likelihood)(
mech_params_stack, epidemics)
mech_log_lik_terms = jnp.where(time_mask, mech_log_lik_terms_raw, 0.)
mech_log_lik = jnp.sum(mech_log_lik_terms)
mech_log_prob = mech_log_prior + mech_log_lik
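      # the reassignment below overrides the sum above, so only the likelihood
      # term (not the prior) actually contributes to the loss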
mech_log_prob = mech_log_lik
stat_plugin_error_model = stat_prob.gaussian_with_softplus_scale_estimate(
mech_params_stack,
axis=0,
min_scale=self.mech_bottom_scale,
mean=mech_params_hat_stack,
softness=self.mech_bottom_scale)
# shape: (out_dim,)
stat_log_prob = stat_plugin_error_model.log_prob(mech_params_stack).sum(
axis=0)
mech_plus_current_stat_loss = -(mech_log_prob + jnp.sum(stat_log_prob))
return mech_plus_current_stat_loss
mech_params_stack = mechanistic_models.initialize_mech_model_stack(
self.rng, self.mech_model, data, epidemics)
assert mech_params_stack.shape[1] == len(self.encoded_param_names)
mech_params_hat_stack = mech_params_stack
for _ in range(self.iter_max):
# Update mech_params_stack "regularized" by current mech_params_hat_stack.
# N.B. This is not a maximum likelihood update.
# We run two optimizers consecutively to try to unstick each.
mech_params_stack = optim_lib.adam_optimize(
functools.partial(
mech_plus_stat_errors,
mech_params_hat_stack=mech_params_hat_stack),
mech_params_stack,
train_steps=self.gradient_steps,
learning_rate=self.learning_rate,
verbose=self.verbose)
mech_params_stack, opt_status, _ = optim_lib.lbfgs_optimize(
functools.partial(
mech_plus_stat_errors,
mech_params_hat_stack=mech_params_hat_stack), mech_params_stack)
if not opt_status[0]:
print('optimizer reports: %s' % (opt_status,))
# Find an update for mech_params_hat_stack by calling upon the regressors
# to fit each mech_param. Each of these fits is responsible for it's own
# internal regularization.
hat_accum = []
for j, param_name in enumerate(self.encoded_param_names):
regressor = self.stat_estimators[param_name]
regressor.fit(v_df, np_float(mech_params_stack[:, j]))
hat_accum.append(regressor.predict(v_df))
proposed_mech_params_hat_stack = jnp.stack(hat_accum, axis=1)
# To stabilize the iteration, we don't jump all the way to the new fit,
# but to this convex combination with the old value.
mech_params_hat_stack = (
self.hat_interpolation_alpha * proposed_mech_params_hat_stack +
(1 - self.hat_interpolation_alpha) * mech_params_hat_stack)
self.mech_params_stack = mech_params_stack
self.mech_params_hat_stack = mech_params_hat_stack
return self
def _check_fitted(self):
if not hasattr(self, 'mech_params_stack'):
raise AttributeError('`fit` must be called before `predict`.')
def plot_partial_dependence(self):
self._check_fitted()
v_df = self.v_df
for _, param_name in enumerate(self.encoded_param_names):
print('Partial dependence plot for %s' % (param_name,))
regressor = self.stat_estimators[param_name]
sklearn.inspection.plot_partial_dependence(
regressor, v_df, v_df.columns, n_jobs=3, grid_resolution=20)
plt.show()
def plot_permutation_importances(self):
# needs sklearn 0.22.
self._check_fitted()
v_df = self.v_df
for j, param_name in enumerate(self.encoded_param_names):
print('Importance plot for %s' % (param_name,))
regressor = self.stat_estimators[param_name]
imp1 = sklearn.inspection.permutation_importance(
regressor, self.v_df, np_float(self.mech_params_stack[:, j]))
sorted_idx = imp1.importances_mean.argsort()
fig, ax = plt.subplots()
ax.boxplot(
imp1.importances[sorted_idx].T,
vert=False,
labels=v_df.columns[sorted_idx])
ax.set_title('Permutation Importances (train set)')
fig.tight_layout()
plt.show()
def predict(self, test_data, num_samples, seed=0):
self._check_fitted()
rng = jax.random.PRNGKey(seed)
mech_params = self.mech_params_stack
dynamic_covariates = predict_lib.prepare_dynamic_covariates(
self.data, test_data)
sample_mech_params_fn = getattr(
self, 'sample_mech_params_fn', lambda rngkey, num_samples: jnp.swapaxes(
jnp.broadcast_to(mech_params,
(num_samples,) + mech_params.shape), 1, 0))
return predict_lib.simulate_dynamic_predictions(
self.mech_model, mech_params, self.data, self.epidemics,
dynamic_covariates, num_samples, rng, sample_mech_params_fn)
@property
def mech_params(self):
self._check_fitted()
return predict_lib.mech_params_array(self.data, self.mech_model,
self.mech_params_stack)
@property
def mech_params_hat(self):
self._check_fitted()
return predict_lib.mech_params_array(self.data, self.mech_model,
self.mech_params_hat_stack)
@property
def encoded_mech_params(self):
self._check_fitted()
return predict_lib.encoded_mech_params_array(self.data, self.mech_model,
self.mech_params_stack)
@property
def mech_params_for_jax_code(self):
return self.encoded_mech_params.values
def _get_static_covariate_df(trajectories):
"""The (static) covariate matrix."""
raw_v_df = (
trajectories.static_covariates.reset_coords(drop=True).transpose(
'location', 'static_covariate').to_pandas())
# This can then be used with, e.g. patsy.
# expanded_v_df = patsy(raw_v_df, ...patsy details...)
# Optionally it can be converted back to xa using.
# expanded_v_xa = xarray.DataArray(expanded_v_df)
# for now...
v_df = raw_v_df
return v_df
def make_mean_estimators():
return collections.defaultdict(
lambda: sklearn.dummy.DummyRegressor(strategy='mean'))
def get_estimator_dict():
estimator_dict = {}
estimator_dict['iterative_randomforest__VC'] = IterativeEstimator()
estimator_dict['iterative_mean__VC'] = IterativeEstimator(
stat_estimators=make_mean_estimators())
estimator_dict['iterative_randomforest__VC_PL'] = IterativeEstimator(
mech_model=mechanistic_models.ViboudChowellModelPseudoLikelihood())
estimator_dict['iterative_mean__VC_PL'] = IterativeEstimator(
stat_estimators=make_mean_estimators(),
mech_model=mechanistic_models.ViboudChowellModelPseudoLikelihood())
estimator_dict['iterative_randomforest__Gaussian_PL'] = IterativeEstimator(
mech_model=mechanistic_models.GaussianModelPseudoLikelihood())
estimator_dict['iterative_mean__Gaussian_PL'] = IterativeEstimator(
stat_estimators=make_mean_estimators(),
mech_model=mechanistic_models.GaussianModelPseudoLikelihood())
return estimator_dict
| gpl-3.0 |
Dangetsu/vnr | Frameworks/Sakura/py/libs/freem/game.py | 1 | 5284 | # coding: utf8
# game.py
# 10/20/2013 jichi
__all__ = 'GameApi',
if __name__ == '__main__': # DEBUG
import sys
sys.path.append("..")
import re
from sakurakit import sknetio
from sakurakit.skcontainer import uniquelist
from sakurakit.skstr import unescapehtml
#from sakurakit.skdebug import dwarn
class GameApi(object):
QUERY_HOST = "http://www.freem.ne.jp"
QUERY_PATH = "/win/game/%s"
API = QUERY_HOST + QUERY_PATH
ENCODING = 'utf8'
session = None # requests.Session or None
def _makereq(self, id):
"""
    @param id  str or int  softId
    @return dict
"""
return {'url':self._makeurl(id)}
def _makeurl(self, id):
"""
@param id int
@return str
"""
return self.API % id
def _fetch(self, url):
"""
@param url str
@return str
"""
return sknetio.getdata(url, gzip=True, session=self.session) #, cookies=self.COOKIES)
def query(self, id):
"""
@param id str or int softId
@return {kw} or None
"""
url = self._makeurl(id)
h = self._fetch(url)
if h:
h = h.decode(self.ENCODING, errors='ignore')
if h:
ret = self._parse(h)
if ret:
ret['id'] = long(id)
ret['url'] = "http://freem.ne.jp/win/game/%s" % id # strip off www.
img = 'http://pic.freem.ne.jp/win/%s.jpg' % id
ret['img'] = img if img in h else ''
ret['sampleImages'] = list(self._iterparsesampleimages(h, id)) # [unicode]
return ret
def _parse(self, h):
"""
@param h unicode html
@return {kw}
"""
title = self._parsetitle(h)
if title:
title = self._fixtitle(title)
bl = u'BLゲーム' in h
otome = bl or u'女性向' in h or u'乙女ゲーム' in h
ecchi = u'全年齢' in h
ret = {
'title': title, # unicode
'otome': otome, # bool
'ecchi': ecchi, # bool
'filesize': self._parsesize(h), # int
'description': self._parsedesc(h), # unicode or None
'videos': uniquelist(self._iterparsevideos(h)), # [kw]
}
ret.update(self._iterparsefields(h))
return ret
# Example: RPGを初めて遊ぶ人のためのRPG ver1.32
_re_fixtitle = re.compile(' ver[0-9. ]+$')
def _fixtitle(self, t):
"""
@param t unicode
@return unicode
"""
return self._re_fixtitle.sub('', t)
# Example: <meta name="twitter:title" content="「恋と友情の境界線-体験版-」:無料ゲーム by ふりーむ!">
_re_title = re.compile(ur'<meta name="twitter:title" content="([^"]+?):無料ゲーム[^"]*"')
def _parsetitle(self, h):
"""
@param h unicode html
@return unicode or None
"""
m = self._re_title.search(h)
if m:
return unescapehtml(m.group(1))
_re_fields = (
# Example: <meta name="description" content="「赤い森の魔女」:樵の少年と魔女のお話" />
('slogan', re.compile(ur'<meta name="description" content="[^"]+」:([^"]+?)"')),
('brand', re.compile(r'href="/brand/\d+">([^<]+)<')),
# Example: ■登録日<br />2015-01-11<br />
('date', re.compile(ur'■登録日<br />([0-9-]+)<')),
)
def _iterparsefields(self, h):
"""
@param h unicode
@yield (str key, unicode or None)
"""
for k,rx in self._re_fields:
m = rx.search(h)
if m:
yield k, unescapehtml(m.group(1))
# Example:
# ■容量<br />
# 16,121 KByte<br />
_re_size = re.compile(ur'■容量<br />\n*\r*\s*([0-9,]*) KByte')
def _parsesize(self, h):
"""
@param h unicode html
@return long not None
"""
m = self._re_size.search(h)
if m:
try: return 1024 * long(m.group(1).replace(',', ''))
except: pass
return 0
# Example: http://pic.freem.ne.jp/win/123_2.jpg
def _iterparsesampleimages(self, h, id):
"""
@param h unicode html
@param id long
@yield unicode
"""
x = re.compile(r"http://pic.freem.ne.jp/win/%s_\d+.jpg" % id)
for m in x.finditer(h):
yield m.group()
# Example: https://www.youtube.com/watch?v=-Xsa47nj8uk
_re_youtube = re.compile(r'youtube.com/watch\?v=([0-9a-zA-Z_-]+)')
def _iterparsevideos(self, h): # the method apply to all case
"""
@param h unicode html
@yield unicode
"""
if 'youtube.com' in h:
for m in self._re_youtube.finditer(h):
yield m.group(1)
# Example:
# <!-- ■ゲーム説明スペース開始 -->
# <div id="gameExplain">
# </div>
# <!-- //□ゲーム説明スペース終了 -->
_re_desc = re.compile(
ur'<!-- ■ゲーム説明スペース開始 -->'
r'(.*?)'
ur'<!-- //□ゲーム説明スペース終了 -->'
, re.DOTALL)
def _parsedesc(self, h):
"""
@param h unicode html
@param id long
@yield unicode or None
"""
m = self._re_desc.search(h)
if m:
return m.group(1)
if __name__ == '__main__':
api = GameApi()
k = 8329 # http://www.freem.ne.jp/win/game/8329
k = 3055 # http://www.freem.ne.jp/win/game/3055
k = 7190 # http://www.freem.ne.jp/win/game/7190
k = 5414 # special name
k = 4467 # http://www.freem.ne.jp/win/game/4467
k = 3781
# Youtube Video
print '-' * 10
q = api.query(k)
for k,v in q.iteritems():
print k, ':', v
# EOF
| gpl-3.0 |
einarhuseby/arctic | tests/integration/tickstore/test_ts_delete.py | 3 | 1908 | from datetime import datetime as dt
from mock import patch
import numpy as np
from pandas.util.testing import assert_frame_equal
import pytest
from arctic import arctic as m
from arctic.date import DateRange, CLOSED_OPEN, mktz
from arctic.exceptions import OverlappingDataException, \
NoDataFoundException
def test_delete(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 1, 30, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib.chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
tickstore_lib.delete('SYM')
with pytest.raises(NoDataFoundException):
tickstore_lib.read('SYM', date_range=DateRange(20130102), columns=None)
# Delete with a date-range
tickstore_lib.write('SYM', DUMMY_DATA)
tickstore_lib.delete('SYM', DateRange(dt(2013, 1, 1, tzinfo=mktz('Europe/London')), dt(2013, 1, 2, tzinfo=mktz('Europe/London'))))
df = tickstore_lib.read('SYM', columns=None)
assert np.allclose(df['b'].values, np.array([4.]))
def test_delete_daterange(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 2, 1, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib.chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
# Delete with a date-range
tickstore_lib.delete('SYM', DateRange(dt(2013, 1, 1, tzinfo=mktz('Europe/London')), dt(2013, 2, 1, tzinfo=mktz('Europe/London')), CLOSED_OPEN))
df = tickstore_lib.read('SYM', columns=None)
assert np.allclose(df['b'].values, np.array([4.]))
| lgpl-2.1 |
jabesq/home-assistant | tests/components/zha/test_light.py | 1 | 8829 | """Test zha light."""
import asyncio
from unittest.mock import MagicMock, call, patch, sentinel
from homeassistant.components.light import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from .common import (
async_enable_traffic, async_init_zigpy_device, async_test_device_join,
make_attribute, make_entity_id)
from tests.common import mock_coro
ON = 1
OFF = 0
async def test_light(hass, config_entry, zha_gateway, monkeypatch):
"""Test zha light platform."""
from zigpy.zcl.clusters.general import OnOff, LevelControl, Basic
from zigpy.zcl.foundation import Status
from zigpy.profiles.zha import DeviceType
# create zigpy devices
zigpy_device_on_off = await async_init_zigpy_device(
hass,
[OnOff.cluster_id, Basic.cluster_id],
[],
DeviceType.ON_OFF_LIGHT,
zha_gateway
)
zigpy_device_level = await async_init_zigpy_device(
hass,
[OnOff.cluster_id, LevelControl.cluster_id, Basic.cluster_id],
[],
DeviceType.ON_OFF_LIGHT,
zha_gateway,
ieee="00:0d:6f:11:0a:90:69:e7",
manufacturer="FakeLevelManufacturer",
model="FakeLevelModel"
)
# load up light domain
await hass.config_entries.async_forward_entry_setup(
config_entry, DOMAIN)
await hass.async_block_till_done()
# on off light
on_off_device_on_off_cluster = zigpy_device_on_off.endpoints.get(1).on_off
on_off_entity_id = make_entity_id(DOMAIN, zigpy_device_on_off,
on_off_device_on_off_cluster,
use_suffix=False)
on_off_zha_device = zha_gateway.get_device(zigpy_device_on_off.ieee)
# dimmable light
level_device_on_off_cluster = zigpy_device_level.endpoints.get(1).on_off
level_device_level_cluster = zigpy_device_level.endpoints.get(1).level
on_off_mock = MagicMock(side_effect=asyncio.coroutine(MagicMock(
return_value=[sentinel.data, Status.SUCCESS])))
level_mock = MagicMock(side_effect=asyncio.coroutine(MagicMock(
return_value=[sentinel.data, Status.SUCCESS])))
monkeypatch.setattr(level_device_on_off_cluster, 'request', on_off_mock)
monkeypatch.setattr(level_device_level_cluster, 'request', level_mock)
level_entity_id = make_entity_id(DOMAIN, zigpy_device_level,
level_device_on_off_cluster,
use_suffix=False)
level_zha_device = zha_gateway.get_device(zigpy_device_level.ieee)
# test that the lights were created and that they are unavailable
assert hass.states.get(on_off_entity_id).state == STATE_UNAVAILABLE
assert hass.states.get(level_entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_gateway,
[on_off_zha_device, level_zha_device])
# test that the lights were created and are off
assert hass.states.get(on_off_entity_id).state == STATE_OFF
assert hass.states.get(level_entity_id).state == STATE_OFF
# test turning the lights on and off from the light
await async_test_on_off_from_light(
hass, on_off_device_on_off_cluster, on_off_entity_id)
await async_test_on_off_from_light(
hass, level_device_on_off_cluster, level_entity_id)
# test turning the lights on and off from the HA
await async_test_on_off_from_hass(
hass, on_off_device_on_off_cluster, on_off_entity_id)
await async_test_level_on_off_from_hass(
hass, level_device_on_off_cluster, level_device_level_cluster,
level_entity_id)
# test turning the lights on and off from the light
await async_test_on_from_light(
hass, level_device_on_off_cluster, level_entity_id)
# test getting a brightness change from the network
await async_test_dimmer_from_light(
hass, level_device_level_cluster, level_entity_id, 150, STATE_ON)
# test adding a new light to the network and HA
await async_test_device_join(
hass, zha_gateway, OnOff.cluster_id,
DOMAIN, device_type=DeviceType.ON_OFF_LIGHT)
async def async_test_on_off_from_light(hass, cluster, entity_id):
"""Test on off functionality from the light."""
# turn on at light
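    # make_attribute(0, 1) builds an OnOff attribute record (attr id 0, value 1);
    # command 0x0a is the ZCL "Report Attributes" command, so this simulates the
    # device reporting that it switched on.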
attr = make_attribute(0, 1)
cluster.handle_message(False, 1, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
# turn off at light
attr.value.value = 0
cluster.handle_message(False, 0, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_OFF
async def async_test_on_from_light(hass, cluster, entity_id):
"""Test on off functionality from the light."""
# turn on at light
attr = make_attribute(0, 1)
cluster.handle_message(False, 1, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
async def async_test_on_off_from_hass(hass, cluster, entity_id):
"""Test on off functionality from hass."""
from zigpy.zcl.foundation import Status
with patch(
'zigpy.zcl.Cluster.request',
return_value=mock_coro([0x00, Status.SUCCESS])):
# turn on via UI
await hass.services.async_call(DOMAIN, 'turn_on', {
'entity_id': entity_id
}, blocking=True)
assert cluster.request.call_count == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None)
await async_test_off_from_hass(hass, cluster, entity_id)
async def async_test_off_from_hass(hass, cluster, entity_id):
"""Test turning off the light from homeassistant."""
from zigpy.zcl.foundation import Status
with patch(
'zigpy.zcl.Cluster.request',
return_value=mock_coro([0x01, Status.SUCCESS])):
# turn off via UI
await hass.services.async_call(DOMAIN, 'turn_off', {
'entity_id': entity_id
}, blocking=True)
assert cluster.request.call_count == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None)
async def async_test_level_on_off_from_hass(hass, on_off_cluster,
level_cluster, entity_id):
"""Test on off functionality from hass."""
from zigpy import types
# turn on via UI
await hass.services.async_call(DOMAIN, 'turn_on', {'entity_id': entity_id},
blocking=True)
assert on_off_cluster.request.call_count == 1
assert level_cluster.request.call_count == 0
assert on_off_cluster.request.call_args == call(
False, 1, (), expect_reply=True, manufacturer=None)
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
await hass.services.async_call(DOMAIN, 'turn_on',
{'entity_id': entity_id, 'transition': 10},
blocking=True)
assert on_off_cluster.request.call_count == 1
assert level_cluster.request.call_count == 1
assert on_off_cluster.request.call_args == call(
False, 1, (), expect_reply=True, manufacturer=None)
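    # LevelControl command 0x04 is Move To Level (with On/Off); 254 is the max
    # level and 100.0 is the 10 s transition expressed in tenths of a second.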
assert level_cluster.request.call_args == call(
False, 4, (types.uint8_t, types.uint16_t), 254, 100.0,
expect_reply=True, manufacturer=None)
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
await hass.services.async_call(DOMAIN, 'turn_on',
{'entity_id': entity_id, 'brightness': 10},
blocking=True)
assert on_off_cluster.request.call_count == 1
assert level_cluster.request.call_count == 1
assert on_off_cluster.request.call_args == call(
False, 1, (), expect_reply=True, manufacturer=None)
assert level_cluster.request.call_args == call(
False, 4, (types.uint8_t, types.uint16_t), 10, 5.0,
expect_reply=True, manufacturer=None)
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
await async_test_off_from_hass(hass, on_off_cluster, entity_id)
async def async_test_dimmer_from_light(hass, cluster, entity_id,
level, expected_state):
"""Test dimmer functionality from the light."""
attr = make_attribute(0, level)
cluster.handle_message(False, 1, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == expected_state
# hass uses None for brightness of 0 in state attributes
if level == 0:
level = None
assert hass.states.get(entity_id).attributes.get('brightness') == level
| apache-2.0 |
admin-zhx/httpbin | httpbin/core.py | 6 | 20354 | # -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import random
import time
import uuid
from flask import Flask, Response, request, render_template, redirect, jsonify as flask_jsonify, make_response, url_for
from werkzeug.datastructures import WWWAuthenticate, MultiDict
from werkzeug.http import http_date
from werkzeug.wrappers import BaseResponse
from six.moves import range as xrange
from . import filters
from .helpers import get_headers, status_code, get_dict, get_request_range, check_basic_auth, check_digest_auth, secure_cookie, H, ROBOT_TXT, ANGRY_ASCII
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
def jsonify(*args, **kwargs):
response = flask_jsonify(*args, **kwargs)
if not response.data.endswith(b'\n'):
response.data += b'\n'
return response
# Prevent WSGI from correcting the casing of the Location header
BaseResponse.autocorrect_location_header = False
# Find the correct template folder when running from a different location
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
# Set up Bugsnag exception tracking, if desired. To use Bugsnag, install the
# Bugsnag Python client with the command "pip install bugsnag", and set the
# environment variable BUGSNAG_API_KEY. You can also optionally set
# BUGSNAG_RELEASE_STAGE.
if os.environ.get("BUGSNAG_API_KEY") is not None:
try:
import bugsnag
import bugsnag.flask
release_stage = os.environ.get("BUGSNAG_RELEASE_STAGE") or "production"
bugsnag.configure(api_key=os.environ.get("BUGSNAG_API_KEY"),
project_root=os.path.dirname(os.path.abspath(__file__)),
use_ssl=True, release_stage=release_stage,
ignore_classes=['werkzeug.exceptions.NotFound'])
bugsnag.flask.handle_exceptions(app)
except:
app.logger.warning("Unable to initialize Bugsnag exception handling.")
# -----------
# Middlewares
# -----------
@app.after_request
def set_cors_headers(response):
response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', '*')
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
# Both of these headers are only used for the "preflight request"
# http://www.w3.org/TR/cors/#access-control-allow-methods-response-header
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, PATCH, OPTIONS'
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
response.headers['Access-Control-Allow-Headers'] = request.headers['Access-Control-Request-Headers']
return response
# ------
# Routes
# ------
@app.route('/')
def view_landing_page():
"""Generates Landing Page."""
return render_template('index.html')
@app.route('/html')
def view_html_page():
"""Simple Html Page"""
return render_template('moby.html')
@app.route('/robots.txt')
def view_robots_page():
"""Simple Html Page"""
response = make_response()
response.data = ROBOT_TXT
response.content_type = "text/plain"
return response
@app.route('/deny')
def view_deny_page():
"""Simple Html Page"""
response = make_response()
response.data = ANGRY_ASCII
response.content_type = "text/plain"
return response
# return "YOU SHOULDN'T BE HERE"
@app.route('/ip')
def view_origin():
"""Returns Origin IP."""
return jsonify(origin=request.headers.get('X-Forwarded-For', request.remote_addr))
@app.route('/headers')
def view_headers():
"""Returns HTTP HEADERS."""
return jsonify(get_dict('headers'))
@app.route('/user-agent')
def view_user_agent():
"""Returns User-Agent."""
headers = get_headers()
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET',))
def view_get():
"""Returns GET Data."""
return jsonify(get_dict('url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post():
"""Returns POST Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put():
"""Returns PUT Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch():
"""Returns PATCH Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete():
"""Returns DELETE Data."""
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content():
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(
'origin', 'headers', method=request.method, gzipped=True))
@app.route('/deflate')
@filters.deflate
def view_deflate_encoded_content():
"""Returns Deflate-Encoded Data."""
return jsonify(get_dict(
'origin', 'headers', method=request.method, deflated=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(n):
"""302 Redirects n times."""
assert n > 0
absolute = request.args.get('absolute', 'false').lower() == 'true'
if n == 1:
return redirect(url_for('view_get', _external=absolute))
if absolute:
return _redirect('absolute', n, True)
else:
return _redirect('relative', n, False)
def _redirect(kind, n, external):
return redirect(url_for('{0}_redirect_n_times'.format(kind), n=n - 1, _external=external))
@app.route('/redirect-to')
def redirect_to():
"""302 Redirects to the given URL."""
args = CaseInsensitiveDict(request.args.items())
# We need to build the response manually and convert to UTF-8 to prevent
# werkzeug from "fixing" the URL. This endpoint should set the Location
# header to the exact string supplied.
response = app.make_response('')
response.status_code = 302
response.headers['Location'] = args['url'].encode('utf-8')
return response
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(n):
"""302 Redirects n times."""
assert n > 0
response = app.make_response('')
response.status_code = 302
if n == 1:
response.headers['Location'] = url_for('view_get')
return response
response.headers['Location'] = url_for('relative_redirect_n_times', n=n - 1)
return response
@app.route('/absolute-redirect/<int:n>')
def absolute_redirect_n_times(n):
"""302 Redirects n times."""
assert n > 0
if n == 1:
return redirect(url_for('view_get', _external=True))
return _redirect('absolute', n, True)
@app.route('/stream/<int:n>')
def stream_n_messages(n):
"""Stream n JSON messages"""
response = get_dict('url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response) + '\n'
return Response(generate_stream(), headers={
"Transfer-Encoding": "chunked",
"Content-Type": "application/json",
})
@app.route('/status/<codes>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def view_status_code(codes):
"""Return status code or random status code if more than one are given"""
    if ',' not in codes:
code = int(codes)
return status_code(code)
choices = []
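    # "codes" may be a comma-separated list of "code" or "code:weight" entries;
    # pick one at random according to the weights.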
for choice in codes.split(','):
        if ':' not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
@app.route('/response-headers')
def response_headers():
"""Returns a set of response headers from the query string """
headers = MultiDict(request.args.items(multi=True))
response = jsonify(headers.lists())
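    # The JSON body echoes the headers being set, and its size feeds back into
    # Content-Length; regenerate the body until Content-Length stabilizes.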
while True:
content_len_shown = response.headers['Content-Length']
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.items(multi=True):
response.headers.add(key, value)
if response.headers['Content-Length'] == content_len_shown:
break
return response
@app.route('/cookies')
def view_cookies(hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/forms/post')
def view_forms_post():
"""Simple HTML form."""
return render_template('forms-post.html')
@app.route('/cookies/set/<name>/<value>')
def set_cookie(name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie())
return r
@app.route('/cookies/set')
def set_cookies():
"""Sets cookie(s) as provided by the query string and redirects to cookie list."""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie())
return r
@app.route('/cookies/delete')
def delete_cookies():
"""Deletes cookie(s) as provided by the query string and redirects to cookie list."""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(qop=None, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Digest auth"""
if qop not in ('auth', 'auth-int'):
qop = None
if 'Authorization' not in request.headers or \
not check_digest_auth(user, passwd) or \
            'Cookie' not in request.headers:
response = app.make_response('')
response.status_code = 401
# RFC2616 Section4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request,'remote_addr',u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]))
opaque = H(os.urandom(10))
auth = WWWAuthenticate("digest")
auth.set_digest('[email protected]', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ))
response.headers['WWW-Authenticate'] = auth.to_header()
response.headers['Set-Cookie'] = 'fake=fake_value'
return response
return jsonify(authenticated=True, user=user)
@app.route('/delay/<int:delay>')
def delay_response(delay):
"""Returns a delayed response"""
delay = min(delay, 10)
time.sleep(delay)
return jsonify(get_dict(
'url', 'args', 'form', 'data', 'origin', 'headers', 'files'))
@app.route('/drip')
def drip():
"""Drips data over a duration after an optional initial delay."""
args = CaseInsensitiveDict(request.args.items())
duration = float(args.get('duration', 2))
numbytes = int(args.get('numbytes', 10))
code = int(args.get('code', 200))
pause = duration / numbytes
delay = float(args.get('delay', 0))
if delay > 0:
time.sleep(delay)
def generate_bytes():
for i in xrange(numbytes):
yield u"*".encode('utf-8')
time.sleep(pause)
response = Response(generate_bytes(), headers={
"Content-Type": "application/octet-stream",
"Content-Length": str(numbytes),
})
response.status_code = code
return response
@app.route('/base64/<value>')
def decode_base64(value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8') # base64 expects binary string as input
return base64.urlsafe_b64decode(encoded).decode('utf-8')
@app.route('/cache', methods=('GET',))
def cache():
"""Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise."""
is_conditional = request.headers.get('If-Modified-Since') or request.headers.get('If-None-Match')
if is_conditional is None:
response = view_get()
response.headers['Last-Modified'] = http_date()
response.headers['ETag'] = uuid.uuid4().hex
return response
else:
return status_code(304)
@app.route('/cache/<int:value>')
def cache_control(value):
"""Sets a Cache-Control header."""
response = view_get()
response.headers['Cache-Control'] = 'public, max-age={0}'.format(value)
return response
@app.route('/encoding/utf8')
def encoding():
return render_template('UTF-8-demo.txt')
@app.route('/bytes/<int:n>')
def random_bytes(n):
"""Returns n random bytes generated with given seed."""
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
response = make_response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = 'application/octet-stream'
return response
@app.route('/stream-bytes/<int:n>')
def stream_random_bytes(n):
"""Streams n random bytes generated with given seed, at given chunk size per packet."""
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield(bytes(chunks))
chunks = bytearray()
if chunks:
yield(bytes(chunks))
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'application/octet-stream'}
return Response(generate_bytes(), headers=headers)
@app.route('/range/<int:numbytes>')
def range_request(numbytes):
"""Streams n random bytes generated with given seed, at given chunk size per packet."""
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(headers={
'ETag' : 'range%d' % numbytes,
'Accept-Ranges' : 'bytes'
})
response.status_code = 404
        response.data = 'number of bytes must be in the range (0, 102400]'
return response
params = CaseInsensitiveDict(request.args.items())
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
duration = float(params.get('duration', 0))
pause_per_byte = duration / numbytes
request_headers = get_headers()
first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes)
if first_byte_pos > last_byte_pos or first_byte_pos not in xrange(0, numbytes) or last_byte_pos not in xrange(0, numbytes):
response = Response(headers={
'ETag' : 'range%d' % numbytes,
'Accept-Ranges' : 'bytes',
'Content-Range' : 'bytes */%d' % numbytes
})
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord('a') + (i % 26))
if len(chunks) == chunk_size:
yield(bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield(bytes(chunks))
content_range = 'bytes %d-%d/%d' % (first_byte_pos, last_byte_pos, numbytes)
response_headers = {
'Transfer-Encoding': 'chunked',
'Content-Type': 'application/octet-stream',
'ETag' : 'range%d' % numbytes,
'Accept-Ranges' : 'bytes',
'Content-Range' : content_range }
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response
@app.route('/links/<int:n>/<int:offset>')
def link_page(n, offset):
"""Generate a page containing n links to other pages which do the same."""
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ['<html><head><title>Links</title></head><body>']
for i in xrange(n):
if i == offset:
html.append("{0} ".format(i))
else:
html.append(link.format(url_for('link_page', n=n, offset=i), i))
html.append('</body></html>')
return ''.join(html)
@app.route('/links/<int:n>')
def links(n):
"""Redirect to first links page."""
return redirect(url_for('link_page', n=n, offset=0))
@app.route('/image')
def image():
"""Returns a simple image of the type suggest by the Accept header."""
headers = get_headers()
if 'accept' not in headers:
return image_png() # Default media type to png
accept = headers['accept'].lower()
if 'image/webp' in accept:
return image_webp()
elif 'image/jpeg' in accept:
return image_jpeg()
elif 'image/png' in accept or 'image/*' in accept:
return image_png()
else:
return status_code(404)
@app.route('/image/png')
def image_png():
data = resource('images/pig_icon.png')
return Response(data, headers={'Content-Type': 'image/png'})
@app.route('/image/jpeg')
def image_jpeg():
data = resource('images/jackal.jpg')
return Response(data, headers={'Content-Type': 'image/jpeg'})
@app.route('/image/webp')
def image_webp():
data = resource('images/wolf_1.webp')
return Response(data, headers={'Content-Type': 'image/webp'})
def resource(filename):
    path = os.path.join(tmpl_dir, filename)
    # Read with a context manager so the file handle is closed promptly.
    with open(path, 'rb') as f:
        return f.read()
@app.route("/xml")
def xml():
response = make_response(render_template("sample.xml"))
response.headers["Content-Type"] = "application/xml"
return response
if __name__ == '__main__':
app.run()
| isc |
ldirer/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 8 | 35969 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_false(hasattr(t2, "idf_"))
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
annajordanous/network-analysis | genre_relationships.py | 2 | 7928 | '''
Created on Apr 9, 2014
@author: daniel-allington
'''
# Creates a new database containing: (a) table of genre strings, with
# absolute frequencies, in order of frequency, leaving out any below a
# given threshold of frequency; (b) as a but for tags; (c) table of
# users with tracks, giving (i) all genre strings associated with each
# user's tracks, with frequency, in order of frequency, (ii) the
# user's most common genre string, (iii) the user's most common three
# genre strings (in alphabetical ordre; (d) as c but for tags. This
# database is stored in an sqlite file with '_deriv' appended to the
# name of the database it's derived from.
# Where the program has to choose between genres/tags that a user has
# used with equal frequency, it chooses the one that is more frequent
# in the dataset as a whole (where this is tied, it chooses the
# shorter string; where that is tied, the alphabetically prior
# string).
# Purpose: it will then be possible to create an undirected network of
# users with edges based not on followings etc but on use of similar
# genres/tags - and a network of genres/tags based on which ones are
# associated with tracks uploaded by the same individuals. Hopefully
# clusters in the two networks will give us a sense of the broad
# stylistic groupings behind the huge range of genre terms used on
# SoundCloud. Calculating betweenness centrality for these clusters
# will help to identify key terms and individuals.
# Edit: this now removes all spaces and hyphens from within strings.
# Reason is to stop 'hip hop', 'hip-hop', and 'hiphop' appearing as
# three different things.
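# Illustrative example (hypothetical data, not taken from any dataset): if a
# user's tracks carry the genre strings 'hiphop' x2, 'house' x2 and 'dub' x1,
# and 'house' ranks higher than 'hiphop' in the corpus-wide genres table, the
# user's most common genre is recorded as 'house' and the top three are
# stored alphabetically as 'dub | hiphop | house'.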
import sqlite3
import re
import collections
import add_data
import cPickle
import deriv_db
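# Tokenization helpers: genre strings are split on quotes, commas, slashes and
# backslashes; tags are captured either as quoted phrases or as single words;
# spaces and hyphens are later stripped from within each token by clean().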
genre_sep = re.compile(r'"|,|/|\\')
tag_captu = re.compile(r'"(.+?)"|\b(\S+?)\b')
to_remove = re.compile(r'[ -]')
genre_threshold = 2
tag_threshold = 2
with open('stopwords') as f:  # stopword list extracted from NLTK
    stop = cPickle.load(f)
def flatten(l):
return [i for subl in l for i in subl]
def user_data(curs,user,col):
# Apologies for combining a string operation with the proper
# SQLite insertion method (with ?) - for some reason, when I try
# to insert a table name with ?, it thinks this is a value that I
# want it to return. The database is safe so this isn't the
# security issue that it might have been - but I'll change it once
# I figure out how.
return curs.execute('SELECT {} FROM tracks WHERE user_id=?'.format(col),(user,))
def all_genres(curs):
curs.execute('SELECT * FROM sqlite_master')
return curs.execute('SELECT genre FROM tracks')
def all_tags(curs):
return curs.execute('SELECT tag_list FROM tracks')
def clean(l):
l2=[to_remove.sub('',i) for i in l]
return [i for i in l2 if len(i)>1 and i not in stop]
def strings_from_string(s,col):
if col=='genre':
return clean([g.strip()
for g in genre_sep.split(s.lower().strip('"\' '))])
elif col=='tag_list':
return clean([group[0] if group[0] else group[1]
for group in tag_captu.findall(s.lower())])
else: print 'Unrecognised source column name: {}'.format(col)
def strings_from_iterator(ite,col):
strings=[]
for i in ite:
if i[0]: strings.extend(strings_from_string(i[0],col))
return strings
def n_from_list(l,n,cursderiv,ranktable):
sorting_list=[]
for item in l:
cursderiv.execute('SELECT rank FROM {} WHERE string=?'.format(ranktable),
(item[0],))
c = cursderiv.fetchone()
if c: rank=c[0]
else: rank=10000000
sorting_list.append((rank,len(item[0]),item[0]))
return [(i[2],) for i in sorted(sorting_list)[:n]]
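# Picks the n most frequent strings from `counted` (a Counter.most_common()
# list). Whole groups of equally-frequent strings are taken while they still
# fit; the group that would overflow the limit is resolved by corpus-wide rank
# via n_from_list(). The result is padded with empty strings up to length n
# and returned in alphabetical order.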
def n_most_common(counted,n,cursderiv,ranktable):
c = (x for x in counted)
l = []
unused = None
current= []
while c:
while True:
try:
item = c.next()
if not current:
current.append(item)
elif item[1] == current[0][1]:
current.append(item)
else:
unused = [item]
break
except StopIteration:
c=False
break
if len(l)+len(current) <= n:
l.extend(current)
current = unused
unused = None
else:
break
if current:
l.extend(n_from_list(current,n-len(l),cursderiv,ranktable))
string_list=[i[0] for i in l]
return sorted(string_list+(['']*(n-len(string_list))))
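# Counts occurrences of each string, drops strings below the frequency
# threshold and attaches a dense rank (1 = most frequent, ties share a rank),
# yielding (string, frequency, rank) tuples ready for insertion.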
def add_ranks(l,threshold):
if not l: return [('','',0)]
counted = collections.Counter(l).most_common()
nums=list(reversed(sorted(set(zip(*counted)[1]))))
return [(c[0],c[1],nums.index(c[1])+1) for c in counted if c[1]>=threshold]
def create_gt_table(curssourc,cursderiv,colsourc,tabderiv):
add_data.create_table(cursderiv,tabderiv)
entries = (all_genres(curssourc) if tabderiv=='genres'
else all_tags(curssourc))
l = []
for e in entries:
if e[0]:
l.extend(strings_from_string(e[0],colsourc))
sql=('INSERT INTO {} (string,frequency,rank) '
'VALUES(?,?,?)'.format(tabderiv))
thresh = (genre_threshold if tabderiv == 'genres' else tag_threshold)
cursderiv.executemany(sql,add_ranks(l,thresh))
def check_tables(cursderiv,required_tables):
tables_present=[]
for t in required_tables:
cursderiv.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name=?",(t,))
        tables_present.append(len(cursderiv.fetchall()) > 0)
return tables_present
def gt_tables(db_source):
connsourc,connderiv = deriv_db.connect_databases(db_source)
curssourc = connsourc.cursor()
cursderiv = connderiv.cursor()
for colsourc,table in [('genre','genres'),('tag_list','tags')]:
create_gt_table(curssourc,cursderiv,colsourc,table)
connderiv.commit()
def deriv_user_data(curssourc,cursderiv,users,colsourc,ranktable):
for user in users:
print 'Working with user: '+str(user)
to_count=strings_from_iterator(user_data(curssourc,user[0],colsourc),
colsourc)
counted=collections.Counter(to_count).most_common()
mcstring = unicode(n_most_common(counted,
1,cursderiv,ranktable)[0])
cstrings = ' | '.join(n_most_common(counted,
3,cursderiv,ranktable))
str_counted= ' | '.join([u'{}, {}'.format(c[0],c[1])
for c in counted])
yield user[0],str_counted,mcstring,cstrings
def user_gt_tables(db_source):
connsourc,connderiv = deriv_db.connect_databases(db_source)
curssourc = connsourc.cursor()
cursderiv = connderiv.cursor()
required=['genres','tags']
ct = check_tables(cursderiv,required)
if not ct[0] or not ct[1]:
for n,r in enumerate(ct):
if not r: print 'Could not find {} table.'.format(required[n])
print ('Before calling this function, call gt_tables with '
'path of source database to create necessary tables.')
return False
curssourc.execute('SELECT user_id FROM tracks')
users=set(curssourc.fetchall())
for colsourc,tabderiv,ranktable in [('genre','user_genres','genres'),
('tag_list','user_tags','tags')]:
print 'Now working with: '+ranktable
add_data.create_table(cursderiv,tabderiv)
add_data.insert_deriv_data(cursderiv,tabderiv,
deriv_user_data(curssourc,cursderiv,
users,colsourc,ranktable))
connderiv.commit()
return True
| gpl-2.0 |
sqall01/alertR | shared_code/clients_sensor/lib/sensor/eventHandler.py | 1 | 2632 | #!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
import logging
import os
from typing import List, Any
from ..client import EventHandler
from ..globalData import ManagerObjManager, ManagerObjNode, ManagerObjOption, ManagerObjSensorAlert, \
ManagerObjAlertLevel, ManagerObjAlert, ManagerObjSensor, ManagerObjProfile
from ..globalData import SensorDataType
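# Event handler for nodes of type 'sensor'. Manager-oriented callbacks such as
# status updates or sensor alerts are not applicable to sensor clients, so the
# corresponding methods below log a critical error and raise
# NotImplementedError; only the connection callbacks are no-ops.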
class SensorEventHandler(EventHandler):
def __init__(self):
super().__init__()
# file name of this file (used for logging)
self._log_tag = os.path.basename(__file__)
# noinspection PyTypeChecker
def status_update(self,
msg_time: int,
options: List[ManagerObjOption],
profiles: List[ManagerObjProfile],
nodes: List[ManagerObjNode],
sensors: List[ManagerObjSensor],
managers: List[ManagerObjManager],
alerts: List[ManagerObjAlert],
alert_levels: List[ManagerObjAlertLevel]) -> bool:
logging.critical("[%s]: status_update() not supported by node of type 'sensor'." % self._log_tag)
raise NotImplementedError("Not supported by node of type 'sensor'.")
# noinspection PyTypeChecker
def sensor_alert(self,
msg_time: int,
sensor_alert: ManagerObjSensorAlert) -> bool:
logging.critical("[%s]: sensor_alert() not supported by node of type 'sensor'." % self._log_tag)
raise NotImplementedError("Not supported by node of type 'sensor'.")
# noinspection PyTypeChecker
def profile_change(self,
msg_time: int,
profile: ManagerObjProfile) -> bool:
logging.critical("[%s]: profile_change() not supported by node of type 'sensor'." % self._log_tag)
raise NotImplementedError("Not supported by node of type 'sensor'.")
# noinspection PyTypeChecker
def state_change(self,
msg_time: int,
sensor_id: int,
state: int,
data_type: SensorDataType,
sensor_data: Any) -> bool:
logging.critical("[%s]: state_change() not supported by node of type 'sensor'." % self._log_tag)
raise NotImplementedError("Not supported by node of type 'sensor'.")
def close_connection(self):
pass
def new_connection(self):
pass
| agpl-3.0 |
dymkowsk/mantid | Framework/PythonInterface/test/python/plugins/algorithms/FilterLogByTimeTest.py | 3 | 5672 | from __future__ import (absolute_import, division, print_function)
import unittest
import numpy
from mantid.simpleapi import *
from mantid.kernel import *
from mantid.api import *
class FilterLogByTimeTest(unittest.TestCase):
__ws = None
''' Log file contents.
2008-06-17T11:10:44 -0.86526
2008-06-17T11:10:45 -1.17843
2008-06-17T11:10:47 -1.27995
2008-06-17T11:20:15 -1.38216
2008-06-17T11:20:16 -1.87435
2008-06-17T11:20:17 -2.70547
2008-06-17T11:20:19 -2.99125
2008-06-17T11:20:20 -3
2008-06-17T11:20:27 -2.98519
2008-06-17T11:20:29 -2.68904
2008-06-17T11:20:30 -2.5
2008-06-17T11:20:38 -2.45909
2008-06-17T11:20:39 -2.08764
2008-06-17T11:20:40 -2
2008-06-17T11:20:50 -1.85174
2008-06-17T11:20:51 -1.51258
2008-06-17T11:20:52 -1.5
2008-06-17T11:21:01 -1.48566
2008-06-17T11:21:02 -1.18799
2008-06-17T11:21:04 -1
2008-06-17T11:21:11 -0.98799
2008-06-17T11:21:13 -0.63694
2008-06-17T11:21:14 -0.5
2008-06-17T11:21:23 -0.46247
2008-06-17T11:21:24 -0.08519
2008-06-17T11:21:25 0
2008-06-17T11:21:32 0
'''
def setUp(self):
x = numpy.arange(0, 1, 0.25)
        ws = CreateWorkspace(UnitX="1/q", DataX=x, DataY=[0, 0, 0], NSpec=1)
self.__ws = ws
LoadLog(Workspace=self.__ws,Filename='CSP78173_height.txt',Names='height')
def tearDown(self):
DeleteWorkspace(self.__ws)
def test_startdate_after_enddate(self):
try:
results = FilterLogByTime(InputWorkspace=self.__ws, LogName='height', StartTime=1, EndTime=0)
self.assertTrue(False, "End time < Start time.")
except RuntimeError:
pass
def test_without_limits(self):
AddSampleLog(Workspace=self.__ws,LogName='run_start',LogText='1900-Jan-01 00:00:00')
AddSampleLog(Workspace=self.__ws,LogName='run_end',LogText='2100-Jan-02 00:00:00')
results, stats = FilterLogByTime(InputWorkspace=self.__ws, LogName='height')
self.assertTrue(isinstance(results, numpy.ndarray), "Should give back an array")
self.assertTrue(isinstance(stats, float), "Should give back a single result")
expected_size = self.__ws.getRun().getLogData('height').size()
actual_size = results.size
self.assertEqual(expected_size, actual_size, "Nothing filtered out")
def test_with_start_limit(self):
AddSampleLog(Workspace=self.__ws,LogName='run_start',LogText='2008-06-17T11:10:44')
AddSampleLog(Workspace=self.__ws,LogName='run_end',LogText='2100-Jan-02 00:00:00')
results, stats = FilterLogByTime(InputWorkspace=self.__ws, LogName='height', StartTime=1)
self.assertTrue(isinstance(results, numpy.ndarray), "Should give back an array")
self.assertTrue(isinstance(stats, float), "Should give back a single result")
expected_size = self.__ws.getRun().getLogData('height').size() - 1
actual_size = results.size
self.assertEqual(expected_size, actual_size, "Should filter one out expected_size %s, actual_size %s" % (str(expected_size), str(actual_size)))
def test_with_end_limit(self):
AddSampleLog(Workspace=self.__ws,LogName='run_start',LogText='2008-06-17T11:10:44')
AddSampleLog(Workspace=self.__ws,LogName='run_end',LogText='2100-Jan-02 00:00:00')
results, stats = FilterLogByTime(InputWorkspace=self.__ws, LogName='height', EndTime=0.99)
self.assertTrue(isinstance(results, numpy.ndarray), "Should give back an array")
self.assertTrue(isinstance(stats, float), "Should give back a single result")
expected_size = 1
actual_size = results.size
self.assertEqual(expected_size, actual_size, "Expected_size %s, actual_size %s" % (str(expected_size), str(actual_size)))
def test_with_both_limits(self):
AddSampleLog(Workspace=self.__ws,LogName='run_start',LogText='2008-06-17T11:10:44')
AddSampleLog(Workspace=self.__ws,LogName='run_end',LogText='2100-Jan-02 00:00:00')
results, stats = FilterLogByTime(InputWorkspace=self.__ws, LogName='height', StartTime=1, EndTime=3)
self.assertTrue(isinstance(results, numpy.ndarray), "Should give back an array")
self.assertTrue(isinstance(stats, float), "Should give back a single result")
expected_size = 2
actual_size = results.size
self.assertEqual(expected_size, actual_size, "Should filter one out expected_size %s, actual_size %s" % (str(expected_size), str(actual_size)))
self.assertEqual(stats, (-1.17843 -1.27995)/2, "The 2nd and 3rd entry. Default stats should be mean of these.")
def __doStats(self, method):
AddSampleLog(Workspace=self.__ws,LogName='run_start',LogText='2008-06-17T11:10:44')
AddSampleLog(Workspace=self.__ws,LogName='run_end',LogText='2100-Jan-02 00:00:00')
results, stats = FilterLogByTime(InputWorkspace=self.__ws, LogName='height', StartTime=1, EndTime=3, Method=method)
self.assertTrue(isinstance(results, numpy.ndarray), "Should give back an array")
self.assertTrue(isinstance(stats, float), "Should give back a single result")
return stats
def test_calculate_mean(self):
stats = self.__doStats("mean")
self.assertEqual(stats, (-1.17843 -1.27995)/2)
def test_calculate_max(self):
stats = self.__doStats("max")
self.assertEqual(stats, -1.17843)
def test_calculate_min(self):
stats = self.__doStats("min")
self.assertEqual(stats, -1.27995)
if __name__ == '__main__':
unittest.main() | gpl-3.0 |
macloo/flasky | migrations/versions/38c4e85512a9_initial_migration.py | 182 | 1163 | """initial migration
Revision ID: 38c4e85512a9
Revises: None
Create Date: 2013-12-27 01:23:59.392801
"""
# revision identifiers, used by Alembic.
revision = '38c4e85512a9'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_username', 'users', ['username'], unique=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_users_username', 'users')
op.drop_table('users')
op.drop_table('roles')
### end Alembic commands ###
| mit |
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/grab_distrib.py | 1 | 3791 | #!/usr/bin/env python
import logging
import sys
from pkg_resources import Requirement
from setuptools.package_index import PackageIndex
_logger = logging.getLogger()
_pypi = 'http://pypi.python.org/simple'
def _enable_console(level):
""" Configure logging to receive log messages at the console. """
global _logger
# define a Handler which writes messages to sys.stderr
CONSOLE = logging.StreamHandler()
CONSOLE.setLevel(level)
CONSOLE.setFormatter(logging.Formatter('%(message)s'))
_logger.addHandler(CONSOLE)
def grab_distrib(req, index=None, dest='.', search_pypi=True):
"""\
Downloads a distribution from the given package index(es) based on the
given requirement string(s). Downloaded distributions are placed in the
specified destination or the current directory if no destination is
specified. If a distribution cannot be found in the given index(es), the
Python Package Index will be searched as a last resort unless
search_pypi is False. This does NOT install the distribution.
"""
# allow multiple package indexes to be specified
if index is None:
index = []
elif isinstance(index, basestring):
index = [index]
# else just assume it's some iterator of indexes
# add PyPI as last place to search if it wasn't already specified
if search_pypi and _pypi not in index and (_pypi + '/') not in index:
index.append(_pypi)
# allow specification of single or multiple requirements
if isinstance(req, basestring):
reqs = [Requirement.parse(req)]
elif isinstance(req, Requirement):
reqs = [req]
else:
reqs = []
for rr in req:
if isinstance(rr, basestring):
reqs.append(Requirement.parse(rr))
elif isinstance(rr, Requirement):
reqs.append(rr)
else:
raise TypeError("supplied requirement arg must be a string" +
" or a Requirement, but given type is %s" %
type(rr))
index_list = [PackageIndex(idx, search_path=[]) for idx in index]
for req in reqs:
fetched = None
for idx in index_list:
_logger.info('Looking for %s at package index %s' % (req, idx.index_url))
fetched = idx.download(req, dest)
if fetched:
_logger.info(' %s successfully downloaded' % fetched)
break
else:
_logger.error("couldn't find distrib for %s" % req)
return fetched
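# Example usage (illustrative sketch only; the requirement and index URL below
# are hypothetical, not taken from this project):
#
#     grab_distrib('somepkg>=1.0',
#                  index='http://packages.example.org/simple',
#                  dest='/tmp/dists')
#
# This searches the example index first and falls back to PyPI unless
# search_pypi is False.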
if __name__ == '__main__':
from optparse import OptionParser
usage = "%prog [options] req(s)"
parser = OptionParser(usage=usage, description=grab_distrib.__doc__)
parser.add_option("-i", "--index", action="append", type="string", dest="index",
help="package index url (separate -i for each one)", default=[])
parser.add_option("-d", "--dest", action="store", type="string", dest="dest",
help="destination directory", default='.')
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
help="no output")
parser.add_option("--nopypi", action="store_true", dest="nopypi",
help="do not search PyPI")
(options, args) = parser.parse_args(sys.argv[1:])
if len(args) < 1:
parser.print_help()
sys.exit(1)
if options.quiet:
loglevel = logging.CRITICAL
else:
loglevel = logging.INFO
_logger.setLevel(loglevel)
_enable_console(loglevel)
grab_distrib(req=args, index=options.index, dest=options.dest,
search_pypi=not options.nopypi)
grab_distrib.__doc__ += """
Requirements may be supplied as strings or as Requirement objects.
"""
| mit |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/style/patchreader.py | 188 | 3699 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
# Copyright (C) 2010 ProFUSION embedded systems
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
from webkitpy.common.checkout.diff_parser import DiffParser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.checkout.scm.detection import SCMDetector
_log = logging.getLogger(__name__)
class PatchReader(object):
"""Supports checking style in patches."""
def __init__(self, text_file_reader):
"""Create a PatchReader instance.
Args:
text_file_reader: A TextFileReader instance.
"""
self._text_file_reader = text_file_reader
def check(self, patch_string, fs=None):
"""Check style in the given patch."""
fs = fs or FileSystem()
patch_files = DiffParser(patch_string.splitlines()).files
        # If the user uses git, checking the Subversion config file only once
        # is enough, so unmodified png files after the first one can be skipped.
call_only_once = True
for path, diff_file in patch_files.iteritems():
line_numbers = diff_file.added_or_modified_line_numbers()
_log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))
if not line_numbers:
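                # A file with no added or modified lines usually needs no style
                # check, but an existing png file is still handed to the reader
                # (and, once git is detected, later unmodified pngs are skipped).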
match = re.search("\s*png$", path)
if match and fs.exists(path):
if call_only_once:
self._text_file_reader.process_file(file_path=path, line_numbers=None)
cwd = FileSystem().getcwd()
detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
if detection.display_name() == "git":
call_only_once = False
continue
                # Don't check files which contain only deleted lines
                # as they can never add style errors. However, mark them as
                # processed so that we count the number of such files.
self._text_file_reader.count_delete_only_file()
continue
self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
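# Illustrative use (not part of the original module). It assumes that a
# TextFileReader instance `reader` has already been constructed by the style
# checker and that `patch_text` holds a unified diff as a single string:
#
#     patch_reader = PatchReader(reader)
#     patch_reader.check(patch_text)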
| gpl-3.0 |
soutys/aorn | tests/test_samplesstore.py | 1 | 1983 | # -*- coding: utf-8 -*-
'''Samples storage module tests
'''
from __future__ import with_statement, division, absolute_import, print_function
import sys
from tempfile import NamedTemporaryFile
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from aorn.samplesstore import STDIN_FILE, SamplesStore
from tests.generators import synth_complex
def test_samplesstore_init():
samples_store = SamplesStore()
assert samples_store.get_samples() is None
def test_samplesstore_load_file_empty():
tmp = NamedTemporaryFile(delete=True, prefix='sample_')
samples_store = SamplesStore()
samples_store.load_samples(tmp.name)
assert samples_store.get_samples() is not None
assert len(samples_store.get_samples()) == 0
def test_samplesstore_load_stdin_empty():
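    # Replace stdin with an empty in-memory stream so loading from STDIN_FILE
    # sees no data, then restore the original stdin afterwards.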
old_stdin = sys.stdin
sys.stdin = StringIO(None)
samples_store = SamplesStore()
samples_store.load_samples(STDIN_FILE)
sys.stdin = old_stdin
assert samples_store.get_samples() is not None
assert len(samples_store.get_samples()) == 0
def test_samplesstore_load_file_somedata():
tmp = NamedTemporaryFile(delete=True, prefix='sample_')
data_sz = 10000
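    # synth_complex (from tests.generators) writes a synthesized 440 Hz test
    # signal of data_sz samples to the temporary file.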
synth_complex(freqs=[440], coefs=[1], datasize=data_sz, fname=tmp.name)
samples_store = SamplesStore()
samples_store.load_samples(tmp.name)
assert samples_store.get_samples() is not None
assert len(samples_store.get_samples()) == data_sz
def test_samplesstore_load_stdin_somedata():
tmp = NamedTemporaryFile(delete=True, prefix='sample_')
data_sz = 10000
synth_complex(freqs=[440], coefs=[1], datasize=data_sz, fname=tmp.name)
old_stdin = sys.stdin
sys.stdin = open(tmp.name, 'rb')
samples_store = SamplesStore()
samples_store.load_samples(STDIN_FILE)
sys.stdin = old_stdin
assert samples_store.get_samples() is not None
assert len(samples_store.get_samples()) == data_sz
# vim: ts=4:sw=4:et:fdm=indent:ff=unix
| mit |