repo_name | path | copies | size | content | license
---|---|---|---|---|---
QijunPan/ansible | lib/ansible/modules/cloud/rackspace/rax_cbs.py | 25 | 7272 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image):
changed = False
volume = None
instance = {}
cbs = pyrax.cloud_blockstorage
if cbs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if image:
# pyrax<1.9.3 did not support specifying an image when creating
# a volume, which is required for bootable volumes
if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
module.fail_json(msg='Creating a bootable volume requires '
'pyrax>=1.9.3')
image = rax_find_image(module, pyrax, image)
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
kwargs = dict()
if image:
kwargs['image'] = image
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id, **kwargs)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
if wait:
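# Poll every 5 seconds; wait_timeout / 5 gives the number of polling
# attempts (integer division under the Python 2 runtime this module targets).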
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(volume, interval=5,
attempts=attempts)
volume.get()
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait and volume.status not in VOLUME_STATUS:
result['msg'] = 'Timeout waiting on %s' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(type='str'),
image=dict(type='str'),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
snapshot_id=dict(),
state=dict(default='present', choices=['present', 'absent']),
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
image = module.params.get('image')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
snapshot_id = module.params.get('snapshot_id')
state = module.params.get('state')
volume_type = module.params.get('volume_type')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 |
boundarydevices/android_external_chromium_org | build/landmine_utils.py | 59 | 2787 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import shlex
import sys
def memoize(default=None):
"""This decorator caches the return value of a parameterless pure function"""
def memoizer(func):
val = []
@functools.wraps(func)
def inner():
if not val:
ret = func()
val.append(ret if ret is not None else default)
if logging.getLogger().isEnabledFor(logging.INFO):
print '%s -> %r' % (func.__name__, val[0])
return val[0]
return inner
return memoizer
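# A minimal sketch of how @memoize() is used (hypothetical function name):
# the decorated function runs once and later calls return the cached value.
#
#   @memoize(default='')
#   def current_platform():
#     return sys.platform
#
#   current_platform()  # computes and caches the result
#   current_platform()  # served from the cache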
@memoize()
def IsWindows():
return sys.platform in ['win32', 'cygwin']
@memoize()
def IsLinux():
return sys.platform.startswith(('linux', 'freebsd'))
@memoize()
def IsMac():
return sys.platform == 'darwin'
@memoize()
def gyp_defines():
"""Parses and returns GYP_DEFINES env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
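# For example (hypothetical environment): GYP_DEFINES="OS=android goma=1"
# parses to {'OS': 'android', 'goma': '1'}.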
@memoize()
def gyp_msvs_version():
return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
"""
Returns a string which is the distributed build engine in use (if any).
Possible values: 'goma', 'ib', ''
"""
if 'goma' in gyp_defines():
return 'goma'
elif IsWindows():
if 'CHROME_HEADLESS' in os.environ:
return 'ib' # use (win and !goma and headless) as approximation of ib
return '' # neither goma nor ib detected; matches the docstring's ''
@memoize()
def platform():
"""
Returns a string representing the platform this build is targeted for.
Possible values: 'win', 'mac', 'linux', 'ios', 'android'
"""
if 'OS' in gyp_defines():
if 'android' in gyp_defines()['OS']:
return 'android'
else:
return gyp_defines()['OS']
elif IsWindows():
return 'win'
elif IsLinux():
return 'linux'
else:
return 'mac'
@memoize()
def builder():
"""
Returns a string representing the build engine (not compiler) to use.
Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
"""
if 'GYP_GENERATORS' in os.environ:
# for simplicity, only support the first explicit generator
generator = os.environ['GYP_GENERATORS'].split(',')[0]
if generator.endswith('-android'):
return generator.split('-')[0]
elif generator.endswith('-ninja'):
return 'ninja'
else:
return generator
else:
if platform() == 'android':
# Good enough for now? Do any android bots use make?
return 'ninja'
elif platform() == 'ios':
return 'xcode'
elif IsWindows():
return 'ninja'
elif IsLinux():
return 'ninja'
elif IsMac():
return 'ninja'
else:
assert False, 'Don\'t know what builder we\'re using!'
| bsd-3-clause |
eleonrk/SickRage | lib/github/tests/Github_.py | 9 | 17814 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import Framework
import github
class Github(Framework.TestCase):
def testGetGists(self):
self.assertListKeyBegin(self.g.get_gists(), lambda g: g.id, ["2729695", "2729656", "2729597", "2729584", "2729569", "2729554", "2729543", "2729537", "2729536", "2729533", "2729525", "2729522", "2729519", "2729515", "2729506", "2729487", "2729484", "2729482", "2729441", "2729432", "2729420", "2729398", "2729372", "2729371", "2729351", "2729346", "2729316", "2729304", "2729296", "2729276", "2729272", "2729265", "2729195", "2729160", "2729143", "2729127", "2729119", "2729113", "2729103", "2729069", "2729059", "2729051", "2729029", "2729027", "2729026", "2729022", "2729002", "2728985", "2728979", "2728964", "2728937", "2728933", "2728884", "2728869", "2728866", "2728855", "2728854", "2728853", "2728846", "2728825", "2728814", "2728813", "2728812", "2728805", "2728802", "2728800", "2728798", "2728797", "2728796", "2728793", "2728758", "2728754", "2728751", "2728748", "2728721", "2728716", "2728715", "2728705", "2728701", "2728699", "2728697", "2728688", "2728683", "2728677", "2728649", "2728640", "2728625", "2728620", "2728615", "2728614", "2728565", "2728564", "2728554", "2728523", "2728519", "2728511", "2728497", "2728496", "2728495", "2728487"])
def testLegacySearchRepos(self):
repos = self.g.legacy_search_repos("github api v3")
self.assertListKeyBegin(repos, lambda r: r.name, ["github", "octonode", "PyGithub"])
self.assertEqual(repos[0].full_name, "peter-murach/github")
# Attributes retrieved from legacy API without lazy completion call
self.assertEqual(repos[2].created_at, datetime.datetime(2012, 2, 25, 12, 53, 47))
self.assertEqual(repos[2].name, "PyGithub")
self.assertEqual(repos[2].watchers, 365)
self.assertTrue(repos[2].has_downloads)
self.assertEqual(repos[2].homepage, "http://jacquev6.github.io/PyGithub")
self.assertEqual(repos[2].url, "/repos/jacquev6/PyGithub")
self.assertFalse(repos[2].fork)
self.assertTrue(repos[2].has_issues)
self.assertFalse(repos[2].has_wiki)
self.assertEqual(repos[2].forks, 102)
self.assertEqual(repos[2].size, 11373)
self.assertFalse(repos[2].private)
self.assertEqual(repos[2].open_issues, 14)
self.assertEqual(repos[2].pushed_at, datetime.datetime(2014, 3, 16, 17, 1, 56))
self.assertEqual(repos[2].description, "Python library implementing the full Github API v3")
self.assertEqual(repos[2].language, "Python")
self.assertEqual(repos[2].owner.login, "jacquev6")
self.assertEqual(repos[2].owner.url, "/users/jacquev6")
def testLegacySearchReposPagination(self):
repos = self.g.legacy_search_repos("document")
self.assertListKeyBegin(repos, lambda r: r.name, ["git", "nimbus", "kss", "sstoolkit", "lawnchair", "appledoc", "jQ.Mobi", "ipython", "mongoengine", "ravendb", "substance", "symfony-docs", "JavaScript-Garden", "DocSets-for-iOS", "yard", "phpDocumentor2", "phpsh", "Tangle", "Ingredients", "documentjs", "xhp", "couchdb-lucene", "dox", "magento2", "javascriptmvc", "FastPdfKit", "roar", "DocumentUp", "NoRM", "jsdoc", "tagger", "mongodb-csharp", "php-github-api", "beautiful-docs", "mongodb-odm", "iodocs", "seesaw", "bcx-api", "developer.github.com", "amqp", "docsplit", "pycco", "standards-and-practices", "tidy-html5", "redis-doc", "tomdoc", "docs", "flourish", "userguide", "swagger-ui", "rfc", "Weasel-Diesel", "yuidoc", "apigen", "document-viewer", "develop.github.com", "Shanty-Mongo", "PTShowcaseViewController", "gravatar_image_tag", "api-wow-docs", "mongoid-tree", "safari-json-formatter", "mayan", "orm-documentation", "jsfiddle-docs-alpha", "core", "documentcloud", "flexible-nav", "writeCapture", "readium", "xmldocument", "Documentation-Examples", "grails-doc", "stdeb", "aws-autoscaling", "voteable_mongo", "review", "spreadsheet_on_rails", "UKSyntaxColoredTextDocument", "mandango", "bdoc", "Documentation", "documents.com", "rghost", "ticket_mule", "vendo", "khan-api", "spring-data-document-examples", "rspec_api_documentation", "axlsx", "phpdox", "documentation", "Sami", "innershiv", "doxyclean", "documents", "rvm-site", "jqapi", "documentation", "hadoopy", "VichUploaderBundle", "pdoc", "documentation", "wii-js", "oss-docs", "scala-maven-plugin", "Documents", "documenter", "behemoth", "documentation", "documentation", "propelorm.github.com", "Kobold2D", "AutoObjectDocumentation", "php-mongodb-admin", "django-mongokit", "puppet-docs", "docs", "Document", "vendorer", "symfony1-docs", "shocco", "documentation", "jog", "docs", "documentation", "documentation", "documentation", "documentation", "Documentation", "documentation", "documentation", "phpunit-documentation", "ADCtheme", "NelmioApiDocBundle", "iCloud-Singleton-CloudMe", "Documentation", "document", "document_mapper", "heroku-docs", "couchdb-odm", "documentation", "documentation", "document", "documentation", "NanoStore", "documentation", "Documentation", "documentation", "Documentation", "documentation", "document", "documentation", "documentation", "Documentation", "Documentation", "grendel", "ceylon-compiler", "mbtiles-spec", "documentation", "documents", "documents", "Documents", "Documentation", "documentation", "Documentation", "documentation", "documents", "Documentation", "documentation", "documentation", "documents", "Documentation", "documentation", "documenter", "documentation", "documents", "Documents", "documents", "documents", "documentation", "Document", "document", "rdoc", "mongoid_token", "travis-ci.github.com", "Documents", "Documents", "documents", "Document", "Documentation", "documents", "Documents", "Documentation", "documents", "documents", "documents", "documentation", "Documents", "Document", "documents", "documents", "Documentation", "Documentation", "Document", "documents", "Documents", "Documents", "Documentation", "Documents", "documents", "Documents", "document", "documents", "Documentation", "Documents", "documents", "documents", "Documents", "documents", "Documentation", "documentation", "Document", "Documents", "documents", "documents", "documents", "Documentation", "Documentation", "Documents", "Documents", "Documents", "Documenter", "document", "Documentation", "Documents", "Documents", "documentation", 
"documentation", "Document", "Documents", "Documentation", "Documentation", "Documents", "documents", "Documents", "document", "documentation", "Documents", "documentation", "documentation", "documentation", "Documentation", "Documents", "Documents", "documentation", "Documents", "Documents", "documentation", "documentation", "documents", "Documentation", "documents", "documentation", "Documentation", "Documents", "documentation", "documentation", "documents", "documentation", "Umbraco5Docs", "documents", "Documents", "Documentation", "documents", "document", "documents", "document", "documents", "documentation", "Documents", "documents", "document", "Documents", "Documentation", "Documentation", "documentation", "Documentation", "document", "documentation", "documents", "documents", "Documentations", "document", "documentation", "Documentation", "Document", "Documents", "Documents", "Document"])
def testLegacySearchReposExplicitPagination(self):
repos = self.g.legacy_search_repos("python")
self.assertEqual([r.name for r in repos.get_page(4)], ["assetic", "cartodb", "cuisine", "gae-sessions", "geoalchemy2", "Multicorn", "wmfr-timeline", "redis-rdb-tools", "applet-workflows", "TweetBuff", "groovy-core", "StarTrekGame", "Nuevo", "Cupid", "node-sqlserver", "Magnet2Torrent", "GroundControl", "mock-django", "4bit", "mock-django", "Fabulous", "SFML", "pydicas", "flixel", "up", "mongrel2", "SimpleHTTPServerJs", "ultimos", "Archipel", "JSbooks", "nova", "nodebox", "simplehttp", "dablooms", "solarized", "landslide", "jQuery-File-Upload", "jQuery-File-Upload", "jQuery-File-Upload", "password-manager", "electrum", "twitter_nlp", "djangbone", "pyxfst", "node-gyp", "flare", "www.gittip.com", "wymeditor", "Kokobox", "MyCQ", "runwalk", "git-sweep", "HPCPythonSC2012", "sundown", "node2dm", "statirator", "fantastic-futures", "chainsaw", "itcursos-gerenciador-tarefas", "TideSDK", "genmaybot", "melpa", "ConnectedWire", "tarantool", "anserindicus_sn", "luvit", "Minecraft-Overviewer", "Iconic", "pyist.net", "wikibok", "mejorenvo-scraper", "NewsBlur", "SocketRocket", "spf13-vim", "IWantToWorkAtGloboCom", "ruby-style-guide", "aery32-refguide", "fafsite", "compsense_demo", "enaml", "mpi4py", "fi.pycon.org", "scikits-image", "scikits-image", "uni", "mako.vim", "mako.vim", "slumber", "de-composer", "nvm", "helloshopply", "Alianza", "vimfiles", "socorro-crashstats", "menu", "analytics", "elFinder", "riak_wiki", "livestreamer", "git-goggles"])
def testLegacySearchReposWithLanguage(self):
repos = self.g.legacy_search_repos("document", language="Python")
self.assertListKeyBegin(repos, lambda r: r.name, ["ipython", "mongoengine", "tagger"])
self.assertEqual(repos[0].full_name, "ipython/ipython")
def testLegacySearchUsers(self):
users = self.g.legacy_search_users("vincent")
self.assertListKeyBegin(users, lambda u: u.login, ["nvie", "obra", "lusis"])
# Attributes retrieved from legacy API without lazy completion call
self.assertEqual(users[0].gravatar_id, "c5a7f21b46df698f3db31c37ed0cf55a")
self.assertEqual(users[0].name, "Vincent Driessen")
self.assertEqual(users[0].created_at, datetime.datetime(2009, 5, 12, 21, 19, 38))
self.assertEqual(users[0].location, "Netherlands")
self.assertEqual(users[0].followers, 310)
self.assertEqual(users[0].public_repos, 63)
self.assertEqual(users[0].login, "nvie")
def testLegacySearchUsersPagination(self):
self.assertEqual(len(list(self.g.legacy_search_users("Lucy"))), 146)
def testLegacySearchUsersExplicitPagination(self):
users = self.g.legacy_search_users("Lucy")
self.assertEqual([u.login for u in users.get_page(1)], ["lucievh", "lucyim", "Lucief", "RevolverUpstairs", "seriousprogramming", "reicul", "davincidubai", "LucianaNascimentodoPrado", "lucia-huenchunao", "kraji20", "Lucywolo", "Luciel", "sunnysummer", "elush", "oprealuci", "Flika", "lsher", "datadrivenjournalism", "nill2020", "doobi", "lucilu", "deldeldel", "lucianacocca", "lucyli-sfdc", "lucysatchell", "UBM", "kolousek", "lucyzhang", "lmegia", "luisolivo", "Lucyzhen", "Luhzinha", "beautifly", "lucybm96", "BuonocoreL", "lucywilliams", "ZxOxZ", "Motwinb", "johnlucy", "Aquanimation", "alaltaieri", "lucylin", "lucychambers", "JuanSesma", "cdwwebware", "ZachWills"])
def testLegacySearchUserByEmail(self):
user = self.g.legacy_search_user_by_email("[email protected]")
self.assertEqual(user.login, "jacquev6")
self.assertEqual(user.followers, 13)
def testGetHooks(self):
hooks = self.g.get_hooks()
hook = hooks[0]
self.assertEqual(hook.name, "activecollab")
self.assertEqual(hook.supported_events, ["push"])
self.assertEqual(hook.events, ["push"])
self.assertEqual(hook.schema, [["string", "url"], ["string", "token"], ["string", "project_id"], ["string", "milestone_id"], ["string", "category_id"]])
def testGetEmojis(self):
emojis = self.g.get_emojis()
first = emojis.get("+1")
self.assertEqual(first, "https://github.global.ssl.fastly.net/images/icons/emoji/+1.png?v5")
def testGetHook(self):
hook = self.g.get_hook("activecollab")
self.assertEqual(hook.name, "activecollab")
self.assertEqual(hook.supported_events, ["push"])
self.assertEqual(hook.events, ["push"])
self.assertEqual(hook.schema, [["string", "url"], ["string", "token"], ["string", "project_id"], ["string", "milestone_id"], ["string", "category_id"]])
def testGetRepoFromFullName(self):
self.assertEqual(self.g.get_repo("jacquev6/PyGithub").description, "Python library implementing the full Github API v3")
def testGetRepoFromId(self):
self.assertEqual(self.g.get_repo(3544490).description, "Python library implementing the full Github API v3")
def testGetGitignoreTemplates(self):
self.assertEqual(self.g.get_gitignore_templates(), ["Actionscript", "Android", "AppceleratorTitanium", "Autotools", "Bancha", "C", "C++", "CFWheels", "CMake", "CSharp", "CakePHP", "Clojure", "CodeIgniter", "Compass", "Concrete5", "Coq", "Delphi", "Django", "Drupal", "Erlang", "ExpressionEngine", "Finale", "ForceDotCom", "FuelPHP", "GWT", "Go", "Grails", "Haskell", "Java", "Jboss", "Jekyll", "Joomla", "Jython", "Kohana", "LaTeX", "Leiningen", "LemonStand", "Lilypond", "Lithium", "Magento", "Maven", "Node", "OCaml", "Objective-C", "Opa", "OracleForms", "Perl", "PlayFramework", "Python", "Qooxdoo", "Qt", "R", "Rails", "RhodesRhomobile", "Ruby", "Scala", "Sdcc", "SeamGen", "SketchUp", "SugarCRM", "Symfony", "Symfony2", "SymphonyCMS", "Target3001", "Tasm", "Textpattern", "TurboGears2", "Unity", "VB.Net", "Waf", "Wordpress", "Yii", "ZendFramework", "gcov", "nanoc", "opencart"])
def testGetGitignoreTemplate(self):
t = self.g.get_gitignore_template("Python")
self.assertEqual(t.name, "Python")
self.assertEqual(t.source, "*.py[cod]\n\n# C extensions\n*.so\n\n# Packages\n*.egg\n*.egg-info\ndist\nbuild\neggs\nparts\nbin\nvar\nsdist\ndevelop-eggs\n.installed.cfg\nlib\nlib64\n\n# Installer logs\npip-log.txt\n\n# Unit test / coverage reports\n.coverage\n.tox\nnosetests.xml\n\n# Translations\n*.mo\n\n# Mr Developer\n.mr.developer.cfg\n.project\n.pydevproject\n")
t = self.g.get_gitignore_template("C++")
self.assertEqual(t.name, "C++")
self.assertEqual(t.source, "# Compiled Object files\n*.slo\n*.lo\n*.o\n\n# Compiled Dynamic libraries\n*.so\n*.dylib\n\n# Compiled Static libraries\n*.lai\n*.la\n*.a\n")
def testStringOfNotSet(self):
self.assertEqual(str(github.GithubObject.NotSet), "NotSet")
def testGetUsers(self):
self.assertListKeyBegin(self.g.get_users(), lambda u: u.login, ["mojombo", "defunkt", "pjhyett", "wycats", "ezmobius", "ivey", "evanphx", "vanpelt", "wayneeseguin", "brynary", "kevinclark", "technoweenie", "macournoyer", "takeo", "Caged", "topfunky", "anotherjesse", "roland", "lukas", "fanvsfan", "tomtt", "railsjitsu", "nitay", "kevwil", "KirinDave", "jamesgolick", "atmos", "errfree", "mojodna", "bmizerany", "jnewland", "joshknowles", "hornbeck", "jwhitmire", "elbowdonkey", "reinh", "timocratic", "bs", "rsanheim", "schacon", "uggedal", "bruce", "sam", "mmower", "abhay", "rabble", "benburkert", "indirect", "fearoffish", "ry", "engineyard", "jsierles", "tweibley", "peimei", "brixen", "tmornini", "outerim", "daksis", "sr", "lifo", "rsl", "imownbey", "dylanegan", "jm", "willcodeforfoo", "jvantuyl", "BrianTheCoder", "freeformz", "hassox", "automatthew", "queso", "lancecarlson", "drnic", "lukesutton", "danwrong", "hcatlin", "jfrost", "mattetti", "ctennis", "lawrencepit", "marcjeanson", "grempe", "peterc", "ministrycentered", "afarnham", "up_the_irons", "evilchelu", "heavysixer", "brosner", "danielmorrison", "danielharan", "kvnsmth", "collectiveidea", "canadaduane", "nate", "dstrelau", "sunny", "dkubb", "jnicklas", "richcollins", "simonjefford"])
def testGetUsersSince(self):
self.assertListKeyBegin(self.g.get_users(since=1000), lambda u: u.login, ["sbecker"])
def testGetRepos(self):
self.assertListKeyBegin(self.g.get_repos(), lambda r: r.name, ["grit", "merb-core", "rubinius", "god", "jsawesome", "jspec", "exception_logger", "ambition"])
def testGetReposSince(self):
self.assertListKeyBegin(self.g.get_repos(since=1000), lambda r: r.name, ["jquery-humanize-messages-plugin", "4slicer", "fixture-scenarios", "mongrel_proctitle", "rails-plugins"])
| gpl-3.0 |
jeremygibbs/microhh | cases/lasso/lasso_init.py | 5 | 6596 | import netCDF4 as nc
import numpy as np
import f90nml
import shutil
import os
from datetime import datetime
#import os.path
#import os
#import sys
#import glob
float_type = "f8"
largescale = True
# Define some constants
cp = 1004.
rd = 287.04
grav = 9.8
rho = 1.225
p0 = 1e5
Lv = 2.5e6
tau = 21600.
# Get number of vertical levels and size from .ini file
shutil.copy2('testbed.ini', 'testbed.tmp')
with open('testbed.tmp') as f:
for line in f:
if(line.split('=')[0] == 'ktot'):
kmax = int(line.split('=')[1])
if(line.split('=')[0] == 'zsize'):
zsize = float(line.split('=')[1])
dz = zsize / kmax
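# For example (hypothetical values): ktot=256 with zsize=14336. gives a base
# grid spacing of dz = 14336/256 = 56 m before any stretching is applied.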
zstretch = 5800.
stretch = 1.04
# Read WRF Namelist
fnml = "config/namelist.input"
nml = f90nml.read(fnml)
runtime_in = nml["time_control"]["run_days"] * 86400 + nml["time_control"]["run_hours"] * \
3600 + nml["time_control"]["run_minutes"] * 60 + nml["time_control"]["run_seconds"]
# Read Surface pressure
fname_in = "config/wrfinput_d01.nc"
f = nc.Dataset(fname_in, 'r')
ps_in = f.variables['PB'][:][0, 0, 0, 0]
#wrfstattime = f.variables['XTIME'][:]*60.
f.close()
# Read WRF Surface Forcing
fname_in = "config/input_sfc_forcing.nc"
f = nc.Dataset(fname_in, 'r')
H_in = f.variables['PRE_SH_FLX'][:]
LE_in = f.variables['PRE_LH_FLX'][:]
f.close()
# Read WRF LS Forcing & Nudging
fname_in = "config/input_ls_forcing.nc"
f = nc.Dataset(fname_in, 'r')
timestr = f.variables['Times'][:]
z_in = f.variables['Z_LS'][:]
u_in = f.variables['U_LS'][:]
v_in = f.variables['V_LS'][:]
thl_in = f.variables['TH_RLX'][:]
qt_in = f.variables['QV_RLX'][:]
thlls_in = f.variables['TH_ADV'][:]
qtls_in = f.variables['QV_ADV'][:]
wls_in = f.variables['W_LS'][:]
f.close()
dt = np.empty(timestr.shape[0], dtype=datetime)
tnudge = np.zeros(timestr.shape[0])
for i in range(timestr.shape[0]):
dt[i] = datetime.strptime(
timestr[i].tostring().decode(),
'%Y-%m-%d_%H:%M:%S')
tnudge[i] = (dt[i] - dt[0]).total_seconds()
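# e.g. a record '2017-05-09_12:00:00' six hours after the first record
# yields tnudge = 21600.0 seconds.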
ntnudge = tnudge.size
# interpolate onto the MicroHH grid
# non-equidistant grid
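# Full levels z[k] are built with constant spacing dz up to zstretch (5800 m);
# above that each spacing is multiplied by the stretch factor (1.04). Half
# levels zh are midpoints between full levels, with the top one extrapolated.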
z = np.zeros(kmax)
z[0] = 0.5 * dz
for k in range(1, kmax):
z[k] = z[k - 1] + 0.5 * dz
if z[k] > zstretch:
dz *= stretch
z[k] += 0.5 * dz
zh = np.zeros(kmax)
zh[:-1] = (z[1:] + z[:-1]) / 2
zh[-1] = 2 * z[-1] - zh[-2]
u = np.zeros((ntnudge, z.size))
v = np.zeros(np.shape(u))
thl = np.zeros(np.shape(u))
qt = np.zeros(np.shape(u))
thlls = np.zeros(np.shape(u))
qtls = np.zeros(np.shape(u))
wls = np.zeros(np.shape(u))
nudge_factor = np.zeros(np.shape(u))
p_sbot = np.zeros((ntnudge))
#p_sbot = np.interp(tnudge, wrfstattime,ps_in)
p_sbot[:] = ps_in
for t in range(tnudge.size):
u[t, :] = np.interp(z, z_in[t, :], u_in[t, :])
v[t, :] = np.interp(z, z_in[t, :], v_in[t, :])
thl[t, :] = np.interp(z, z_in[t, :], thl_in[t, :])
qt[t, :] = np.interp(z, z_in[t, :], qt_in[t, :])
thlls[t, :] = np.interp(z, z_in[t, :], thlls_in[t, :])
qtls[t, :] = np.interp(z, z_in[t, :], qtls_in[t, :])
wls[t, :] = np.interp(zh, z_in[t, :], wls_in[t, :])
ug = u
vg = v
nudge_factor[:, :] = 1. / tau
# Surface fluxes
rhosurf = p_sbot / (rd * thl[:, 0] * (1. + 0.61 * qt[:, 0]))
sbotthl = H_in / (rhosurf * cp)
sbotqt = LE_in / (rhosurf * Lv)
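# Bulk conversions used above (thl[:, 0] standing in for the near-surface
# temperature): rho = p / (Rd * T * (1 + 0.61*qt)), w'thl' = H / (rho*cp),
# w'qt' = LE / (rho*Lv).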
# Modify .ini file: Add comments for case description; Alter lines where
# necessary.
inifile = open('testbed.ini', 'w')
inifile.write("#Converted from LASSO WRF" + "\n")
# inifile.write("#Start Date = " + timestr[0]+ "\n")
# inifile.write("#Stop Date = " + timestr[-1]+"\n")
with open('testbed.tmp') as f:
for line_in in f:
if(line_in.split('=')[0] == 'zsize'):
line = "zsize={0:f}\n".format(zh[-1])
elif(line_in.split('=')[0] == 'pbot'):
line = "pbot={0:f}\n".format(p_sbot[0])
else:
line = line_in
inifile.write(line)
inifile.close()
os.remove('testbed.tmp')
# Save all the input data to NetCDF
nc_file = nc.Dataset(
"testbed_input.nc",
mode="w",
format="NETCDF4",
clobber=False)
nc_file.createDimension("z", kmax)
nc_z = nc_file.createVariable("z", float_type, ("z"))
nc_z[:] = z[:]
nc_file.createDimension("zh", kmax)
nc_zh = nc_file.createVariable("zh", float_type, ("zh"))
nc_zh[:] = zh[:]
# Create a group called "init" for the initial profiles.
nc_group_init = nc_file.createGroup("init")
nc_thl = nc_group_init.createVariable("thl", float_type, ("z"))
nc_qt = nc_group_init.createVariable("qt", float_type, ("z"))
nc_u = nc_group_init.createVariable("u", float_type, ("z"))
nc_v = nc_group_init.createVariable("v", float_type, ("z"))
nc_ug = nc_group_init.createVariable("u_geo", float_type, ("z"))
nc_vg = nc_group_init.createVariable("v_geo", float_type, ("z"))
nc_nudge_factor = nc_group_init.createVariable("nudgefac", float_type, ("z"))
nc_thl[:] = thl[0, :]
nc_qt[:] = qt[0, :]
nc_u[:] = u[0, :]
nc_v[:] = v[0, :]
nc_ug[:] = ug[0, :]
nc_vg[:] = vg[0, :]
nc_nudge_factor[:] = nudge_factor[0, :]
# Create a group called "timedep" for the timedep.
nc_group_timedep = nc_file.createGroup("timedep")
nc_group_timedep.createDimension("time", tnudge.size)
nc_time = nc_group_timedep.createVariable("time", float_type, ("time"))
nc_time[:] = tnudge[:]
nc_thl_sbot = nc_group_timedep.createVariable("thl_sbot", float_type, ("time"))
nc_qt_sbot = nc_group_timedep.createVariable("qt_sbot", float_type, ("time"))
nc_p_sbot = nc_group_timedep.createVariable("p_sbot", float_type, ("time"))
nc_thl_sbot[:] = sbotthl[:]
nc_qt_sbot[:] = sbotqt[:]
nc_p_sbot[:] = p_sbot[:]
nc_thl_ls = nc_group_timedep.createVariable(
"thl_ls", float_type, ("time", "z"))
nc_qt_ls = nc_group_timedep.createVariable("qt_ls", float_type, ("time", "z"))
nc_w_ls = nc_group_timedep.createVariable("w_ls", float_type, ("time", "zh"))
nc_u_g = nc_group_timedep.createVariable("u_geo", float_type, ("time", "z"))
nc_v_g = nc_group_timedep.createVariable("v_geo", float_type, ("time", "z"))
nc_u_nudge = nc_group_timedep.createVariable(
"u_nudge", float_type, ("time", "z"))
nc_v_nudge = nc_group_timedep.createVariable(
"v_nudge", float_type, ("time", "z"))
nc_thl_nudge = nc_group_timedep.createVariable(
"thl_nudge", float_type, ("time", "z"))
nc_qt_nudge = nc_group_timedep.createVariable(
"qt_nudge", float_type, ("time", "z"))
nc_thl_ls[:, :] = thlls[:, :]
nc_qt_ls[:, :] = qtls[:, :]
nc_w_ls[:, :] = wls[:, :]
nc_u_g[:, :] = ug[:, :]
nc_v_g[:, :] = vg[:, :]
nc_u_nudge[:, :] = u[:, :]
nc_v_nudge[:, :] = v[:, :]
nc_thl_nudge[:, :] = thl[:, :]
nc_qt_nudge[:, :] = qt[:, :]
nc_file.close()
print("done")
| gpl-3.0 |
melvon22/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0be.py | 253 | 4849 | data = (
'byum', # 0x00
'byub', # 0x01
'byubs', # 0x02
'byus', # 0x03
'byuss', # 0x04
'byung', # 0x05
'byuj', # 0x06
'byuc', # 0x07
'byuk', # 0x08
'byut', # 0x09
'byup', # 0x0a
'byuh', # 0x0b
'beu', # 0x0c
'beug', # 0x0d
'beugg', # 0x0e
'beugs', # 0x0f
'beun', # 0x10
'beunj', # 0x11
'beunh', # 0x12
'beud', # 0x13
'beul', # 0x14
'beulg', # 0x15
'beulm', # 0x16
'beulb', # 0x17
'beuls', # 0x18
'beult', # 0x19
'beulp', # 0x1a
'beulh', # 0x1b
'beum', # 0x1c
'beub', # 0x1d
'beubs', # 0x1e
'beus', # 0x1f
'beuss', # 0x20
'beung', # 0x21
'beuj', # 0x22
'beuc', # 0x23
'beuk', # 0x24
'beut', # 0x25
'beup', # 0x26
'beuh', # 0x27
'byi', # 0x28
'byig', # 0x29
'byigg', # 0x2a
'byigs', # 0x2b
'byin', # 0x2c
'byinj', # 0x2d
'byinh', # 0x2e
'byid', # 0x2f
'byil', # 0x30
'byilg', # 0x31
'byilm', # 0x32
'byilb', # 0x33
'byils', # 0x34
'byilt', # 0x35
'byilp', # 0x36
'byilh', # 0x37
'byim', # 0x38
'byib', # 0x39
'byibs', # 0x3a
'byis', # 0x3b
'byiss', # 0x3c
'bying', # 0x3d
'byij', # 0x3e
'byic', # 0x3f
'byik', # 0x40
'byit', # 0x41
'byip', # 0x42
'byih', # 0x43
'bi', # 0x44
'big', # 0x45
'bigg', # 0x46
'bigs', # 0x47
'bin', # 0x48
'binj', # 0x49
'binh', # 0x4a
'bid', # 0x4b
'bil', # 0x4c
'bilg', # 0x4d
'bilm', # 0x4e
'bilb', # 0x4f
'bils', # 0x50
'bilt', # 0x51
'bilp', # 0x52
'bilh', # 0x53
'bim', # 0x54
'bib', # 0x55
'bibs', # 0x56
'bis', # 0x57
'biss', # 0x58
'bing', # 0x59
'bij', # 0x5a
'bic', # 0x5b
'bik', # 0x5c
'bit', # 0x5d
'bip', # 0x5e
'bih', # 0x5f
'bba', # 0x60
'bbag', # 0x61
'bbagg', # 0x62
'bbags', # 0x63
'bban', # 0x64
'bbanj', # 0x65
'bbanh', # 0x66
'bbad', # 0x67
'bbal', # 0x68
'bbalg', # 0x69
'bbalm', # 0x6a
'bbalb', # 0x6b
'bbals', # 0x6c
'bbalt', # 0x6d
'bbalp', # 0x6e
'bbalh', # 0x6f
'bbam', # 0x70
'bbab', # 0x71
'bbabs', # 0x72
'bbas', # 0x73
'bbass', # 0x74
'bbang', # 0x75
'bbaj', # 0x76
'bbac', # 0x77
'bbak', # 0x78
'bbat', # 0x79
'bbap', # 0x7a
'bbah', # 0x7b
'bbae', # 0x7c
'bbaeg', # 0x7d
'bbaegg', # 0x7e
'bbaegs', # 0x7f
'bbaen', # 0x80
'bbaenj', # 0x81
'bbaenh', # 0x82
'bbaed', # 0x83
'bbael', # 0x84
'bbaelg', # 0x85
'bbaelm', # 0x86
'bbaelb', # 0x87
'bbaels', # 0x88
'bbaelt', # 0x89
'bbaelp', # 0x8a
'bbaelh', # 0x8b
'bbaem', # 0x8c
'bbaeb', # 0x8d
'bbaebs', # 0x8e
'bbaes', # 0x8f
'bbaess', # 0x90
'bbaeng', # 0x91
'bbaej', # 0x92
'bbaec', # 0x93
'bbaek', # 0x94
'bbaet', # 0x95
'bbaep', # 0x96
'bbaeh', # 0x97
'bbya', # 0x98
'bbyag', # 0x99
'bbyagg', # 0x9a
'bbyags', # 0x9b
'bbyan', # 0x9c
'bbyanj', # 0x9d
'bbyanh', # 0x9e
'bbyad', # 0x9f
'bbyal', # 0xa0
'bbyalg', # 0xa1
'bbyalm', # 0xa2
'bbyalb', # 0xa3
'bbyals', # 0xa4
'bbyalt', # 0xa5
'bbyalp', # 0xa6
'bbyalh', # 0xa7
'bbyam', # 0xa8
'bbyab', # 0xa9
'bbyabs', # 0xaa
'bbyas', # 0xab
'bbyass', # 0xac
'bbyang', # 0xad
'bbyaj', # 0xae
'bbyac', # 0xaf
'bbyak', # 0xb0
'bbyat', # 0xb1
'bbyap', # 0xb2
'bbyah', # 0xb3
'bbyae', # 0xb4
'bbyaeg', # 0xb5
'bbyaegg', # 0xb6
'bbyaegs', # 0xb7
'bbyaen', # 0xb8
'bbyaenj', # 0xb9
'bbyaenh', # 0xba
'bbyaed', # 0xbb
'bbyael', # 0xbc
'bbyaelg', # 0xbd
'bbyaelm', # 0xbe
'bbyaelb', # 0xbf
'bbyaels', # 0xc0
'bbyaelt', # 0xc1
'bbyaelp', # 0xc2
'bbyaelh', # 0xc3
'bbyaem', # 0xc4
'bbyaeb', # 0xc5
'bbyaebs', # 0xc6
'bbyaes', # 0xc7
'bbyaess', # 0xc8
'bbyaeng', # 0xc9
'bbyaej', # 0xca
'bbyaec', # 0xcb
'bbyaek', # 0xcc
'bbyaet', # 0xcd
'bbyaep', # 0xce
'bbyaeh', # 0xcf
'bbeo', # 0xd0
'bbeog', # 0xd1
'bbeogg', # 0xd2
'bbeogs', # 0xd3
'bbeon', # 0xd4
'bbeonj', # 0xd5
'bbeonh', # 0xd6
'bbeod', # 0xd7
'bbeol', # 0xd8
'bbeolg', # 0xd9
'bbeolm', # 0xda
'bbeolb', # 0xdb
'bbeols', # 0xdc
'bbeolt', # 0xdd
'bbeolp', # 0xde
'bbeolh', # 0xdf
'bbeom', # 0xe0
'bbeob', # 0xe1
'bbeobs', # 0xe2
'bbeos', # 0xe3
'bbeoss', # 0xe4
'bbeong', # 0xe5
'bbeoj', # 0xe6
'bbeoc', # 0xe7
'bbeok', # 0xe8
'bbeot', # 0xe9
'bbeop', # 0xea
'bbeoh', # 0xeb
'bbe', # 0xec
'bbeg', # 0xed
'bbegg', # 0xee
'bbegs', # 0xef
'bben', # 0xf0
'bbenj', # 0xf1
'bbenh', # 0xf2
'bbed', # 0xf3
'bbel', # 0xf4
'bbelg', # 0xf5
'bbelm', # 0xf6
'bbelb', # 0xf7
'bbels', # 0xf8
'bbelt', # 0xf9
'bbelp', # 0xfa
'bbelh', # 0xfb
'bbem', # 0xfc
'bbeb', # 0xfd
'bbebs', # 0xfe
'bbes', # 0xff
)
| gpl-2.0 |
TeachAtTUM/edx-platform | common/djangoapps/terrain/browser.py | 15 | 10334 | """
Browser setup for acceptance tests.
"""
# pylint: disable=no-member
# pylint: disable=unused-argument
from base64 import encodestring
from json import dumps
from logging import getLogger
import requests
from django.conf import settings
from django.core.management import call_command
from lettuce import after, before, world
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from splinter.browser import Browser
from xmodule.contentstore.django import _CONTENTSTORE
LOGGER = getLogger(__name__)
LOGGER.info("Loading the lettuce acceptance testing terrain file...")
MAX_VALID_BROWSER_ATTEMPTS = 20
GLOBAL_SCRIPT_TIMEOUT = 60
def get_saucelabs_username_and_key():
"""
Returns the Sauce Labs username and access ID as set by environment variables
"""
return {"username": settings.SAUCE.get('USERNAME'), "access-key": settings.SAUCE.get('ACCESS_ID')}
def set_saucelabs_job_status(jobid, passed=True):
"""
Sets the job status on sauce labs
"""
config = get_saucelabs_username_and_key()
url = 'http://saucelabs.com/rest/v1/{}/jobs/{}'.format(config['username'], jobid)
body_content = dumps({"passed": passed})
base64string = encodestring('{}:{}'.format(config['username'], config['access-key']))[:-1]
headers = {"Authorization": "Basic {}".format(base64string)}
result = requests.put(url, data=body_content, headers=headers)
return result.status_code == 200
def make_saucelabs_desired_capabilities():
"""
Returns a DesiredCapabilities object corresponding to the environment sauce parameters
"""
desired_capabilities = settings.SAUCE.get('BROWSER', DesiredCapabilities.CHROME)
desired_capabilities['platform'] = settings.SAUCE.get('PLATFORM')
desired_capabilities['version'] = settings.SAUCE.get('VERSION')
desired_capabilities['device-type'] = settings.SAUCE.get('DEVICE')
desired_capabilities['name'] = settings.SAUCE.get('SESSION')
desired_capabilities['build'] = settings.SAUCE.get('BUILD')
desired_capabilities['video-upload-on-pass'] = False
desired_capabilities['sauce-advisor'] = False
desired_capabilities['capture-html'] = True
desired_capabilities['record-screenshots'] = True
desired_capabilities['selenium-version'] = "2.34.0"
desired_capabilities['max-duration'] = 3600
desired_capabilities['public'] = 'public restricted'
return desired_capabilities
@before.harvest
def initial_setup(server):
"""
Launch the browser once before executing the tests.
"""
world.absorb(settings.LETTUCE_SELENIUM_CLIENT, 'LETTUCE_SELENIUM_CLIENT')
if world.LETTUCE_SELENIUM_CLIENT == 'local':
browser_driver = getattr(settings, 'LETTUCE_BROWSER', 'chrome')
if browser_driver == 'chrome':
desired_capabilities = DesiredCapabilities.CHROME
desired_capabilities['loggingPrefs'] = {
'browser': 'ALL',
}
else:
desired_capabilities = {}
# There is an issue with ChromeDriver2 r195627 on Ubuntu
# in which we sometimes get an invalid browser session.
# This is a work-around to ensure that we get a valid session.
success = False
num_attempts = 0
while (not success) and num_attempts < MAX_VALID_BROWSER_ATTEMPTS:
# Load the browser and try to visit the main page
# If the browser couldn't be reached or
# the browser session is invalid, this will
# raise a WebDriverException
try:
if browser_driver == 'firefox':
# Lettuce initializes differently for firefox, and sending
# desired_capabilities will not work. So initialize without
# sending desired_capabilities.
world.browser = Browser(browser_driver)
else:
world.browser = Browser(browser_driver, desired_capabilities=desired_capabilities)
world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
world.visit('/')
except WebDriverException:
LOGGER.warn("Error acquiring %s browser, retrying", browser_driver, exc_info=True)
if hasattr(world, 'browser'):
world.browser.quit()
num_attempts += 1
else:
success = True
# If we were unable to get a valid session within the limit of attempts,
# then we cannot run the tests.
if not success:
raise IOError("Could not acquire valid {driver} browser session.".format(driver=browser_driver))
world.absorb(0, 'IMPLICIT_WAIT')
world.browser.driver.set_window_size(1280, 1024)
elif world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
config = get_saucelabs_username_and_key()
world.browser = Browser(
'remote',
url="http://{}:{}@ondemand.saucelabs.com:80/wd/hub".format(config['username'], config['access-key']),
**make_saucelabs_desired_capabilities()
)
world.absorb(30, 'IMPLICIT_WAIT')
world.browser.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
elif world.LETTUCE_SELENIUM_CLIENT == 'grid':
world.browser = Browser(
'remote',
url=settings.SELENIUM_GRID.get('URL'),
browser=settings.SELENIUM_GRID.get('BROWSER'),
)
world.absorb(30, 'IMPLICIT_WAIT')
world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
else:
raise Exception("Unknown selenium client '{}'".format(world.LETTUCE_SELENIUM_CLIENT))
world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)
world.absorb(world.browser.driver.session_id, 'jobid')
@before.each_scenario
def reset_data(scenario):
"""
Clean out the django test database defined in the
envs/acceptance.py file: edx-platform/db/test_edx.db
"""
LOGGER.debug("Flushing the test database...")
call_command('flush', interactive=False, verbosity=0)
world.absorb({}, 'scenario_dict')
@before.each_scenario
def configure_screenshots(scenario):
"""
Before each scenario, turn off automatic screenshots.
Args: scenario: the current scenario.
"""
world.auto_capture_screenshots = False
@after.each_scenario
def clear_data(scenario):
world.spew('scenario_dict')
@after.each_scenario
def reset_databases(scenario):
"""
After each scenario, all databases are cleared/dropped. Contentstore data are stored in unique databases
whereas modulestore data is in unique collection names. This data is created implicitly during the scenarios.
If no data was created during the test, these lines are effectively no-ops.
"""
import xmodule.modulestore.django
xmodule.modulestore.django.modulestore()._drop_database() # pylint: disable=protected-access
xmodule.modulestore.django.clear_existing_modulestores()
_CONTENTSTORE.clear()
@world.absorb
def capture_screenshot(image_name):
"""
Capture a screenshot outputting it to a defined directory.
This function expects only the name of the file. It will generate
the full path of the output screenshot.
If the name contains spaces, they will be converted to underscores.
"""
output_dir = '{}/log/auto_screenshots'.format(settings.TEST_ROOT)
image_name = '{}/{}.png'.format(output_dir, image_name.replace(' ', '_'))
try:
world.browser.driver.save_screenshot(image_name)
except WebDriverException:
LOGGER.error("Could not capture a screenshot '{}'".format(image_name))
@after.each_scenario
def screenshot_on_error(scenario):
"""
Save a screenshot to help with debugging.
"""
if scenario.failed:
try:
output_dir = '{}/log'.format(settings.TEST_ROOT)
image_name = '{}/{}.png'.format(output_dir, scenario.name.replace(' ', '_'))
world.browser.driver.save_screenshot(image_name)
except WebDriverException:
LOGGER.error('Could not capture a screenshot')
@after.each_scenario
def capture_console_log(scenario):
"""
Save the console log to help with debugging.
"""
if scenario.failed:
log = world.browser.driver.get_log('browser')
try:
output_dir = '{}/log'.format(settings.TEST_ROOT)
file_name = '{}/{}.log'.format(output_dir, scenario.name.replace(' ', '_'))
with open(file_name, 'w') as output_file:
for line in log:
output_file.write("{}{}".format(dumps(line), '\n'))
except WebDriverException:
LOGGER.error('Could not capture the console log')
def capture_screenshot_for_step(step, when):
"""
Useful method for debugging acceptance tests that are run in Vagrant.
This method runs automatically before and after each step of an acceptance
test scenario. The variable:
world.auto_capture_screenshots
either enables or disables the taking of screenshots. To change the
variable there is a convenient step defined:
I (enable|disable) auto screenshots
If you just want to capture a single screenshot at a desired point in code,
you should use the method:
world.capture_screenshot("image_name")
"""
if world.auto_capture_screenshots:
scenario_num = step.scenario.feature.scenarios.index(step.scenario) + 1
step_num = step.scenario.steps.index(step) + 1
step_func_name = step.defined_at.function.func_name
image_name = "{prefix:03d}__{num:03d}__{name}__{postfix}".format(
prefix=scenario_num,
num=step_num,
name=step_func_name,
postfix=when
)
world.capture_screenshot(image_name)
@before.each_step
def before_each_step(step):
capture_screenshot_for_step(step, '1_before')
@after.each_step
def after_each_step(step):
capture_screenshot_for_step(step, '2_after')
@after.harvest
def saucelabs_status(total):
"""
Collect data for saucelabs.
"""
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
set_saucelabs_job_status(world.jobid, total.scenarios_ran == total.scenarios_passed)
| agpl-3.0 |
sebgoa/client-python | kubernetes/client/models/v1_node_system_info.py | 2 | 12434 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NodeSystemInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, system_uuid=None):
"""
V1NodeSystemInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'architecture': 'str',
'boot_id': 'str',
'container_runtime_version': 'str',
'kernel_version': 'str',
'kube_proxy_version': 'str',
'kubelet_version': 'str',
'machine_id': 'str',
'operating_system': 'str',
'os_image': 'str',
'system_uuid': 'str'
}
self.attribute_map = {
'architecture': 'architecture',
'boot_id': 'bootID',
'container_runtime_version': 'containerRuntimeVersion',
'kernel_version': 'kernelVersion',
'kube_proxy_version': 'kubeProxyVersion',
'kubelet_version': 'kubeletVersion',
'machine_id': 'machineID',
'operating_system': 'operatingSystem',
'os_image': 'osImage',
'system_uuid': 'systemUUID'
}
self._architecture = architecture
self._boot_id = boot_id
self._container_runtime_version = container_runtime_version
self._kernel_version = kernel_version
self._kube_proxy_version = kube_proxy_version
self._kubelet_version = kubelet_version
self._machine_id = machine_id
self._operating_system = operating_system
self._os_image = os_image
self._system_uuid = system_uuid
@property
def architecture(self):
"""
Gets the architecture of this V1NodeSystemInfo.
The Architecture reported by the node
:return: The architecture of this V1NodeSystemInfo.
:rtype: str
"""
return self._architecture
@architecture.setter
def architecture(self, architecture):
"""
Sets the architecture of this V1NodeSystemInfo.
The Architecture reported by the node
:param architecture: The architecture of this V1NodeSystemInfo.
:type: str
"""
if architecture is None:
raise ValueError("Invalid value for `architecture`, must not be `None`")
self._architecture = architecture
@property
def boot_id(self):
"""
Gets the boot_id of this V1NodeSystemInfo.
Boot ID reported by the node.
:return: The boot_id of this V1NodeSystemInfo.
:rtype: str
"""
return self._boot_id
@boot_id.setter
def boot_id(self, boot_id):
"""
Sets the boot_id of this V1NodeSystemInfo.
Boot ID reported by the node.
:param boot_id: The boot_id of this V1NodeSystemInfo.
:type: str
"""
if boot_id is None:
raise ValueError("Invalid value for `boot_id`, must not be `None`")
self._boot_id = boot_id
@property
def container_runtime_version(self):
"""
Gets the container_runtime_version of this V1NodeSystemInfo.
ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
:return: The container_runtime_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._container_runtime_version
@container_runtime_version.setter
def container_runtime_version(self, container_runtime_version):
"""
Sets the container_runtime_version of this V1NodeSystemInfo.
ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
:param container_runtime_version: The container_runtime_version of this V1NodeSystemInfo.
:type: str
"""
if container_runtime_version is None:
raise ValueError("Invalid value for `container_runtime_version`, must not be `None`")
self._container_runtime_version = container_runtime_version
@property
def kernel_version(self):
"""
Gets the kernel_version of this V1NodeSystemInfo.
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
:return: The kernel_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._kernel_version
@kernel_version.setter
def kernel_version(self, kernel_version):
"""
Sets the kernel_version of this V1NodeSystemInfo.
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
:param kernel_version: The kernel_version of this V1NodeSystemInfo.
:type: str
"""
if kernel_version is None:
raise ValueError("Invalid value for `kernel_version`, must not be `None`")
self._kernel_version = kernel_version
@property
def kube_proxy_version(self):
"""
Gets the kube_proxy_version of this V1NodeSystemInfo.
KubeProxy Version reported by the node.
:return: The kube_proxy_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._kube_proxy_version
@kube_proxy_version.setter
def kube_proxy_version(self, kube_proxy_version):
"""
Sets the kube_proxy_version of this V1NodeSystemInfo.
KubeProxy Version reported by the node.
:param kube_proxy_version: The kube_proxy_version of this V1NodeSystemInfo.
:type: str
"""
if kube_proxy_version is None:
raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`")
self._kube_proxy_version = kube_proxy_version
@property
def kubelet_version(self):
"""
Gets the kubelet_version of this V1NodeSystemInfo.
Kubelet Version reported by the node.
:return: The kubelet_version of this V1NodeSystemInfo.
:rtype: str
"""
return self._kubelet_version
@kubelet_version.setter
def kubelet_version(self, kubelet_version):
"""
Sets the kubelet_version of this V1NodeSystemInfo.
Kubelet Version reported by the node.
:param kubelet_version: The kubelet_version of this V1NodeSystemInfo.
:type: str
"""
if kubelet_version is None:
raise ValueError("Invalid value for `kubelet_version`, must not be `None`")
self._kubelet_version = kubelet_version
@property
def machine_id(self):
"""
Gets the machine_id of this V1NodeSystemInfo.
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
:return: The machine_id of this V1NodeSystemInfo.
:rtype: str
"""
return self._machine_id
@machine_id.setter
def machine_id(self, machine_id):
"""
Sets the machine_id of this V1NodeSystemInfo.
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
:param machine_id: The machine_id of this V1NodeSystemInfo.
:type: str
"""
if machine_id is None:
raise ValueError("Invalid value for `machine_id`, must not be `None`")
self._machine_id = machine_id
@property
def operating_system(self):
"""
Gets the operating_system of this V1NodeSystemInfo.
The Operating System reported by the node
:return: The operating_system of this V1NodeSystemInfo.
:rtype: str
"""
return self._operating_system
@operating_system.setter
def operating_system(self, operating_system):
"""
Sets the operating_system of this V1NodeSystemInfo.
The Operating System reported by the node
:param operating_system: The operating_system of this V1NodeSystemInfo.
:type: str
"""
if operating_system is None:
raise ValueError("Invalid value for `operating_system`, must not be `None`")
self._operating_system = operating_system
@property
def os_image(self):
"""
Gets the os_image of this V1NodeSystemInfo.
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
:return: The os_image of this V1NodeSystemInfo.
:rtype: str
"""
return self._os_image
@os_image.setter
def os_image(self, os_image):
"""
Sets the os_image of this V1NodeSystemInfo.
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
:param os_image: The os_image of this V1NodeSystemInfo.
:type: str
"""
if os_image is None:
raise ValueError("Invalid value for `os_image`, must not be `None`")
self._os_image = os_image
@property
def system_uuid(self):
"""
Gets the system_uuid of this V1NodeSystemInfo.
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
:return: The system_uuid of this V1NodeSystemInfo.
:rtype: str
"""
return self._system_uuid
@system_uuid.setter
def system_uuid(self, system_uuid):
"""
Sets the system_uuid of this V1NodeSystemInfo.
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
:param system_uuid: The system_uuid of this V1NodeSystemInfo.
:type: str
"""
if system_uuid is None:
raise ValueError("Invalid value for `system_uuid`, must not be `None`")
self._system_uuid = system_uuid
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NodeSystemInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
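# Illustrative use of the generated model (hypothetical values, not part of
# the generated client):
#
#   info = V1NodeSystemInfo(architecture='amd64', boot_id='b1',
#                           container_runtime_version='docker://1.5.0',
#                           kernel_version='3.16.0', kube_proxy_version='v1.7.4',
#                           kubelet_version='v1.7.4', machine_id='m1',
#                           operating_system='linux', os_image='Debian 8',
#                           system_uuid='u1')
#   info.to_dict()['machine_id']  # -> 'm1' (to_dict keys use attribute names)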
| apache-2.0 |
touchpro/android_kernel_lge_msm8226 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks the irqs
# that raise the NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the entry time of the
# NET_RX softirq and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
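# e.g. diff_msec(1000000000, 1002500000) == 2.5 (a 2.5 ms interval)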
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
		elif arg.find('dev=', 0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, skblen, rc, dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
PhyloStar/PyBayes | params_moves.py | 1 | 1027 | import numpy as np
from scipy.stats import dirichlet
import random, math
dir_alpha = 100.0
scaler_alpha = 1.25
epsilon = 1e-10
def mvDirichlet(pi):
pi_new = dirichlet.rvs(dir_alpha*pi)[0]
#print(pi, pi_new)
hastings_ratio = dirichlet.logpdf(pi, pi_new) - dirichlet.logpdf(pi_new, pi)
return pi_new, hastings_ratio
def mvDualSlider(pi):
    i, j = random.sample(range(pi.shape[0]), 2)
sum_ij = pi[i]+pi[j]
x = random.uniform(epsilon, sum_ij)
y = sum_ij -x
pi[i], pi[j] = x, y
return pi, 0.0
def mvScaler(x):
log_c = scaler_alpha*(np.random.uniform()-0.5)
c = math.exp(log_c)
x_new = x*c
return x_new, log_c
def mvVecScaler(X):
log_c = scaler_alpha*(np.random.uniform()-0.5)
c = math.exp(log_c)
X_new = X*c
return X_new, log_c
def mvSlider(x, a, b):
""" a and b are bounds
"""
x_hat = np.random.uniform(x-0.5, x+0.5)
    if x_hat < a:
        return 2.0*a - x_hat
    elif x_hat > b:
        return 2.0*b - x_hat
    else:
        return x_hat
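# Minimal usage sketch (illustrative only; in practice these proposals are
# driven by an MCMC loop that evaluates the target density and accepts or
# rejects using the returned log Hastings ratio):
if __name__ == "__main__":
    pi = np.array([0.2, 0.3, 0.5])
    pi_new, log_hr = mvDirichlet(pi)
    print(pi_new, log_hr)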
| gpl-2.0 |
gangadhar-kadam/verve_test_erp | erpnext/patches/v4_0/apply_user_permissions.py | 87 | 1999 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.hr.doctype.employee.employee import EmployeeUserDisabledError
def execute():
update_hr_permissions()
update_permissions()
remove_duplicate_user_permissions()
frappe.clear_cache()
def update_hr_permissions():
from frappe.core.page.user_permissions import user_permissions
# add set user permissions rights to HR Manager
frappe.db.sql("""update `tabDocPerm` set `set_user_permissions`=1 where parent in ('Employee', 'Leave Application')
and role='HR Manager' and permlevel=0 and `read`=1""")
# apply user permissions on Employee and Leave Application
frappe.db.sql("""update `tabDocPerm` set `apply_user_permissions`=1 where parent in ('Employee', 'Leave Application')
and role in ('Employee', 'Leave Approver') and permlevel=0 and `read`=1""")
frappe.clear_cache()
# save employees to run on_update events
for employee in frappe.db.sql_list("""select name from `tabEmployee` where docstatus < 2"""):
try:
emp = frappe.get_doc("Employee", employee)
emp.flags.ignore_mandatory = True
emp.save()
except EmployeeUserDisabledError:
pass
def update_permissions():
# clear match conditions other than owner
frappe.db.sql("""update tabDocPerm set `match`=''
where ifnull(`match`,'') not in ('', 'owner')""")
def remove_duplicate_user_permissions():
# remove duplicate user_permissions (if they exist)
for d in frappe.db.sql("""select parent, defkey, defvalue,
count(*) as cnt from tabDefaultValue
where parent not in ('__global', '__default')
group by parent, defkey, defvalue""", as_dict=1):
if d.cnt > 1:
# order by parenttype so that user permission does not get removed!
frappe.db.sql("""delete from tabDefaultValue where `parent`=%s and `defkey`=%s and
`defvalue`=%s order by parenttype limit %s""", (d.parent, d.defkey, d.defvalue, d.cnt-1))
| agpl-3.0 |
tino1b2be/LARMAS | suit/widgets.py | 10 | 4988 | from django.contrib.admin.widgets import AdminTimeWidget, AdminDateWidget
from django.forms import TextInput, Select, Textarea
from django.utils.safestring import mark_safe
from django import forms
from django.utils.translation import ugettext as _
from django.contrib.admin.templatetags.admin_static import static
class NumberInput(TextInput):
"""
HTML5 Number input
Left for backwards compatibility
"""
input_type = 'number'
class HTML5Input(TextInput):
"""
Supports any HTML5 input
http://www.w3schools.com/html/html5_form_input_types.asp
"""
def __init__(self, attrs=None, input_type=None):
self.input_type = input_type
super(HTML5Input, self).__init__(attrs)
#
class LinkedSelect(Select):
"""
Linked select - Adds link to foreign item, when used with foreign key field
"""
def __init__(self, attrs=None, choices=()):
attrs = _make_attrs(attrs, classes="linked-select")
super(LinkedSelect, self).__init__(attrs, choices)
class EnclosedInput(TextInput):
"""
Widget for bootstrap appended/prepended inputs
"""
def __init__(self, attrs=None, prepend=None, append=None):
"""
        For the prepend and append parameters use a string like %, $ or HTML
"""
self.prepend = prepend
self.append = append
super(EnclosedInput, self).__init__(attrs=attrs)
def enclose_value(self, value):
"""
        If value doesn't start with the HTML open sign "<", enclose it in an add-on tag
"""
if value.startswith("<"):
return value
if value.startswith("icon-"):
value = '<i class="%s"></i>' % value
return '<span class="add-on">%s</span>' % value
def render(self, name, value, attrs=None):
output = super(EnclosedInput, self).render(name, value, attrs)
div_classes = []
if self.prepend:
div_classes.append('input-prepend')
self.prepend = self.enclose_value(self.prepend)
output = ''.join((self.prepend, output))
if self.append:
div_classes.append('input-append')
self.append = self.enclose_value(self.append)
output = ''.join((output, self.append))
return mark_safe(
'<div class="%s">%s</div>' % (' '.join(div_classes), output))
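# Hypothetical usage sketch for EnclosedInput (the field and symbols below are
# illustrative, not part of django-suit itself):
#   price = forms.DecimalField(
#       widget=EnclosedInput(prepend='$', append='.00'))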
class AutosizedTextarea(Textarea):
"""
Autosized Textarea - textarea height dynamically grows based on user input
"""
def __init__(self, attrs=None):
new_attrs = _make_attrs(attrs, {"rows": 2}, "autosize")
super(AutosizedTextarea, self).__init__(new_attrs)
@property
def media(self):
return forms.Media(js=[static("suit/js/jquery.autosize-min.js")])
def render(self, name, value, attrs=None):
output = super(AutosizedTextarea, self).render(name, value, attrs)
output += mark_safe(
"<script type=\"text/javascript\">Suit.$('#id_%s').autosize();</script>"
% name)
return output
#
# Original date widgets with additional html
#
class SuitDateWidget(AdminDateWidget):
def __init__(self, attrs=None, format=None):
defaults = {'placeholder': _('Date:')[:-1]}
new_attrs = _make_attrs(attrs, defaults, "vDateField input-small")
super(SuitDateWidget, self).__init__(attrs=new_attrs, format=format)
def render(self, name, value, attrs=None):
output = super(SuitDateWidget, self).render(name, value, attrs)
return mark_safe(
'<div class="input-append suit-date">%s<span '
'class="add-on"><i class="icon-calendar"></i></span></div>' %
output)
class SuitTimeWidget(AdminTimeWidget):
def __init__(self, attrs=None, format=None):
defaults = {'placeholder': _('Time:')[:-1]}
new_attrs = _make_attrs(attrs, defaults, "vTimeField input-small")
super(SuitTimeWidget, self).__init__(attrs=new_attrs, format=format)
def render(self, name, value, attrs=None):
output = super(SuitTimeWidget, self).render(name, value, attrs)
return mark_safe(
'<div class="input-append suit-date suit-time">%s<span '
'class="add-on"><i class="icon-time"></i></span></div>' %
output)
class SuitSplitDateTimeWidget(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [SuitDateWidget, SuitTimeWidget]
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
out_tpl = '<div class="datetime">%s %s</div>'
return mark_safe(out_tpl % (rendered_widgets[0], rendered_widgets[1]))
def _make_attrs(attrs, defaults=None, classes=None):
result = defaults.copy() if defaults else {}
if attrs:
result.update(attrs)
if classes:
result["class"] = " ".join((classes, result.get("class", "")))
return result
| agpl-3.0 |
offbye/paparazzi | sw/tools/calibration/calibrate_gyro.py | 87 | 4686 | #! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# calibrate gyrometers using turntable measurements
#
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from scipy import linspace, polyval, stats
import matplotlib.pyplot as plt
import calibration_utils
#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#
def main():
usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store", type=int, default=-1,
help="aircraft id to use")
parser.add_option("-t", "--tt_id", dest="tt_id",
action="store", type=int, default=-1,
help="turntable id to use")
parser.add_option("-a", "--axis", dest="axis",
type="choice", choices=['p', 'q', 'r'],
help="axis to calibrate (p, q, r)",
action="store")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.ac_id < 0 or options.ac_id > 255:
parser.error("Specify a valid aircraft id number!")
if options.tt_id < 0 or options.tt_id > 255:
parser.error("Specify a valid turntable id number!")
if options.verbose:
print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))
samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)
if len(samples) == 0:
print("Error: found zero matching messages in log file!")
print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
sys.exit(1)
if options.verbose:
print("found "+str(len(samples))+" records")
if options.axis == 'p':
axis_idx = 1
elif options.axis == 'q':
axis_idx = 2
elif options.axis == 'r':
axis_idx = 3
else:
parser.error("Specify a valid axis!")
#Linear regression using stats.linregress
t = samples[:, 0]
xn = samples[:, axis_idx]
(a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
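    # stats.linregress returns (slope, intercept, r-value, p-value, stderr);
    # here a_s is the fitted slope (raw gyro units per turntable rate unit)
    # and b_s the zero-rate reading, i.e. the gyro neutral.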
print('Linear regression using stats.linregress')
print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))
#
    # overlay fitted value
#
ovl_omega = linspace(1, 7.5, 10)
ovl_adc = polyval([a_s, b_s], ovl_omega)
plt.title('Linear Regression Example')
plt.subplot(3, 1, 1)
plt.plot(samples[:, 1])
plt.plot(samples[:, 2])
plt.plot(samples[:, 3])
plt.legend(['p', 'q', 'r'])
plt.subplot(3, 1, 2)
plt.plot(samples[:, 0])
plt.subplot(3, 1, 3)
plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
plt.plot(ovl_omega, ovl_adc, 'r')
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
spektom/incubator-airflow | tests/dags/subdir2/test_dont_ignore_this.py | 5 | 1147 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models import DAG
from airflow.operators.bash import BashOperator
DEFAULT_DATE = datetime(2019, 12, 1)
dag = DAG(dag_id='test_dag_under_subdir2', start_date=DEFAULT_DATE, schedule_interval=None)
task = BashOperator(
task_id='task1',
bash_command='echo "test dag under sub directory subdir2"',
dag=dag)
| apache-2.0 |
hakuna-m/wubiuefi | src/pypack/altgraph/Dot.py | 9 | 8425 | '''
Interface to the dot language
=============================
The B{Dot} module provides a simple interface to the
file format used in the U{graphviz<http://www.research.att.com/sw/tools/graphviz/>}
program. The module is intended to offload the most tedious part of the process
(the B{dot} file generation) while transparently exposing most of its features.
To display the graphs or to generate image files the U{graphviz<http://www.research.att.com/sw/tools/graphviz/>}
package needs to be installed on the system, moreover the C{dot} and C{dotty} programs must
be accesible in the program path so that they can be ran from processes spawned
within the module. See the L{Dot} documentation for further information on the setup.
Example usage
-------------
Here is a typical usage::
from altgraph import Graph, Dot
# create a graph
edges = [ (1,2), (1,3), (3,4), (3,5), (4,5), (5,4) ]
graph = Graph.Graph(edges)
# create a dot representation of the graph
dot = Dot.Dot(graph)
# display the graph
dot.display()
# save the dot representation into the mydot.dot file
dot.save_dot(file_name='mydot.dot')
# save dot file as gif image into the graph.gif file
dot.save_img(file_name='graph', file_type='gif')
Customizing the output
----------------------
The graph drawing process may be customized by passing
valid B{dot} parameters for the nodes and edges. For a list of all
parameters see the U{graphviz<http://www.research.att.com/sw/tools/graphviz/>}
documentation.
Example::
# customizing the way the overall graph is drawn
dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
# customizing node drawing
dot.node_style(1, label='BASE_NODE',shape='box', color='blue' )
dot.node_style(2, style='filled', fillcolor='red')
# customizing edge drawing
dot.edge_style(1, 2, style='dotted')
dot.edge_style(3, 5, arrowhead='dot', label='binds', labelangle='90')
dot.edge_style(4, 5, arrowsize=2, style='bold')
B{Observation}: dotty (invoked via L{Dot.display}) may not be able to
display all graphics styles. To verify the output save it to an image file
and look at it that way.
Valid attributes
----------------
- dot styles, passed via the L{Dot.style} method::
rankdir = 'LR' (draws the graph horizontally, left to right)
ranksep = number (rank separation in inches)
- node attributes, passed via the L{Dot.node_style} method::
style = 'filled' | 'invisible' | 'diagonals' | 'rounded'
shape = 'box' | 'ellipse' | 'circle' | 'point' | 'triangle'
- edge attributes, passed via the L{Dot.edge_style} method::
style = 'dashed' | 'dotted' | 'solid' | 'invis' | 'bold'
arrowhead = 'box' | 'crow' | 'diamond' | 'dot' | 'inv' | 'none' | 'tee' | 'vee'
weight = number (the larger the number the closer the nodes will be)
- valid U{graphviz colors<http://www.research.att.com/~erg/graphviz/info/colors.html>}
- for more details on how to control the graph drawing process see the
U{graphviz reference <http://www.research.att.com/sw/tools/graphviz/refs.html>}.
'''
import os
from altgraph import GraphError
from altgraph.compat import *
class Dot(object):
'''
A class providing a B{graphviz} (dot language) representation
allowing a fine grained control over how the graph is being
displayed.
If the C{dot} and C{dotty} programs are not in the current system path
their location needs to be specified in the L{constructor<__init__>}.
For detailed example usage see the L{Dot} module documentation.
'''
def __init__(self, graph=None, nodes=None, edgefn=None, nodevisitor=None, edgevisitor=None, name="G", dot='dot', dotty='dotty', neato='neato'):
'''
Initialization.
'''
self.name, self.attr = name, {}
self.temp_dot = "tmp_dot.dot"
self.temp_neo = "tmp_neo.dot"
self.dot, self.dotty, self.neato = dot, dotty, neato
self.nodes, self.edges = {}, {}
if graph is not None and nodes is None:
nodes = graph
if graph is not None and edgefn is None:
def edgefn(node, graph=graph):
return imap(graph.tail, graph.out_edges(node))
if nodes is None:
nodes = ()
seen = set()
for node in nodes:
if nodevisitor is None:
style = {}
else:
style = nodevisitor(node)
if style is not None:
self.node_style(node, **style)
seen.add(node)
if edgefn is not None:
for head in seen:
for tail in ifilter(seen.__contains__, edgefn(head)):
if edgevisitor is None:
edgestyle = {}
else:
edgestyle = edgevisitor(head, tail)
if edgestyle is not None:
self.edge_style(head, tail, **edgestyle)
def style(self, **attr):
'''
Changes the overall style
'''
self.attr = attr
def display(self, mode='dot'):
'''
Displays the current graph via dotty
'''
if mode == 'neato':
self.save_dot(self.temp_neo)
neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
os.system(neato_cmd)
else:
self.save_dot(self.temp_dot)
plot_cmd = "%s %s" % (self.dotty, self.temp_dot)
os.system(plot_cmd)
def node_style(self, node, **kwargs):
'''
Modifies a node style to the dot representation.
'''
if node not in self.edges:
self.edges[node] = {}
self.nodes[node] = kwargs
def all_node_style(self, **kwargs):
'''
Modifies all node styles
'''
for node in self.nodes:
self.node_style(node, **kwargs)
def edge_style(self, head, tail, **kwargs):
'''
Modifies an edge style to the dot representation.
'''
try:
if tail not in self.edges[head]:
self.edges[head][tail]= {}
self.edges[head][tail] = kwargs
except KeyError:
raise GraphError("invalid edge %s -> %s " % (head, tail) )
def iterdot(self):
# write graph title
yield 'digraph %s {\n' % (self.name,)
# write overall graph attributes
for attr_name, attr_value in self.attr.iteritems():
yield '%s="%s";' % (attr_name, attr_value)
yield '\n'
# some reusable patterns
cpatt = '%s="%s",' # to separate attributes
epatt = '];\n' # to end attributes
# write node attributes
for node_name, node_attr in self.nodes.iteritems():
yield '\t"%s" [' % (node_name,)
for attr_name, attr_value in node_attr.iteritems():
yield cpatt % (attr_name, attr_value)
yield epatt
# write edge attributes
for head in self.edges:
for tail in self.edges[head]:
yield '\t"%s" -> "%s" [' % (head, tail)
for attr_name, attr_value in self.edges[head][tail].iteritems():
yield cpatt % (attr_name, attr_value)
yield epatt
# finish file
yield '}\n'
def __iter__(self):
return self.iterdot()
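    # Illustrative: since __iter__ streams dot-language chunks, the source can
    # be collected without touching disk, e.g. source = ''.join(Dot(graph))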
def save_dot(self, file_name=None):
'''
Saves the current graph representation into a file
'''
if not file_name:
file_name = self.temp_dot
fp = open(file_name, "w")
write = fp.write
for chunk in self.iterdot():
write(chunk)
fp.close()
def save_img(self, file_name="out", file_type="gif", mode='dot'):
'''
Saves the dot file as an image file
'''
if mode == 'neato':
self.save_dot(self.temp_neo)
neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
os.system(neato_cmd)
plot_cmd = self.neato
else:
self.save_dot(self.temp_dot)
plot_cmd = self.dot
file_name = "%s.%s" % (file_name, file_type)
create_cmd = "%s -T%s %s -o %s" % (plot_cmd, file_type, self.temp_dot, file_name)
os.system(create_cmd)
| gpl-2.0 |
propublica/Capitol-Words | capitolweb/scraper/management/commands/run_crec_scraper.py | 2 | 1705 | import logging
from datetime import datetime
from datetime import timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from scraper.models import CRECScraperResult
from scraper.crec_scraper import CRECScraper
class Command(BaseCommand):
help = 'Scrapes CREC from gpo.gov for the given date range and stores in s3.'
def add_arguments(self, parser):
parser.add_argument(
'--debug',
help='If true, will not upload to es and instead print to stdout.',
default=False,
)
parser.add_argument(
'--start_date',
help='Start of date range to pull data for, inclusive.',
type=lambda d: datetime.strptime(d, '%Y-%m-%d'),
)
parser.add_argument(
'--end_date',
help='End of date range to pull data for, exclusive.',
type=lambda d: datetime.strptime(d, '%Y-%m-%d'),
default=None,
)
parser.add_argument(
'--s3_bucket',
help='Location of crec data.',
default=settings.CREC_STAGING_S3_BUCKET
)
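    # Hypothetical invocation (the management command name is derived from
    # this module's file name):
    #   ./manage.py run_crec_scraper --start_date 2017-01-01 --end_date 2017-02-01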
def handle(self, *args, **options):
start_date = options['start_date']
if options['end_date'] is None:
end_date = datetime.utcnow()
else:
end_date = options['end_date']
        # datetime.replace returns a new object, so assign the results back
        start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)
scraper = CRECScraper(options['s3_bucket'])
while start_date < end_date:
result = scraper.scrape_files_for_date(start_date)
start_date += timedelta(days=1)
| bsd-3-clause |
Vixionar/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as msg:
new_msg = (
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"\n'
'Error message: %s.') % (table_name, geo_col, msg)
six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
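# Illustrative call (values made up): for a 3D column registered with SRID
# 3857, get_geometry_type('CITIES', 'GEOM') would return
# ('GeometryField', {'srid': 3857, 'dim': 3}).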
| bsd-3-clause |
The-WebOps-Club/odia-forum | pybbm_tag/views.py | 1 | 2818 | from django.shortcuts import render
from pybb.models import *
from pybbm_tag.models import Tag
from pybb.views import ForumView,AddPostView,EditPostView,TopicView
def add_tag(request,**kwargs):
# check permissions before calling this function
	# in kwargs we expect the LABEL of the tag to add (not the object) and the TOPIC object (not the name).
topic = kwargs['topic']
tagname = kwargs['tag']
lst = Tag.objects.filter(label = tagname)
if not lst.count() == 0:
lst[0].topics.add(topic)
lst[0].save()
else:
tag = Tag(label = tagname,desc="Empty")
tag.save()
tag.topics.add(topic)
def remove_all_tags(request,**kwargs):
topic = kwargs['topic']
for i in Tag.objects.filter(topics__in = [topic]):
i.topics.remove(topic)
def remove_tag(request,**kwargs):
# check permissions before calling this function.
topic = kwargs['topic']
tagname = kwargs['tag']
lst = Tag.objects.filter(label = tagname)
lst[0].topics.remove(topic)
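# Hypothetical usage from a view, after the permission check the comments
# above call for:
#   add_tag(request, topic=some_topic, tag='announcements')
#   remove_tag(request, topic=some_topic, tag='announcements')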
# tag additions to the views that are affected by tags.
class AddPostViewWrapper(AddPostView):
	def post(self, request, *args, **kwargs):
		ret = super(AddPostViewWrapper, self).post(request, *args, **kwargs)
		try:
			taglist = request.POST['taglist'].split('+')
			for i in taglist:
				add_tag(request, topic=self.object.topic, tag=i)
		except KeyError:
			# no 'taglist' field submitted; nothing to tag
			pass
		return ret
def get_context_data(self,**kwargs):
ctx = super(AddPostViewWrapper, self).get_context_data(**kwargs)
if ctx['forum']:
ctx['taglist_input'] = 1
return ctx
class ForumViewWrapper(ForumView):
def get_context_data(self):
ctx = super(ForumViewWrapper, self).get_context_data()
topic_list = ctx['topic_list']
tags = []
for i in topic_list:
tags.append(Tag.objects.filter(topics__in = [i]))
ctx['tags'] = Tag.objects.all()
return ctx
class TopicViewWrapper(TopicView):
def get_context_data(self):
ctx = super(TopicViewWrapper, self).get_context_data()
ctx['tags'] = Tag.objects.all()
return ctx
class EditPostViewWrapper(EditPostView):
def post(self, request, *args, **kwargs):
ret = super(EditPostViewWrapper, self).post(request, *args, **kwargs)
try:
taglist = request.POST['taglist'].split('+')
remove_all_tags(request, topic=self.object.topic)
for i in taglist:
add_tag(request, topic=self.object.topic, tag=i)
except KeyError:
pass
return ret
	def make_tag_string(self, topic):
		# Join the labels of all tags attached to this topic with '+' separators.
		labels = [t.label for t in Tag.objects.filter(topics__in=[topic])]
		return "+".join(labels)
def get_context_data(self, **kwargs):
ctx = super(EditPostViewWrapper, self).get_context_data(**kwargs)
post = ctx['post']
if post.topic.user == self.request.user:
ctx['taglist_input'] = 1
ctx['taglist_initial'] = self.make_tag_string(post.topic)
return ctx | gpl-2.0 |
Gaia3D/QGIS | python/ext-libs/owslib/wms.py | 28 | 23814 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : Sean Gillies <[email protected]>
# Julien Anguenot <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
"""
API for Web Map Service (WMS) methods and metadata.
Currently supports only version 1.1.1 of the WMS protocol.
"""
import cgi
import urllib2
from urllib import urlencode
import warnings
from etree import etree
from .util import openURL, testXMLValue, extract_xml_list, xmltag_split
from fgdc import Metadata
from iso import MD_Metadata
class ServiceException(Exception):
"""WMS ServiceException
Attributes:
message -- short error message
xml -- full xml error message from server
"""
def __init__(self, message, xml):
self.message = message
self.xml = xml
def __str__(self):
return repr(self.message)
class CapabilitiesError(Exception):
pass
class WebMapService(object):
"""Abstraction for OGC Web Map Service (WMS).
Implements IWebMapService.
"""
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self, url, version='1.1.1', xml=None,
username=None, password=None, parse_remote_metadata=False
):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = version
self._capabilities = None
# Authentication handled by Reader
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url)
# avoid building capabilities metadata if the response is a ServiceExceptionReport
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _getcapproperty(self):
if not self._capabilities:
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
self._capabilities = ServiceMetadata(reader.read(self.url))
return self._capabilities
def _buildMetadata(self, parse_remote_metadata=False):
''' set up capabilities metadata objects '''
#serviceIdentification metadata
serviceelem=self._capabilities.find('Service')
self.identification=ServiceIdentification(serviceelem, self.version)
#serviceProvider metadata
self.provider=ServiceProvider(serviceelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find('Capability/Request')[:]:
self.operations.append(OperationMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents={}
caps = self._capabilities.find('Capability')
#recursively gather content metadata for all layer elements.
#To the WebMapService.contents store only metadata of named layers.
def gather_layers(parent_elem, parent_metadata):
for index, elem in enumerate(parent_elem.findall('Layer')):
cm = ContentMetadata(elem, parent=parent_metadata, index=index+1, parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
warnings.warn('Content metadata for layer "%s" already exists. Using child layer' % cm.id)
self.contents[cm.id] = cm
gather_layers(elem, cm)
gather_layers(caps, None)
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getcapabilities(self):
"""Request and return capabilities document from the WMS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
u = self._open(reader.capabilities_url(self.url))
# check for service exceptions, and return
if u.info().gettype() == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = str(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getmap(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
method='Get',
**kwargs
):
"""Request and return an image from the WMS as a file-like object.
Parameters
----------
layers : list
List of content layer names.
styles : list
Optional list of named styles, must be the same length as the
layers list.
srs : string
A spatial reference system identifier.
bbox : tuple
(left, bottom, right, top) in srs units.
format : string
Output image format such as 'image/jpeg'.
size : tuple
(width, height) in pixels.
transparent : bool
Optional. Transparent background if True.
bgcolor : string
Optional. Image background color.
method : string
Optional. HTTP DCP method name: Get or Post.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')
>>> img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\
styles=[''],\
srs='EPSG:4326',\
bbox=(-70.8, 42, -70, 42.8),\
size=(300, 300),\
format='image/jpeg',\
transparent=True)
        >>> out = open('example.jpg', 'wb')
>>> out.write(img.read())
>>> out.close()
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetMap').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'version': self.version, 'request': 'GetMap'}
# check layers and styles
assert len(layers) > 0
request['layers'] = ','.join(layers)
if styles:
assert len(styles) == len(layers)
request['styles'] = ','.join(styles)
else:
request['styles'] = ''
# size
request['width'] = str(size[0])
request['height'] = str(size[1])
request['srs'] = str(srs)
request['bbox'] = ','.join([repr(x) for x in bbox])
request['format'] = str(format)
request['transparent'] = str(transparent).upper()
request['bgcolor'] = '0x' + bgcolor[1:7]
request['exceptions'] = str(exceptions)
if time is not None:
request['time'] = str(time)
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
data = urlencode(request)
u = openURL(base_url, data, method, username = self.username, password = self.password)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = unicode(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getfeatureinfo(self):
raise NotImplementedError
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root=infoset
self.type = testXMLValue(self._root.find('Name'))
self.version = version
self.title = testXMLValue(self._root.find('Title'))
self.abstract = testXMLValue(self._root.find('Abstract'))
self.keywords = extract_xml_list(self._root.findall('KeywordList/Keyword'))
self.accessconstraints = testXMLValue(self._root.find('AccessConstraints'))
self.fees = testXMLValue(self._root.find('Fees'))
class ServiceProvider(object):
''' Implements IServiceProviderMetatdata '''
def __init__(self, infoset):
self._root=infoset
name=self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
if name is not None:
self.name=name.text
else:
self.name=None
self.url=self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
#contact metadata
contact = self._root.find('ContactInformation')
## sometimes there is a contact block that is empty, so make
## sure there are children to parse
if contact is not None and contact[:] != []:
self.contact = ContactMetadata(contact)
else:
self.contact = None
def getContentByName(self, name):
"""Return a named content item."""
for item in self.contents:
if item.name == name:
return item
raise KeyError, "No content named %s" % name
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class ContentMetadata:
"""
Abstraction for WMS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, index=0, parse_remote_metadata=False, timeout=30):
if elem.tag != 'Layer':
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self.id = self.name = testXMLValue(elem.find('Name'))
# layer attributes
self.queryable = int(elem.attrib.get('queryable', 0))
self.cascaded = int(elem.attrib.get('cascaded', 0))
self.opaque = int(elem.attrib.get('opaque', 0))
self.noSubsets = int(elem.attrib.get('noSubsets', 0))
self.fixedWidth = int(elem.attrib.get('fixedWidth', 0))
self.fixedHeight = int(elem.attrib.get('fixedHeight', 0))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find('Title'))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find('Abstract'))
# bboxes
b = elem.find('BoundingBox')
self.boundingBox = None
if b is not None:
try: #sometimes the SRS attribute is (wrongly) not provided
srs=b.attrib['SRS']
except KeyError:
srs=None
self.boundingBox = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
)
elif self.parent:
if hasattr(self.parent, 'boundingBox'):
self.boundingBox = self.parent.boundingBox
# ScaleHint
sh = elem.find('ScaleHint')
self.scaleHint = None
if sh is not None:
self.scaleHint = {'min': sh.attrib['min'], 'max': sh.attrib['max']}
attribution = elem.find('Attribution')
if attribution is not None:
self.attribution = dict()
title = attribution.find('Title')
url = attribution.find('OnlineResource')
logo = attribution.find('LogoURL')
if title is not None:
self.attribution['title'] = title.text
if url is not None:
self.attribution['url'] = url.attrib['{http://www.w3.org/1999/xlink}href']
if logo is not None:
self.attribution['logo_size'] = (int(logo.attrib['width']), int(logo.attrib['height']))
self.attribution['logo_url'] = logo.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
b = elem.find('LatLonBoundingBox')
if b is not None:
self.boundingBoxWGS84 = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
)
elif self.parent:
self.boundingBoxWGS84 = self.parent.boundingBoxWGS84
else:
self.boundingBoxWGS84 = None
#SRS options
self.crsOptions = []
#Copy any parent SRS options (they are inheritable properties)
if self.parent:
self.crsOptions = list(self.parent.crsOptions)
#Look for SRS option attached to this layer
if elem.find('SRS') is not None:
## some servers found in the wild use a single SRS
## tag containing a whitespace separated list of SRIDs
## instead of several SRS tags. hence the inner loop
for srslist in map(lambda x: x.text, elem.findall('SRS')):
if srslist:
for srs in srslist.split():
self.crsOptions.append(srs)
#Get rid of duplicate entries
self.crsOptions = list(set(self.crsOptions))
#Set self.crsOptions to None if the layer (and parents) had no SRS options
if len(self.crsOptions) == 0:
#raise ValueError('%s no SRS available!?' % (elem,))
#Comment by D Lowe.
#Do not raise ValueError as it is possible that a layer is purely a parent layer and does not have SRS specified. Instead set crsOptions to None
# Comment by Jachym:
# Do not set it to None, but to [], which will make the code
# work further. Fixed by anthonybaxter
self.crsOptions=[]
#Styles
self.styles = {}
#Copy any parent styles (they are inheritable properties)
if self.parent:
self.styles = self.parent.styles.copy()
#Get the styles for this layer (items with the same name are replaced)
for s in elem.findall('Style'):
name = s.find('Name')
title = s.find('Title')
if name is None or title is None:
raise ValueError('%s missing name or title' % (s,))
style = { 'title' : title.text }
# legend url
legend = s.find('LegendURL/OnlineResource')
if legend is not None:
style['legend'] = legend.attrib['{http://www.w3.org/1999/xlink}href']
self.styles[name.text] = style
# keywords
self.keywords = [f.text for f in elem.findall('KeywordList/Keyword')]
# timepositions - times for which data is available.
self.timepositions=None
self.defaulttimeposition = None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='time':
if extent.text:
self.timepositions=extent.text.split(',')
self.defaulttimeposition = extent.attrib.get("default")
break
# Elevations - available vertical levels
self.elevations=None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='elevation':
if extent.text:
self.elevations=extent.text.split(',')
break
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': testXMLValue(m.find('Format')),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
content = urllib2.urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
if metadataUrl['type'] is not None:
if metadataUrl['type'] == 'FGDC':
metadataUrl['metadata'] = Metadata(doc)
if metadataUrl['type'] == 'TC211':
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception, err:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
# DataURLs
self.dataUrls = []
for m in elem.findall('DataURL'):
dataUrl = {
'format': m.find('Format').text.strip(),
'url': m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
}
self.dataUrls.append(dataUrl)
self.layers = []
for child in elem.findall('Layer'):
self.layers.append(ContentMetadata(child, self))
def __str__(self):
return 'Layer Name: %s Title: %s' % (self.name, self.title)
class OperationMetadata:
"""Abstraction for WMS OperationMetadata.
Implements IOperationMetadata.
"""
def __init__(self, elem):
"""."""
self.name = xmltag_split(elem.tag)
# formatOptions
self.formatOptions = [f.text for f in elem.findall('Format')]
self.methods = []
for verb in elem.findall('DCPType/HTTP/*'):
url = verb.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type' : xmltag_split(verb.tag), 'url': url})
class ContactMetadata:
"""Abstraction for contact details advertised in GetCapabilities.
"""
def __init__(self, elem):
name = elem.find('ContactPersonPrimary/ContactPerson')
if name is not None:
self.name=name.text
else:
self.name=None
email = elem.find('ContactElectronicMailAddress')
if email is not None:
self.email=email.text
else:
self.email=None
self.address = self.city = self.region = None
self.postcode = self.country = None
address = elem.find('ContactAddress')
if address is not None:
street = address.find('Address')
if street is not None: self.address = street.text
city = address.find('City')
if city is not None: self.city = city.text
region = address.find('StateOrProvince')
if region is not None: self.region = region.text
postcode = address.find('PostCode')
if postcode is not None: self.postcode = postcode.text
country = address.find('Country')
if country is not None: self.country = country.text
organization = elem.find('ContactPersonPrimary/ContactOrganization')
if organization is not None: self.organization = organization.text
        else: self.organization = None
position = elem.find('ContactPosition')
if position is not None: self.position = position.text
else: self.position = None
class WMSCapabilitiesReader:
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
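    # Illustrative result: capabilities_url('http://example.com/wms?map=foo')
    # -> 'http://example.com/wms?map=foo&service=WMS&request=GetCapabilities&version=1.1.1'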
def read(self, service_url):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
getcaprequest = self.capabilities_url(service_url)
#now split it up again to use the generic openURL function...
spliturl=getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = self.username, password = self.password)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st)
| gpl-2.0 |
mammadori/pyglet | tools/wraptypes/wrap.py | 29 | 9533 | #!/usr/bin/env python
'''Generate a Python ctypes wrapper file for a header file.
Usage example::
wrap.py -lGL -oGL.py /usr/include/GL/gl.h
>>> from GL import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypesparser import *
import textwrap
import sys
class CtypesWrapper(CtypesParser, CtypesTypeVisitor):
file=None
def begin_output(self, output_file, library, link_modules=(),
emit_filenames=(), all_headers=False):
self.library = library
self.file = output_file
self.all_names = []
self.known_types = {}
self.structs = set()
self.enums = set()
self.emit_filenames = emit_filenames
self.all_headers = all_headers
self.linked_symbols = {}
for name in link_modules:
module = __import__(name, globals(), locals(), ['foo'])
for symbol in dir(module):
if symbol not in self.linked_symbols:
self.linked_symbols[symbol] = '%s.%s' % (name, symbol)
self.link_modules = link_modules
self.print_preamble()
self.print_link_modules_imports()
def wrap(self, filename, source=None):
assert self.file, 'Call begin_output first'
self.parse(filename, source)
def end_output(self):
self.print_epilogue()
self.file = None
def does_emit(self, symbol, filename):
return self.all_headers or filename in self.emit_filenames
def print_preamble(self):
import textwrap
import time
print >> self.file, textwrap.dedent("""
'''Wrapper for %(library)s
Generated with:
%(argv)s
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library(%(library)r)
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
""" % {
'library': self.library,
'date': time.ctime(),
'class': self.__class__.__name__,
'argv': ' '.join(sys.argv),
}).lstrip()
def print_link_modules_imports(self):
for name in self.link_modules:
print >> self.file, 'import %s' % name
print >> self.file
def print_epilogue(self):
print >> self.file
print >> self.file, '\n'.join(textwrap.wrap(
'__all__ = [%s]' % ', '.join([repr(n) for n in self.all_names]),
width=78,
break_long_words=False))
def handle_ctypes_constant(self, name, value, filename, lineno):
if self.does_emit(name, filename):
print >> self.file, '%s = %r' % (name, value),
print >> self.file, '\t# %s:%d' % (filename, lineno)
self.all_names.append(name)
def handle_ctypes_type_definition(self, name, ctype, filename, lineno):
if self.does_emit(name, filename):
self.all_names.append(name)
if name in self.linked_symbols:
print >> self.file, '%s = %s' % \
(name, self.linked_symbols[name])
else:
ctype.visit(self)
self.emit_type(ctype)
print >> self.file, '%s = %s' % (name, str(ctype)),
print >> self.file, '\t# %s:%d' % (filename, lineno)
else:
self.known_types[name] = (ctype, filename, lineno)
def emit_type(self, t):
t.visit(self)
for s in t.get_required_type_names():
if s in self.known_types:
if s in self.linked_symbols:
print >> self.file, '%s = %s' % (s, self.linked_symbols[s])
else:
s_ctype, s_filename, s_lineno = self.known_types[s]
s_ctype.visit(self)
self.emit_type(s_ctype)
print >> self.file, '%s = %s' % (s, str(s_ctype)),
print >> self.file, '\t# %s:%d' % (s_filename, s_lineno)
del self.known_types[s]
def visit_struct(self, struct):
if struct.tag in self.structs:
return
self.structs.add(struct.tag)
base = {True: 'Union', False: 'Structure'}[struct.is_union]
print >> self.file, 'class struct_%s(%s):' % (struct.tag, base)
print >> self.file, ' __slots__ = ['
if not struct.opaque:
for m in struct.members:
print >> self.file, " '%s'," % m[0]
print >> self.file, ' ]'
# Set fields after completing class, so incomplete structs can be
# referenced within struct.
for name, typ in struct.members:
self.emit_type(typ)
print >> self.file, 'struct_%s._fields_ = [' % struct.tag
if struct.opaque:
print >> self.file, " ('_opaque_struct', c_int)"
self.structs.remove(struct.tag)
else:
for m in struct.members:
print >> self.file, " ('%s', %s)," % (m[0], m[1])
print >> self.file, ']'
print >> self.file
def visit_enum(self, enum):
if enum.tag in self.enums:
return
self.enums.add(enum.tag)
print >> self.file, 'enum_%s = c_int' % enum.tag
for name, value in enum.enumerators:
self.all_names.append(name)
print >> self.file, '%s = %d' % (name, value)
def handle_ctypes_function(self, name, restype, argtypes, filename, lineno):
if self.does_emit(name, filename):
# Also emit any types this func requires that haven't yet been
# written.
self.emit_type(restype)
for a in argtypes:
self.emit_type(a)
self.all_names.append(name)
print >> self.file, '# %s:%d' % (filename, lineno)
print >> self.file, '%s = _lib.%s' % (name, name)
print >> self.file, '%s.restype = %s' % (name, str(restype))
print >> self.file, '%s.argtypes = [%s]' % \
(name, ', '.join([str(a) for a in argtypes]))
print >> self.file
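    # Illustrative sketch (comment only, not in the original source): for a
    # hypothetical declaration ``int foo(int x);`` seen at example.h:1, the
    # prints above would emit roughly:
    #
    #   # example.h:1
    #   foo = _lib.foo
    #   foo.restype = c_int
    #   foo.argtypes = [c_int]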
def handle_ctypes_variable(self, name, ctype, filename, lineno):
# This doesn't work.
#self.all_names.append(name)
#print >> self.file, '%s = %s.indll(_lib, %r)' % \
# (name, str(ctype), name)
pass
def main(*argv):
import optparse
import sys
import os.path
usage = 'usage: %prog [options] <header.h>'
op = optparse.OptionParser(usage=usage)
op.add_option('-o', '--output', dest='output',
help='write wrapper to FILE', metavar='FILE')
op.add_option('-l', '--library', dest='library',
help='link to LIBRARY', metavar='LIBRARY')
op.add_option('-D', '--define', dest='defines', default=[],
help='define token NAME=VALUE', action='append')
op.add_option('-i', '--include-file', action='append', dest='include_files',
help='assume FILE is included', metavar='FILE',
default=[])
op.add_option('-I', '--include-dir', action='append', dest='include_dirs',
help='add DIR to include search path', metavar='DIR',
default=[])
op.add_option('-m', '--link-module', action='append', dest='link_modules',
help='use symbols from MODULE', metavar='MODULE',
default=[])
op.add_option('-a', '--all-headers', action='store_true',
dest='all_headers',
help='include symbols from all headers', default=False)
(options, args) = op.parse_args(list(argv[1:]))
if len(args) < 1:
print >> sys.stderr, 'No header files specified.'
sys.exit(1)
headers = args
if options.library is None:
options.library = os.path.splitext(headers[0])[0]
if options.output is None:
options.output = '%s.py' % options.library
wrapper = CtypesWrapper()
wrapper.begin_output(open(options.output, 'w'),
library=options.library,
emit_filenames=headers,
link_modules=options.link_modules,
all_headers=options.all_headers)
wrapper.preprocessor_parser.include_path += options.include_dirs
for define in options.defines:
name, value = define.split('=')
wrapper.preprocessor_parser.define(name, value)
for file in options.include_files:
wrapper.wrap(file)
for header in headers:
wrapper.wrap(header)
wrapper.end_output()
print 'Wrapped to %s' % options.output
if __name__ == '__main__':
main(*sys.argv)
| bsd-3-clause |
rlkelly/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | sandbox/Chapter10_/github_datapull.py | 108 | 2852 |
try:
import numpy as np
from requests import get
from bs4 import BeautifulSoup
stars_to_explore = ( 2**np.arange( -1, 16 ) ).astype("int")
forks_to_explore = ( 2**np.arange( -1, 16 ) ).astype("int")
repo_with_stars = np.ones_like( stars_to_explore )
repo_with_forks = np.ones_like( forks_to_explore )
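# (illustrative note, not in the original script) the thresholds double
# from 0 up to 2**15; the leading 2**-1 == 0.5 entry truncates to 0 on
# the int cast:
# >>> ( 2**np.arange( -1, 4 ) ).astype("int")
# array([0, 1, 2, 4, 8])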
URL = "https://github.com/search"
print "Scrapping data from Github. Sorry Github..."
print "The data is contained in variables `foo_to_explore` and `repo_with_foo`"
print
print "stars first..."
payload = {"q":""}
for i, _star in enumerate(stars_to_explore):
payload["q"] = "stars:>=%d"%_star
r = get( URL, params = payload )
soup = BeautifulSoup( r.text )
try:
h3 = soup.find( class_="sort-bar").find( "h3" ).text #hopefully the github search results page plays nicely.
value = int( h3.split(" ")[2].replace(",", "" ) )
except AttributeError as e:
#there might be less than 10 repos, so I'll count the number of display results
value = len( soup.findAll(class_= "mega-icon-public-repo" ) )
repo_with_stars[i] = value
print "number of repos with greater than or equal to %d stars: %d"%(_star, value )
#repo_with_stars = repo_with_stars.astype("float")/repo_with_stars[0]
print
print "forks second..."
payload = {"q":""}
for i, _fork in enumerate(forks_to_explore):
payload["q"] = "forks:>=%d"%_fork
r = get( URL, params = payload )
soup = BeautifulSoup( r.text )
try:
h3 = soup.find( class_="sort-bar").find( "h3" ).text #hopefully the github search results page plays nicely.
value = int( h3.split(" ")[2].replace(",", "" ) )
except AttributeError as e:
#there might be less than 10 repos, so I'll count the number of display results
value = len( soup.findAll(class_= "mega-icon-public-repo" ) )
repo_with_forks[i] = value
print "number of repos with greater than or equal to %d forks: %d"%(_fork, value )
#repo_with_forks = repo_with_forks.astype("float")/repo_with_forks[0]
np.savetxt( "data/gh_forks.csv", np.concatenate( [forks_to_explore, repo_with_forks], axis=1) )
np.savetxt( "data/gh_stars.csv", np.concatenate( [stars_to_explore, repo_with_stars], axis=1) )
except ImportError as e:
print e
print "requests / BeautifulSoup not found. Using data pulled on Feburary 11, 2013"
_data = np.genfromtxt( "data/gh_forks.csv", delimiter = "," ) #cehck this.
forks_to_explore = _data[:,0]
repo_with_forks = _data[:,1]
_data = np.genfromtxt( "data/gh_stars.csv", delimiter = "," ) #cehck this.
stars_to_explore = _data[:,0]
repo_with_stars = _data[:,1]
| mit |
Titan-C/sympy | sympy/crypto/crypto.py | 17 | 56885 | # -*- coding: utf-8 -*-
"""
This file contains some classical ciphers and routines
implementing a linear-feedback shift register (LFSR)
and the Diffie-Hellman key exchange.
"""
from __future__ import print_function
from string import whitespace, ascii_uppercase as uppercase, printable
from sympy import nextprime
from sympy.core import Rational, Symbol
from sympy.core.numbers import igcdex, mod_inverse
from sympy.core.compatibility import range
from sympy.matrices import Matrix
from sympy.ntheory import isprime, totient, primitive_root
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly
from sympy.utilities.misc import filldedent, translate
from sympy.utilities.iterables import uniq
from sympy.utilities.randtest import _randrange
def AZ(s=None):
"""Return the letters of ``s`` in uppercase. In case more than
one string is passed, each of them will be processed and a list
of upper case strings will be returned.
Examples
========
>>> from sympy.crypto.crypto import AZ
>>> AZ('Hello, world!')
'HELLOWORLD'
>>> AZ('Hello, world!'.split())
['HELLO', 'WORLD']
See Also
========
check_and_join
"""
if not s:
return uppercase
t = type(s) is str
if t:
s = [s]
rv = [check_and_join(i.upper().split(), uppercase, filter=True)
for i in s]
if t:
return rv[0]
return rv
bifid5 = AZ().replace('J', '')
bifid6 = AZ() + '0123456789'
bifid10 = printable
def padded_key(key, symbols, filter=True):
"""Return a string of the distinct characters of ``symbols`` with
those of ``key`` appearing first, omitting characters in ``key``
that are not in ``symbols``. A ValueError is raised if a) there are
duplicate characters in ``symbols`` or b) there are characters
in ``key`` that are not in ``symbols``.
Examples
========
>>> from sympy.crypto.crypto import padded_key
>>> padded_key('PUPPY', 'OPQRSTUVWXY')
'PUYOQRSTVWX'
>>> padded_key('RSA', 'ARTIST')
Traceback (most recent call last):
...
ValueError: duplicate characters in symbols: T
"""
syms = list(uniq(symbols))
if len(syms) != len(symbols):
extra = ''.join(sorted(set(
[i for i in symbols if symbols.count(i) > 1])))
raise ValueError('duplicate characters in symbols: %s' % extra)
extra = set(key) - set(syms)
if extra:
raise ValueError(
'characters in key but not symbols: %s' % ''.join(
sorted(extra)))
key0 = ''.join(list(uniq(key)))
return key0 + ''.join([i for i in syms if i not in key0])
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of `phrase` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase: string or list of strings to be returned as a string
symbols: iterable of characters allowed in ``phrase``;
if ``symbols`` is None, no checking is performed
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
def _prep(msg, key, alp, default=None):
if not alp:
if not default:
alp = AZ()
msg = AZ(msg)
key = AZ(key)
else:
alp = default
else:
alp = ''.join(alp)
key = check_and_join(key, alp, filter=True)
msg = check_and_join(msg, alp, filter=True)
return msg, key, alp
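# A minimal sketch of the helper above (hypothetical function, not part of
# the sympy API): with ``symbols=None`` the inputs are upper-cased and
# filtered to A-Z before any cipher routine runs.
def _prep_example():  # pragma: no cover
    msg, key, alp = _prep('go navy!', 'gold bug', None)
    assert (msg, key, alp) == ('GONAVY', 'GOLDBUG', AZ())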
def cycle_list(k, n):
"""
Returns the elements of the list ``range(n)`` shifted to the
left by ``k`` (so the list starts with ``k`` (mod ``n``)).
Examples
========
>>> from sympy.crypto.crypto import cycle_list
>>> cycle_list(3, 10)
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
"""
k = k % n
return list(range(k, n)) + list(range(k))
######## shift cipher examples ############
def encipher_shift(msg, key, symbols=None):
"""
Performs shift cipher encryption on plaintext msg, and returns the
ciphertext.
Notes
=====
The shift cipher is also called the Caesar cipher, after
Julius Caesar, who, according to Suetonius, used it with a
shift of three to protect messages of military significance.
Caesar's nephew Augustus reportedly used a similar cipher, but
with a right shift of 1.
ALGORITHM:
INPUT:
``key``: an integer (the secret key)
``msg``: plaintext of upper-case letters
OUTPUT:
``ct``: ciphertext of upper-case letters
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
adding ``(k mod 26)`` to each element in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
There is also a convenience function that does this with the
original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
msg, _, A = _prep(msg, '', symbols)
shift = len(A) - key % len(A)
key = A[shift:] + A[:shift]
return translate(msg, key, A)
def decipher_shift(msg, key, symbols=None):
"""
Return the text by shifting the characters of ``msg`` to the
left by the amount given by ``key``.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
Or use this function with the original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
return encipher_shift(msg, -key, symbols)
######## affine cipher examples ############
def encipher_affine(msg, key, symbols=None, _inverse=False):
r"""
Performs the affine cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Encryption is based on the map `x \rightarrow ax+b` (mod `N`)
where ``N`` is the number of characters in the alphabet.
Decryption is based on the map `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
In particular, for the map to be invertible, we need
`\mathrm{gcd}(a, N) = 1` and an error will be raised if this is
not true.
Notes
=====
This is a straightforward generalization of the shift cipher with
the added complexity of requiring 2 characters to be deciphered in
order to recover the key.
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
``a, b``: a pair integers, with ``gcd(a, N) = 1``
(the secret key)
``symbols``: string of characters (default = uppercase
letters). When no symbols are given, ``msg`` is converted
to upper case letters and all other characters are ignored.
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
replacing ``x`` by ``a*x + b (mod N)``, for each element
``x`` in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
See Also
========
decipher_affine
"""
msg, _, A = _prep(msg, '', symbols)
N = len(A)
a, b = key
assert gcd(a, N) == 1
if _inverse:
c = mod_inverse(a, N)
d = -b*c
a, b = c, d
B = ''.join([A[(a*i + b) % N] for i in range(N)])
return translate(msg, A, B)
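# A minimal round-trip sketch (hypothetical helper, not part of the sympy
# API), using the same key as the decipher_affine doctest below; since
# gcd(3, 26) == 1 the map x -> 3*x + 1 (mod 26) is invertible.
def _affine_example():  # pragma: no cover
    ct = encipher_affine("GO NAVY BEAT ARMY", (3, 1))
    assert ct == 'TROBMVENBGBALV'
    assert decipher_affine(ct, (3, 1)) == 'GONAVYBEATARMY'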
def decipher_affine(msg, key, symbols=None):
r"""
Return the deciphered text that was made from the mapping,
`x \rightarrow ax+b` (mod `N`), where ``N`` is the
number of characters in the alphabet. Deciphering is done by
reciphering with a new key: `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
Examples
========
>>> from sympy.crypto.crypto import encipher_affine, decipher_affine
>>> msg = "GO NAVY BEAT ARMY"
>>> key = (3, 1)
>>> encipher_affine(msg, key)
'TROBMVENBGBALV'
>>> decipher_affine(_, key)
'GONAVYBEATARMY'
"""
return encipher_affine(msg, key, symbols, _inverse=True)
#################### substitution cipher ###########################
def encipher_substitution(msg, old, new=None):
"""
Returns the ciphertext obtained by replacing each character that
appears in ``old`` with the corresponding character in ``new``.
If ``old`` is a mapping, then new is ignored and the replacements
defined by ``old`` are used.
Notes
=====
This is more general than the affine cipher in that the key can
only be recovered by determining the mapping for each symbol.
Though in practice, once a few symbols are recognized the mappings
for other characters can be quickly guessed.
Examples
========
>>> from sympy.crypto.crypto import encipher_substitution, AZ
>>> old = 'OEYAG'
>>> new = '034^6'
>>> msg = AZ("go navy! beat army!")
>>> ct = encipher_substitution(msg, old, new); ct
'60N^V4B3^T^RM4'
To decrypt a substitution, reverse the last two arguments:
>>> encipher_substitution(ct, new, old)
'GONAVYBEATARMY'
In the special case where ``old`` and ``new`` are a permutation of
order 2 (representing a transposition of characters) their order
is immaterial:
>>> old = 'NAVY'
>>> new = 'ANYV'
>>> encipher = lambda x: encipher_substitution(x, old, new)
>>> encipher('NAVY')
'ANYV'
>>> encipher(_)
'NAVY'
The substitution cipher, in general, is a method
whereby "units" (not necessarily single characters) of plaintext
are replaced with ciphertext according to a regular system.
>>> ords = dict(zip('abc', ['\\%i' % ord(i) for i in 'abc']))
>>> print(encipher_substitution('abc', ords))
\97\98\99
"""
return translate(msg, old, new)
######################################################################
#################### Vigenère cipher examples ########################
######################################################################
def encipher_vigenere(msg, key, symbols=None):
"""
Performs the Vigenère cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Examples
========
>>> from sympy.crypto.crypto import encipher_vigenere, AZ
>>> key = "encrypt"
>>> msg = "meet me on monday"
>>> encipher_vigenere(msg, key)
'QRGKKTHRZQEBPR'
Section 1 of the Kryptos sculpture at the CIA headquarters
uses this cipher and also changes the order of the
alphabet [2]_. Here is the first line of that section of
the sculpture:
>>> from sympy.crypto.crypto import decipher_vigenere, padded_key
>>> alp = padded_key('KRYPTOS', AZ())
>>> key = 'PALIMPSEST'
>>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ'
>>> decipher_vigenere(msg, key, alp)
'BETWEENSUBTLESHADINGANDTHEABSENC'
Notes
=====
The Vigenère cipher is named after Blaise de Vigenère, a sixteenth
century diplomat and cryptographer, by a historical accident.
Vigenère actually invented a different and more complicated cipher.
The so-called *Vigenère cipher* was actually invented
by Giovan Battista Bellaso in 1553.
This cipher was used in the 1800's, for example, during the American
Civil War. The Confederacy used a brass cipher disk to implement the
Vigenère cipher (now on display in the NSA Museum in Fort
Meade) [1]_.
The Vigenère cipher is a generalization of the shift cipher.
Whereas the shift cipher shifts each letter by the same amount
(that amount being the key of the shift cipher) the Vigenère
cipher shifts a letter by an amount determined by the key (which is
a word or phrase known only to the sender and receiver).
For example, if the key was a single letter, such as "C", then the
so-called Vigenère cipher is actually a shift cipher with a
shift of `2` (since "C" is the 2nd letter of the alphabet, if
you start counting at `0`). If the key was a word with two
letters, such as "CA", then the so-called Vigenère cipher will
shift letters in even positions by `2` and letters in odd positions
are left alone (shifted by `0`, since "A" is the 0th letter, if
you start counting at `0`).
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
(the plaintext)
``key``: a string of characters that appear in ``symbols``
(the secret key)
``symbols``: a string of letters defining the alphabet
OUTPUT:
``ct``: string of characters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``key`` a list ``L1`` of
corresponding integers. Let ``n1 = len(L1)``.
2. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
3. Break ``L2`` up sequentially into sublists of size
``n1``; the last sublist may be smaller than ``n1``
4. For each of these sublists ``L`` of ``L2``, compute a
new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)``
to the ``i``-th element in the sublist, for each ``i``.
5. Assemble these lists ``C`` by concatenation into a new
list of length ``n2``.
6. Compute from the new list a string ``ct`` of
corresponding letters.
Once it is known that the key is, say, `n` characters long,
frequency analysis can be applied to every `n`-th letter of
the ciphertext to determine the plaintext. This method is
called *Kasiski examination* (although it was first discovered
by Babbage). If the key is as long as the message and is
comprised of randomly selected characters -- a one-time pad -- the
message is theoretically unbreakable.
The cipher Vigenère actually discovered is an "auto-key" cipher
described as follows.
ALGORITHM:
INPUT:
``key``: a string of letters (the secret key)
``msg``: string of letters (the plaintext message)
OUTPUT:
``ct``: string of upper-case letters (the ciphertext message)
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L2`` of
corresponding integers. Let ``n2 = len(L2)``.
2. Let ``n1`` be the length of the key. Append to the
string ``key`` the first ``n2 - n1`` characters of
the plaintext message. Compute from this string (also of
length ``n2``) a list ``L1`` of integers corresponding
to the letter numbers in the first step.
3. Compute a new list ``C`` given by
``C[i] = L1[i] + L2[i] (mod N)``.
4. Compute from the new list a string ``ct`` of letters
corresponding to the new integers.
To decipher the auto-key ciphertext, the key is used to decipher
the first ``n1`` characters and then those characters become the
key to decipher the next ``n1`` characters, etc...:
>>> m = AZ('go navy, beat army! yes you can'); m
'GONAVYBEATARMYYESYOUCAN'
>>> key = AZ('gold bug'); n1 = len(key); n2 = len(m)
>>> auto_key = key + m[:n2 - n1]; auto_key
'GOLDBUGGONAVYBEATARMYYE'
>>> ct = encipher_vigenere(m, auto_key); ct
'MCYDWSHKOGAMKZCELYFGAYR'
>>> n1 = len(key)
>>> pt = []
>>> while ct:
... part, ct = ct[:n1], ct[n1:]
... pt.append(decipher_vigenere(part, key))
... key = pt[-1]
...
>>> ''.join(pt) == m
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Vigenere_cipher
.. [2] http://web.archive.org/web/20071116100808/
http://filebox.vt.edu/users/batman/kryptos.html
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
key = [map[c] for c in key]
N = len(map)
k = len(key)
rv = []
for i, m in enumerate(msg):
rv.append(A[(map[m] + key[i % k]) % N])
rv = ''.join(rv)
return rv
def decipher_vigenere(msg, key, symbols=None):
"""
Decode using the Vigenère cipher.
Examples
========
>>> from sympy.crypto.crypto import decipher_vigenere
>>> key = "encrypt"
>>> ct = "QRGK kt HRZQE BPR"
>>> decipher_vigenere(ct, key)
'MEETMEONMONDAY'
"""
msg, key, A = _prep(msg, key, symbols)
map = {c: i for i, c in enumerate(A)}
N = len(A) # normally, 26
K = [map[c] for c in key]
n = len(K)
C = [map[c] for c in msg]
rv = ''.join([A[(-K[i % n] + c) % N] for i, c in enumerate(C)])
return rv
#################### Hill cipher ########################
def encipher_hill(msg, key, symbols=None, pad="Q"):
r"""
Return the Hill cipher encryption of ``msg``.
Notes
=====
The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_,
was the first polygraphic cipher in which it was practical
(though barely) to operate on more than three symbols at once.
The following discussion assumes an elementary knowledge of
matrices.
Each letter is first encoded as a number starting with 0.
Suppose your message `msg` consists of `n` capital letters, with no
spaces. This may be regarded as an `n`-tuple M of elements of
`Z_{26}` (if the letters are those of the English alphabet). A key
in the Hill cipher is a `k \times k` matrix `K`, all of whose entries
are in `Z_{26}`, such that the matrix `K` is invertible (i.e., the
linear transformation `K: Z_{N}^k \rightarrow Z_{N}^k`
is one-to-one).
ALGORITHM:
INPUT:
``msg``: plaintext message of `n` upper-case letters
``key``: a `k \times k` invertible matrix `K`, all of whose
entries are in `Z_{26}` (or whatever number of symbols
are being used).
``pad``: character (default "Q") to use to make length
of text be a multiple of ``k``
OUTPUT:
``ct``: ciphertext of upper-case letters
STEPS:
0. Number the letters of the alphabet from 0, ..., N - 1
1. Compute from the string ``msg`` a list ``L`` of
corresponding integers. Let ``n = len(L)``.
2. Break the list ``L`` up into ``t = ceiling(n/k)``
sublists ``L_1``, ..., ``L_t`` of size ``k`` (with
the last list "padded" to ensure its size is
``k``).
3. Compute new list ``C_1``, ..., ``C_t`` given by
``C[i] = K*L_i`` (arithmetic is done mod N), for each
``i``.
4. Concatenate these into a list ``C = C_1 + ... + C_t``.
5. Compute from ``C`` a string ``ct`` of corresponding
letters. This has length ``k*t``.
References
==========
.. [1] http://en.wikipedia.org/wiki/Hill_cipher
.. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet,
The American Mathematical Monthly Vol.36, June-July 1929,
pp.306-312.
See Also
========
decipher_hill
"""
assert key.is_square
assert len(pad) == 1
msg, pad, A = _prep(msg, pad, symbols)
map = {c: i for i, c in enumerate(A)}
P = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(P)
m, r = divmod(n, k)
if r:
P = P + [map[pad]]*(k - r)
m += 1
rv = ''.join([A[c % N] for j in range(m) for c in
list(key*Matrix(k, 1, [P[i]
for i in range(k*j, k*(j + 1))]))])
return rv
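# A minimal round-trip sketch (hypothetical helper, not part of the sympy
# API); the key and expected strings come from the decipher_hill doctest
# below.
def _hill_example():  # pragma: no cover
    key = Matrix([[1, 2], [3, 5]])
    ct = encipher_hill("meet me on monday", key)
    assert ct == 'UEQDUEODOCTCWQ'
    assert decipher_hill(ct, key) == 'MEETMEONMONDAY'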
def decipher_hill(msg, key, symbols=None):
"""
Deciphering is the same as enciphering but using the inverse of the
key matrix.
Examples
========
>>> from sympy.crypto.crypto import encipher_hill, decipher_hill
>>> from sympy import Matrix
>>> key = Matrix([[1, 2], [3, 5]])
>>> encipher_hill("meet me on monday", key)
'UEQDUEODOCTCWQ'
>>> decipher_hill(_, key)
'MEETMEONMONDAY'
When the length of the plaintext (stripped of invalid characters)
is not a multiple of the key dimension, extra characters will
appear at the end of the enciphered and deciphered text. In order to
decipher the text, those characters must be included in the text to
be deciphered. In the following, the key has a dimension of 4 but
the text is 2 short of being a multiple of 4 so two characters will
be added.
>>> key = Matrix([[1, 1, 1, 2], [0, 1, 1, 0],
... [2, 2, 3, 4], [1, 1, 0, 1]])
>>> msg = "ST"
>>> encipher_hill(msg, key)
'HJEB'
>>> decipher_hill(_, key)
'STQQ'
>>> encipher_hill(msg, key, pad="Z")
'ISPK'
>>> decipher_hill(_, key)
'STZZ'
If the last two characters of the ciphertext were ignored in
either case, the wrong plaintext would be recovered:
>>> decipher_hill("HD", key)
'ORMV'
>>> decipher_hill("IS", key)
'UIKY'
"""
assert key.is_square
msg, _, A = _prep(msg, '', symbols)
map = {c: i for i, c in enumerate(A)}
C = [map[c] for c in msg]
N = len(A)
k = key.cols
n = len(C)
m, r = divmod(n, k)
if r:
C = C + [0]*(k - r)
m += 1
key_inv = key.inv_mod(N)
rv = ''.join([A[p % N] for j in range(m) for p in
list(key_inv*Matrix(
k, 1, [C[i] for i in range(k*j, k*(j + 1))]))])
return rv
#################### Bifid cipher ########################
def encipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses an `n \times n`
Polybius square.
INPUT:
``msg``: plaintext string
``key``: short string for key; duplicate characters are
ignored and then it is padded with the characters in
``symbols`` that were not in the short key
``symbols``: `n \times n` characters defining the alphabet
(default is string.printable)
OUTPUT:
ciphertext (using Bifid5 cipher without spaces)
See Also
========
decipher_bifid, encipher_bifid5, encipher_bifid6
"""
msg, key, A = _prep(msg, key, symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the fractionalization
row_col = dict([(ch, divmod(i, N))
for i, ch in enumerate(long_key)])
r, c = zip(*[row_col[x] for x in msg])
rc = r + c
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join((ch[i] for i in zip(rc[::2], rc[1::2])))
return rv
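# A minimal sketch (hypothetical helper, not part of the sympy API) using
# the 25-letter alphabet; the expected string comes from the
# decipher_bifid doctest below.
def _bifid_example():  # pragma: no cover
    alp = AZ().replace('J', '')
    ct = encipher_bifid(AZ("meet me on monday!"), AZ("gold bug"), alp)
    assert ct == 'IEILHHFSTSFQYE'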
def decipher_bifid(msg, key, symbols=None):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `n \times n`
Polybius square.
INPUT:
``msg``: ciphertext string
``key``: short string for key; duplicate characters are
ignored and then it is padded with the characters in
``symbols`` that were not in the short key
``symbols``: `n \times n` characters defining the alphabet
(default=string.printable, a `10 \times 10` matrix)
OUTPUT:
deciphered text
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid, decipher_bifid, AZ)
Do an encryption using the bifid5 alphabet:
>>> alp = AZ().replace('J', '')
>>> ct = AZ("meet me on monday!")
>>> key = AZ("gold bug")
>>> encipher_bifid(ct, key, alp)
'IEILHHFSTSFQYE'
When entering the text or ciphertext, spaces are ignored so it
can be formatted as desired. Re-entering the ciphertext from the
preceding, putting 5 characters per line and padding with an extra
J, does not cause problems for the deciphering:
>>> decipher_bifid('''
... IEILH
... HFSTS
... FQYEJ''', key, alp)
'MEETMEONMONDAY'
When no alphabet is given, all 100 printable characters will be
used:
>>> key = ''
>>> encipher_bifid('hello world!', key)
'bmtwmg-bIo*w'
>>> decipher_bifid(_, key)
'hello world!'
If the key is changed, a different encryption is obtained:
>>> encipher_bifid('hello world!', 'gold_bug')
'hg2sfuei7t}w'
And if the key used to decrypt the message is not exact, the
original text will not be perfectly obtained:
>>> decipher_bifid(_, 'gold pug')
'heldo~wor6d!'
"""
msg, _, A = _prep(msg, '', symbols, bifid10)
long_key = ''.join(uniq(key)) or A
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
N = int(n)
if len(long_key) < N**2:
long_key = list(long_key) + [x for x in A if x not in long_key]
# the reverse fractionalization
row_col = dict(
[(ch, divmod(i, N)) for i, ch in enumerate(long_key)])
rc = [i for c in msg for i in row_col[c]]
n = len(msg)
rc = zip(*(rc[:n], rc[n:]))
ch = {i: ch for ch, i in row_col.items()}
rv = ''.join((ch[i] for i in rc))
return rv
def bifid_square(key):
"""Return characters of ``key`` arranged in a square.
Examples
========
>>> from sympy.crypto.crypto import (
... bifid_square, AZ, padded_key, bifid5)
>>> bifid_square(AZ().replace('J', ''))
Matrix([
[A, B, C, D, E],
[F, G, H, I, K],
[L, M, N, O, P],
[Q, R, S, T, U],
[V, W, X, Y, Z]])
>>> bifid_square(padded_key(AZ('gold bug!'), bifid5))
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
See Also
========
padded_key
"""
A = ''.join(uniq(''.join(key)))
n = len(A)**.5
if n != int(n):
raise ValueError(
'Length of alphabet (%s) is not a square number.' % len(A))
n = int(n)
f = lambda i, j: Symbol(A[n*i + j])
rv = Matrix(n, n, f)
return rv
def encipher_bifid5(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square. The letter "J" is ignored so it must be replaced
with something else (traditionally an "I") before encryption.
Notes
=====
The Bifid cipher was invented around 1901 by Felix Delastelle.
It is a *fractional substitution* cipher, where letters are
replaced by pairs of symbols from a smaller alphabet. The
cipher uses a `5 \times 5` square filled with some ordering of the
alphabet, except that "J" is replaced with "I" (this is a so-called
Polybius square; there is a `6 \times 6` analog if you add "J" back
in and append the digits 0, 1, ..., 9 to the usual 26-letter
alphabet).
According to Helen Gaines' book *Cryptanalysis*, this type of cipher
was used in the field by the German Army during World War I.
ALGORITHM: (5x5 case)
INPUT:
``msg``: plaintext string; converted to upper case and
filtered of everything but letters, with "J" also removed.
``key``: short string for key; non-alphabetic letters, J
and duplicated characters are ignored and then, if the
length is less than 25 characters, it is padded with other
letters of the alphabet (in alphabetical order).
OUTPUT:
ciphertext (all caps, no spaces)
STEPS:
0. Create the `5 \times 5` Polybius square ``S`` associated
to ``key`` as follows:
a) moving from left-to-right, top-to-bottom,
place the letters of the key into a `5 \times 5`
matrix,
b) if the key has less than 25 letters, add the
letters of the alphabet not in the key until the
`5 \times 5` square is filled.
1. Create a list ``P`` of pairs of numbers which are the
coordinates in the Polybius square of the letters in
``msg``.
2. Let ``L1`` be the list of all first coordinates of ``P``
(length of ``L1 = n``), let ``L2`` be the list of all
second coordinates of ``P`` (so the length of ``L2``
is also ``n``).
3. Let ``L`` be the concatenation of ``L1`` and ``L2``
(length ``L = 2*n``), except that consecutive numbers
are paired ``(L[2*i], L[2*i + 1])``. You can regard
``L`` as a list of pairs of length ``n``.
4. Let ``C`` be the list of all letters which are of the
form ``S[i, j]``, for all ``(i, j)`` in ``L``. As a
string, this is the ciphertext of ``msg``.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_bifid5, decipher_bifid5)
"J" will be omitted unless it is replaced with somthing else:
>>> round_trip = lambda m, k: \
... decipher_bifid5(encipher_bifid5(m, k), k)
>>> key = 'a'
>>> msg = "JOSIE"
>>> round_trip(msg, key)
'OSIE'
>>> round_trip(msg.replace("J", "I"), key)
'IOSIE'
>>> j = "QIQ"
>>> round_trip(msg.replace("J", j), key).replace(j, "J")
'JOSIE'
See Also
========
decipher_bifid5, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return encipher_bifid(msg, '', key)
def decipher_bifid5(msg, key):
r"""
Return the Bifid cipher decryption of ``msg``.
This is the version of the Bifid cipher that uses the `5 \times 5`
Polybius square; the letter "J" is ignored unless a ``key`` of
length 25 is used.
INPUT:
``msg``: ciphertext string
``key``: short string for key; duplicated characters are
ignored and if the length is less than 25 characters, it
will be padded with other letters from the alphabet omitting
"J". Non-alphabetic characters are ignored.
OUTPUT:
plaintext from Bifid5 cipher (all caps, no spaces)
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5
>>> key = "gold bug"
>>> encipher_bifid5('meet me on friday', key)
'IEILEHFSTSFXEE'
>>> encipher_bifid5('meet me on monday', key)
'IEILHHFSTSFQYE'
>>> decipher_bifid5(_, key)
'MEETMEONMONDAY'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return decipher_bifid(msg, '', key)
def bifid5_square(key=None):
r"""
5x5 Polybius square.
Produce the Polybius square for the `5 \times 5` Bifid cipher.
Examples
========
>>> from sympy.crypto.crypto import bifid5_square
>>> bifid5_square("gold bug")
Matrix([
[G, O, L, D, B],
[U, A, C, E, F],
[H, I, K, M, N],
[P, Q, R, S, T],
[V, W, X, Y, Z]])
"""
if not key:
key = bifid5
else:
_, key, _ = _prep('', key.upper(), None, bifid5)
key = padded_key(key, bifid5)
return bifid_square(key)
def encipher_bifid6(msg, key):
r"""
Performs the Bifid cipher encryption on plaintext ``msg``, and
returns the ciphertext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
INPUT:
``msg``: plaintext string (digits okay)
``key``: short string for key (digits okay). If ``key`` is
less than 36 characters long, the square will be filled with
letters A through Z and digits 0 through 9.
OUTPUT:
ciphertext from Bifid cipher (all caps, no spaces)
See Also
========
decipher_bifid6, encipher_bifid
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return encipher_bifid(msg, '', key)
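# A minimal sketch (hypothetical helper, not part of the sympy API); the
# expected string comes from the decipher_bifid6 doctest below.
def _bifid6_example():  # pragma: no cover
    ct = encipher_bifid6('meet me on monday at 8am', "gold bug")
    assert ct == 'KFKLJJHF5MMMKTFRGPL'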
def decipher_bifid6(msg, key):
r"""
Performs the Bifid cipher decryption on ciphertext ``msg``, and
returns the plaintext.
This is the version of the Bifid cipher that uses the `6 \times 6`
Polybius square.
INPUT:
``msg``: ciphertext string (digits okay); converted to upper case
``key``: short string for key (digits okay). If ``key`` is
less than 36 characters long, the square will be filled with
letters A through Z and digits 0 through 9. All letters are
converted to uppercase.
OUTPUT:
plaintext from Bifid cipher (all caps, no spaces)
Examples
========
>>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6
>>> key = "gold bug"
>>> encipher_bifid6('meet me on monday at 8am', key)
'KFKLJJHF5MMMKTFRGPL'
>>> decipher_bifid6(_, key)
'MEETMEONMONDAYAT8AM'
"""
msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return decipher_bifid(msg, '', key)
def bifid6_square(key=None):
r"""
6x6 Polybius square.
Produces the Polybius square for the `6 \times 6` Bifid cipher.
Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9".
Examples
========
>>> from sympy.crypto.crypto import bifid6_square
>>> key = "gold bug"
>>> bifid6_square(key)
Matrix([
[G, O, L, D, B, U],
[A, C, E, F, H, I],
[J, K, M, N, P, Q],
[R, S, T, V, W, X],
[Y, Z, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9]])
"""
if not key:
key = bifid6
else:
_, key, _ = _prep('', key.upper(), None, bifid6)
key = padded_key(key, bifid6)
return bifid_square(key)
#################### RSA #############################
def rsa_public_key(p, q, e):
r"""
Return the RSA *public key* pair, `(n, e)`, where `n`
is a product of two primes and `e` is relatively
prime (coprime) to the Euler totient `\phi(n)`. False
is returned if any assumption is violated.
Examples
========
>>> from sympy.crypto.crypto import rsa_public_key
>>> p, q, e = 3, 5, 7
>>> rsa_public_key(p, q, e)
(15, 7)
>>> rsa_public_key(p, q, 30)
False
"""
n = p*q
if isprime(p) and isprime(q):
phi = totient(n)
if gcd(e, phi) == 1:
return n, e
return False
def rsa_private_key(p, q, e):
r"""
Return the RSA *private key*, `(n,d)`, where `n`
is a product of two primes and `d` is the inverse of
`e` (mod `\phi(n)`). False is returned if any assumption
is violated.
Examples
========
>>> from sympy.crypto.crypto import rsa_private_key
>>> p, q, e = 3, 5, 7
>>> rsa_private_key(p, q, e)
(15, 7)
>>> rsa_private_key(p, q, 30)
False
"""
n = p*q
if isprime(p) and isprime(q):
phi = totient(n)
if gcd(e, phi) == 1:
d = mod_inverse(e, phi)
return n, d
return False
def encipher_rsa(i, key):
"""
Return encryption of ``i`` by computing `i^e` (mod `n`),
where ``key`` is the public key `(n, e)`.
Examples
========
>>> from sympy.crypto.crypto import encipher_rsa, rsa_public_key
>>> p, q, e = 3, 5, 7
>>> puk = rsa_public_key(p, q, e)
>>> msg = 12
>>> encipher_rsa(msg, puk)
3
"""
n, e = key
return pow(i, e, n)
def decipher_rsa(i, key):
"""
Return decryption of ``i`` by computing `i^d` (mod `n`),
where ``key`` is the private key `(n, d)`.
Examples
========
>>> from sympy.crypto.crypto import decipher_rsa, rsa_private_key
>>> p, q, e = 3, 5, 7
>>> prk = rsa_private_key(p, q, e)
>>> msg = 3
>>> decipher_rsa(msg, prk)
12
"""
n, d = key
return pow(i, d, n)
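# A minimal end-to-end sketch (hypothetical helper, not part of the sympy
# API) with the toy parameters used in the doctests above: n = 15,
# phi(15) = 8, and d = 7 since 7*7 = 49 = 1 (mod 8).
def _rsa_example():  # pragma: no cover
    puk = rsa_public_key(3, 5, 7)   # (15, 7)
    prk = rsa_private_key(3, 5, 7)  # (15, 7)
    assert encipher_rsa(12, puk) == 3
    assert decipher_rsa(3, prk) == 12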
#################### kid krypto (kid RSA) #############################
def kid_rsa_public_key(a, b, A, B):
r"""
Kid RSA is a version of RSA useful to teach grade school children
since it does not involve exponentiation.
Alice wants to talk to Bob. Bob generates keys as follows.
Key generation:
* Select positive integers `a, b, A, B` at random.
* Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1)//M`.
* The *public key* is `(n, e)`. Bob sends these to Alice.
* The *private key* is `(n, d)`, which Bob keeps secret.
Encryption: If `p` is the plaintext message then the
ciphertext is `c = p e \pmod n`.
Decryption: If `c` is the ciphertext message then the
plaintext is `p = c d \pmod n`.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_public_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_public_key(a, b, A, B)
(369, 58)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, e
def kid_rsa_private_key(a, b, A, B):
"""
Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`,
`n = (e d - 1) / M`. The *private key* is `(n, d)`, which Bob
keeps secret.
Examples
========
>>> from sympy.crypto.crypto import kid_rsa_private_key
>>> a, b, A, B = 3, 4, 5, 6
>>> kid_rsa_private_key(a, b, A, B)
(369, 70)
"""
M = a*b - 1
e = A*M + a
d = B*M + b
n = (e*d - 1)//M
return n, d
def encipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the public key.
Examples
========
>>> from sympy.crypto.crypto import (
... encipher_kid_rsa, kid_rsa_public_key)
>>> msg = 200
>>> a, b, A, B = 3, 4, 5, 6
>>> key = kid_rsa_public_key(a, b, A, B)
>>> encipher_kid_rsa(msg, key)
161
"""
n, e = key
return (msg*e) % n
def decipher_kid_rsa(msg, key):
"""
Here ``msg`` is the plaintext and ``key`` is the private key.
Examples
========
>>> from sympy.crypto.crypto import (
... kid_rsa_public_key, kid_rsa_private_key,
... decipher_kid_rsa, encipher_kid_rsa)
>>> a, b, A, B = 3, 4, 5, 6
>>> d = kid_rsa_private_key(a, b, A, B)
>>> msg = 200
>>> pub = kid_rsa_public_key(a, b, A, B)
>>> pri = kid_rsa_private_key(a, b, A, B)
>>> ct = encipher_kid_rsa(msg, pub)
>>> decipher_kid_rsa(ct, pri)
200
"""
n, d = key
return (msg*d) % n
#################### Morse Code ######################################
morse_char = {
".-": "A", "-...": "B",
"-.-.": "C", "-..": "D",
".": "E", "..-.": "F",
"--.": "G", "....": "H",
"..": "I", ".---": "J",
"-.-": "K", ".-..": "L",
"--": "M", "-.": "N",
"---": "O", ".--.": "P",
"--.-": "Q", ".-.": "R",
"...": "S", "-": "T",
"..-": "U", "...-": "V",
".--": "W", "-..-": "X",
"-.--": "Y", "--..": "Z",
"-----": "0", "----": "1",
"..---": "2", "...--": "3",
"....-": "4", ".....": "5",
"-....": "6", "--...": "7",
"---..": "8", "----.": "9",
".-.-.-": ".", "--..--": ",",
"---...": ":", "-.-.-.": ";",
"..--..": "?", "-...-": "-",
"..--.-": "_", "-.--.": "(",
"-.--.-": ")", ".----.": "'",
"-...-": "=", ".-.-.": "+",
"-..-.": "/", ".--.-.": "@",
"...-..-": "$", "-.-.--": "!"}
char_morse = {v: k for k, v in morse_char.items()}
def encode_morse(msg, sep='|', mapping=None):
"""
Encodes a plaintext into popular Morse Code with letters
separated by `sep` and words by a double `sep`.
References
==========
.. [1] http://en.wikipedia.org/wiki/Morse_code
Examples
========
>>> from sympy.crypto.crypto import encode_morse
>>> msg = 'ATTACK RIGHT FLANK'
>>> encode_morse(msg)
'.-|-|-|.-|-.-.|-.-||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-'
"""
mapping = mapping or char_morse
assert sep not in mapping
word_sep = 2*sep
mapping[" "] = word_sep
suffix = msg and msg[-1] in whitespace
# normalize whitespace
msg = (' ' if word_sep else '').join(msg.split())
# omit unmapped chars
chars = set(''.join(msg.split()))
ok = set(mapping.keys())
msg = translate(msg, None, ''.join(chars - ok))
morsestring = []
words = msg.split()
for word in words:
morseword = []
for letter in word:
morseletter = mapping[letter]
morseword.append(morseletter)
word = sep.join(morseword)
morsestring.append(word)
return word_sep.join(morsestring) + (word_sep if suffix else '')
def decode_morse(msg, sep='|', mapping=None):
"""
Decodes a Morse Code with letters separated by `sep`
(default is '|') and words by a double `sep` (default is '||')
into plaintext.
References
==========
.. [1] http://en.wikipedia.org/wiki/Morse_code
Examples
========
>>> from sympy.crypto.crypto import decode_morse
>>> mc = '--|---|...-|.||.|.-|...|-'
>>> decode_morse(mc)
'MOVE EAST'
"""
mapping = mapping or morse_char
word_sep = 2*sep
characterstring = []
words = msg.strip(word_sep).split(word_sep)
for word in words:
letters = word.split(sep)
chars = [mapping[c] for c in letters]
word = ''.join(chars)
characterstring.append(word)
rv = " ".join(characterstring)
return rv
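# A minimal round-trip sketch (hypothetical helper, not part of the sympy
# API); digits pass through the tables above just like letters.
def _morse_example():  # pragma: no cover
    mc = encode_morse('GO AT 8AM')
    assert mc == '--.|---||.-|-||---..|.-|--'
    assert decode_morse(mc) == 'GO AT 8AM'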
#################### LFSRs ##########################################
def lfsr_sequence(key, fill, n):
r"""
This function creates an lfsr sequence.
INPUT:
``key``: a list of finite field elements,
`[c_0, c_1, \ldots, c_k].`
``fill``: the list of the initial terms of the lfsr
sequence, `[x_0, x_1, \ldots, x_k].`
``n``: number of terms of the sequence that the
function returns.
OUTPUT:
The lfsr sequence defined by
`x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for
`n \geq k`.
Notes
=====
S. Golomb [G]_ gives a list of three statistical properties a
sequence of numbers `a = \{a_n\}_{n=1}^\infty`,
`a_n \in \{0,1\}`, should display to be considered
"random". Define the autocorrelation of `a` to be
.. math::
C(k) = C(k,a) = \lim_{N\rightarrow \infty} {1\over N}\sum_{n=1}^N (-1)^{a_n + a_{n+k}}.
In the case where `a` is periodic with period
`P` then this reduces to
.. math::
C(k) = {1\over P}\sum_{n=1}^P (-1)^{a_n + a_{n+k}}.
Assume `a` is periodic with period `P`.
- balance:
.. math::
\left|\sum_{n=1}^P(-1)^{a_n}\right| \leq 1.
- low autocorrelation:
.. math::
C(k) = \left\{ \begin{array}{cc} 1,& k = 0,\\ \epsilon, & k \ne 0. \end{array} \right.
(For sequences satisfying these first two properties, it is known
that `\epsilon = -1/P` must hold.)
- proportional runs property: In each period, half the runs have
length `1`, one-fourth have length `2`, etc.
Moreover, there are as many runs of `1`'s as there are of
`0`'s.
References
==========
.. [G] Solomon Golomb, Shift register sequences, Aegean Park Press,
Laguna Hills, Ca, 1967
Examples
========
>>> from sympy.crypto.crypto import lfsr_sequence
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> lfsr_sequence(key, fill, 10)
[1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2,
1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2]
"""
if not isinstance(key, list):
raise TypeError("key must be a list")
if not isinstance(fill, list):
raise TypeError("fill must be a list")
p = key[0].mod
F = FF(p)
s = fill
k = len(fill)
L = []
for i in range(n):
s0 = s[:]
L.append(s[0])
s = s[1:k]
x = sum([int(key[i]*s0[i]) for i in range(k)])
s.append(F(x))
return L # use [x.to_int() for x in L] for int version
def lfsr_autocorrelation(L, P, k):
"""
This function computes the LFSR autocorrelation function.
INPUT:
``L``: a periodic sequence of elements of `GF(2)`.
``L`` must have length larger than ``P``.
``P``: the period of ``L``
``k``: an integer (`0 \leq k < P`)
OUTPUT:
the ``k``-th value of the autocorrelation of the LFSR ``L``
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_autocorrelation)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_autocorrelation(s, 15, 7)
-1/15
>>> lfsr_autocorrelation(s, 15, 0)
1
"""
if not isinstance(L, list):
raise TypeError("L (=%s) must be a list" % L)
P = int(P)
k = int(k)
L0 = L[:P] # slices makes a copy
L1 = L0 + L0[:k]
L2 = [(-1)**(L1[i].to_int() + L1[i + k].to_int()) for i in range(P)]
tot = sum(L2)
return Rational(tot, P)
def lfsr_connection_polynomial(s):
"""
This function computes the LFSR connection polynomial.
INPUT:
``s``: a sequence of even length, with entries in
a finite field
OUTPUT:
``C(x)``: the connection polynomial of a minimal LFSR yielding
``s``.
This implements the algorithm in section 3 of J. L. Massey's
article [M]_.
References
==========
.. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
Jan 1969.
Examples
========
>>> from sympy.crypto.crypto import (
... lfsr_sequence, lfsr_connection_polynomial)
>>> from sympy.polys.domains import FF
>>> F = FF(2)
>>> fill = [F(1), F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**4 + x + 1
>>> fill = [F(1), F(0), F(0), F(1)]
>>> key = [F(1), F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(1), F(0)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x**2 + 1
>>> fill = [F(1), F(0), F(1)]
>>> key = [F(1), F(0), F(1)]
>>> s = lfsr_sequence(key, fill, 20)
>>> lfsr_connection_polynomial(s)
x**3 + x + 1
"""
# Initialization:
p = s[0].mod
F = FF(p)
x = Symbol("x")
C = 1*x**0
B = 1*x**0
m = 1
b = 1*x**0
L = 0
N = 0
while N < len(s):
if L > 0:
dC = Poly(C).degree()
r = min(L + 1, dC + 1)
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i)
for i in range(1, dC + 1)]
d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int()
for i in range(1, r)])) % p
if L == 0:
d = s[N].to_int()*x**0
if d == 0:
m += 1
N += 1
if d > 0:
if 2*L > N:
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
m += 1
N += 1
else:
T = C
C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
L = N + 1 - L
m = 1
b = d
B = T
N += 1
dC = Poly(C).degree()
coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
return sum([coeffsC[i] % p*x**i for i in range(dC + 1)
if coeffsC[i] is not None])
#################### ElGamal #############################
def elgamal_private_key(digit=10, seed=None):
"""
Return a three-number tuple as the private key.
Elgamal encryption is based on the mathematical problem
called the Discrete Logarithm Problem (DLP). For example,
`a^{b} \equiv c \pmod p`
In general, if ``a`` and ``b`` are known, ``c`` is easily
calculated. If ``b`` is unknown, it is hard to use
``a`` and ``c`` to get ``b``.
Parameters
==========
digit : minimum number of binary digits for key
Returns
=======
(p, r, d) : p = prime number, r = primitive root, d = random number
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.utilities.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.ntheory import is_primitive_root, isprime
>>> a, b, _ = elgamal_private_key()
>>> isprime(a)
True
>>> is_primitive_root(b, a)
True
"""
randrange = _randrange(seed)
p = nextprime(2**digit)
return p, primitive_root(p), randrange(2, p)
def elgamal_public_key(key):
"""
Return a three-number tuple as the public key.
Parameters
==========
key : Tuple (p, r, d) generated by ``elgamal_private_key``
Returns
=======
(p, r, e = r**d mod p) : d is the random number from the private key.
Examples
========
>>> from sympy.crypto.crypto import elgamal_public_key
>>> elgamal_public_key((1031, 14, 636))
(1031, 14, 212)
"""
p, r, e = key
return p, r, pow(r, e, p)
def encipher_elgamal(i, key, seed=None):
"""
Encrypt message with public key
``i`` is a plaintext message expressed as an integer.
``key`` is public key (p, r, e). In order to encrypt
a message, a random number ``a`` in ``range(2, p)``
is generated and the encrypted message is returned as
`c_{1}` and `c_{2}` where:
`c_{1} \equiv r^{a} \pmod p`
`c_{2} \equiv m e^{a} \pmod p`
Parameters
==========
i : int, the plaintext message
key : public key
Returns
=======
(c1, c2) : the ciphertext as a pair of numbers
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.utilities.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import encipher_elgamal, elgamal_private_key, elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3]); pri
(37, 2, 3)
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 36
>>> encipher_elgamal(msg, pub, seed=[3])
(8, 6)
"""
p, r, e = key
if i < 0 or i >= p:
raise ValueError(
'Message (%s) should be in range(%s)' % (i, p))
randrange = _randrange(seed)
a = randrange(2, p)
return pow(r, a, p), i*pow(e, a, p) % p
def decipher_elgamal(msg, key):
r"""
Decrypt message with private key
`msg = (c_{1}, c_{2})`
`key = (p, r, d)`
By the extended Euclidean algorithm,
`u c_{1}^{d} + p n = 1`
`u \equiv 1/{{c_{1}}^d} \pmod p`
`u c_{2} \equiv \frac{1}{c_{1}^d} c_{2} \equiv \frac{1}{r^{ad}} c_{2} \pmod p`
`\frac{1}{r^{ad}} m e^a \equiv \frac{1}{r^{ad}} m {r^{d a}} \equiv m \pmod p`
Examples
========
>>> from sympy.crypto.crypto import decipher_elgamal
>>> from sympy.crypto.crypto import encipher_elgamal
>>> from sympy.crypto.crypto import elgamal_private_key
>>> from sympy.crypto.crypto import elgamal_public_key
>>> pri = elgamal_private_key(5, seed=[3])
>>> pub = elgamal_public_key(pri); pub
(37, 2, 8)
>>> msg = 17
>>> decipher_elgamal(encipher_elgamal(msg, pub), pri) == msg
True
"""
p, r, d = key
c1, c2 = msg
u = igcdex(c1**d, p)[0]
return u * c2 % p
################ Diffie-Hellman Key Exchange #########################
def dh_private_key(digit=10, seed=None):
"""
Return a three-integer tuple as the private key.
Diffie-Hellman key exchange is based on the mathematical problem
called the Discrete Logarithm Problem (see ElGamal).
Diffie-Hellman key exchange is divided into the following steps:
* Alice and Bob agree on a base that consists of a prime ``p``
and a primitive root of ``p`` called ``g``
* Alice chooses a number ``a`` and Bob chooses a number ``b`` where
``a`` and ``b`` are random numbers in range `[2, p)`. These are
their private keys.
* Alice then publicly sends Bob `g^{a} \pmod p` while Bob sends
Alice `g^{b} \pmod p`
* They both raise the received value to their secretly chosen
number (``a`` or ``b``), so both obtain the shared key
`g^{ab} \pmod p`
Parameters
==========
digit: minimum number of binary digits required in key
Returns
=======
(p, g, a) : p = prime number, g = primitive root of p,
a = random number from 2 through p - 1
Notes
=====
For testing purposes, the ``seed`` parameter may be set to control
the output of this routine. See sympy.utilities.randtest._randrange.
Examples
========
>>> from sympy.crypto.crypto import dh_private_key
>>> from sympy.ntheory import isprime, is_primitive_root
>>> p, g, _ = dh_private_key()
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
>>> p, g, _ = dh_private_key(5)
>>> isprime(p)
True
>>> is_primitive_root(g, p)
True
"""
p = nextprime(2**digit)
g = primitive_root(p)
randrange = _randrange(seed)
a = randrange(2, p)
return p, g, a
def dh_public_key(key):
"""
Return a three-number tuple as the public key.
This is the tuple that Alice sends to Bob.
Parameters
==========
key: Tuple (p, g, a) generated by ``dh_private_key``
Returns
=======
(p, g, g^a mod p) : p, g and a as in Parameters
Examples
========
>>> from sympy.crypto.crypto import dh_private_key, dh_public_key
>>> p, g, a = dh_private_key();
>>> _p, _g, x = dh_public_key((p, g, a))
>>> p == _p and g == _g
True
>>> x == pow(g, a, p)
True
"""
p, g, a = key
return p, g, pow(g, a, p)
def dh_shared_key(key, b):
"""
Return an integer that is the shared key.
This is what Bob and Alice can both calculate using the public
keys they received from each other and their private keys.
Parameters
==========
key: Tuple (p, g, x) generated by ``dh_public_key``
b: Random number in the range of 2 to p - 1
(Chosen by second key exchange member (Bob))
Returns
=======
shared key (int)
Examples
========
>>> from sympy.crypto.crypto import (
... dh_private_key, dh_public_key, dh_shared_key)
>>> prk = dh_private_key();
>>> p, g, x = dh_public_key(prk);
>>> sk = dh_shared_key((p, g, x), 1000)
>>> sk == pow(x, 1000, p)
True
"""
p, _, x = key
if 1 >= b or b >= p:
raise ValueError(filldedent('''
            Value of b should be greater than 1 and less
            than the prime %s.''' % p))
return pow(x, b, p)
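# Illustrative sketch (an addition for this write-up, not part of the
# original module): a full Diffie-Hellman round trip using the three
# helpers above. Both calls to ``dh_private_key`` return the same
# (p, g) pair, since ``nextprime``/``primitive_root`` are deterministic
# for a fixed ``digit``; wrapping the demo in a function keeps module
# import side-effect free.
def _dh_example():
    p, g, a = dh_private_key(digit=12)   # Alice's key material
    _, _, b = dh_private_key(digit=12)   # Bob's secret exponent
    alice_pub = dh_public_key((p, g, a))     # (p, g, g**a mod p)
    bob_pub = dh_public_key((p, g, b))       # (p, g, g**b mod p)
    shared_a = dh_shared_key(bob_pub, a)     # Alice computes (g**b)**a
    shared_b = dh_shared_key(alice_pub, b)   # Bob computes (g**a)**b
    assert shared_a == shared_b              # both equal g**(a*b) mod p
    return shared_a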
| bsd-3-clause |
jkorell/PTVS | Python/Product/Analyzer/BuiltinScraper.py | 18 | 21219 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import re
import sys
import types
import PythonScraper
try:
import thread
except:
import _thread as thread
try:
import __builtin__ as __builtins__
except ImportError:
import builtins as __builtins__
def safe_dir(obj):
try:
return frozenset(obj.__dict__) | frozenset(dir(obj))
except:
# Some types crash when we access __dict__ and/or dir()
pass
try:
return frozenset(dir(obj))
except:
pass
try:
return frozenset(obj.__dict__)
except:
pass
return frozenset()
def builtins_keys():
if isinstance(__builtins__, dict):
return __builtins__.keys()
return dir(__builtins__)
def get_builtin(name):
if isinstance(__builtins__, dict):
return __builtins__[name]
return getattr(__builtins__, name)
safe_getattr = PythonScraper.safe_getattr
BUILTIN_TYPES = [type_name for type_name in builtins_keys() if type(get_builtin(type_name)) is type]
if sys.version_info[0] >= 3:
BUILTIN = 'builtins'
unicode = str
else:
BUILTIN = '__builtin__'
TYPE_OVERRIDES = {
'string': PythonScraper.type_to_typeref(types.CodeType),
's': PythonScraper.type_to_typeref(str),
'integer': PythonScraper.type_to_typeref(int),
'boolean': PythonScraper.type_to_typeref(bool),
'number': PythonScraper.type_to_typeref(int),
'pid': PythonScraper.type_to_typeref(int),
'ppid': PythonScraper.type_to_typeref(int),
'fd': PythonScraper.type_to_typeref(int),
'handle': PythonScraper.type_to_typeref(int),
'Exit': PythonScraper.type_to_typeref(int),
'fd2': PythonScraper.type_to_typeref(int),
'Integral': PythonScraper.type_to_typeref(int),
'exit_status':PythonScraper.type_to_typeref(int),
'old_mask': PythonScraper.type_to_typeref(int),
'source': PythonScraper.type_to_typeref(str),
'newpos': PythonScraper.type_to_typeref(int),
'key': PythonScraper.type_to_typeref(str),
'dictionary': PythonScraper.type_to_typeref(dict),
'None': PythonScraper.type_to_typeref(type(None)),
'floating': PythonScraper.type_to_typeref(float),
'filename': PythonScraper.type_to_typeref(str),
'path': PythonScraper.type_to_typeref(str),
'byteswritten': PythonScraper.type_to_typeref(int),
'unicode': PythonScraper.type_to_typeref(unicode),
'Unicode': PythonScraper.type_to_typeref(unicode),
'True': PythonScraper.type_to_typeref(bool),
'False': PythonScraper.type_to_typeref(bool),
'lock': PythonScraper.type_to_typeref(thread.LockType),
'code': PythonScraper.type_to_typeref(types.CodeType),
'module': PythonScraper.type_to_typeref(types.ModuleType),
'size': PythonScraper.type_to_typeref(int),
'INT': PythonScraper.type_to_typeref(int),
'STRING': PythonScraper.type_to_typeref(str),
'TUPLE': PythonScraper.type_to_typeref(tuple),
'OBJECT': PythonScraper.type_to_typeref(object),
'LIST': PythonScraper.type_to_typeref(list),
'DICT': PythonScraper.type_to_typeref(dict),
'char *': PythonScraper.type_to_typeref(str),
'wchar_t *': PythonScraper.type_to_typeref(unicode),
'CHAR *': PythonScraper.type_to_typeref(str),
'TCHAR *': PythonScraper.type_to_typeref(str),
'WCHAR *': PythonScraper.type_to_typeref(unicode),
'LPSTR': PythonScraper.type_to_typeref(str),
'LPCSTR': PythonScraper.type_to_typeref(str),
'LPTSTR': PythonScraper.type_to_typeref(str),
'LPCTSTR': PythonScraper.type_to_typeref(str),
'LPWSTR': PythonScraper.type_to_typeref(unicode),
'LPCWSTR': PythonScraper.type_to_typeref(unicode),
}
try:
TYPE_OVERRIDES['file object'] = PythonScraper.type_to_typeref(file)
except NameError:
try:
import _io
TYPE_OVERRIDES['file object'] = PythonScraper.type_to_typeref(_io._IOBase)
except (NameError, ImportError):
pass
RETURN_TYPE_OVERRIDES = dict(TYPE_OVERRIDES)
RETURN_TYPE_OVERRIDES.update({'string': PythonScraper.type_to_typeref(str)})
def type_name_to_typeref(name, mod, type_overrides = TYPE_OVERRIDES):
arg_type = type_overrides.get(name, None)
if arg_type is None:
if name in BUILTIN_TYPES:
arg_type = PythonScraper.type_to_typeref(get_builtin(name))
elif mod is not None and name in mod.__dict__:
arg_type = PythonScraper.typename_to_typeref(mod.__name__, name)
elif name.startswith('list'):
arg_type = PythonScraper.type_to_typeref(list)
else:
# see if we can find it in any module we've imported...
for mod_name, mod in sys.modules.items():
if mod is not None and name in mod.__dict__ and isinstance(mod.__dict__[name], type):
arg_type = PythonScraper.typename_to_typeref(mod_name, name)
break
else:
first_space = name.find(' ')
if first_space != -1:
return type_name_to_typeref(name[:first_space], mod, type_overrides)
arg_type = PythonScraper.typename_to_typeref(name)
return arg_type
OBJECT_TYPE = PythonScraper.type_to_typeref(object)
TOKENS_REGEX = '(' + '|'.join([
r'(?:[a-zA-Z_][0-9a-zA-Z_-]*)', # identifier
r'(?:-?[0-9]+[lL]?(?!\.))', # integer value
r'(?:-?[0-9]*\.[0-9]+)', # floating point value
r'(?:-?[0-9]+\.[0-9]*)', # floating point value
r'(?:\s*\'.*?(?<!\\)\')', # single-quote string
r'(?:\s*".*?(?<!\\)")', # double-quote string
r'(?:\.\.\.)', # ellipsis
r'(?:\.)', # dot
r'(?:\()', # open paren
r'(?:\))', # close paren
r'(?:\:)', # colon
r'(?:-->)', # return value
r'(?:->)', # return value
r'(?:=>)', # return value
r'(?:,)', # comma
r'(?:=)', # assignment (default value)
r'(?:\[)',
r'(?:\])',
r'(?:\*\*)',
r'(?:\*)',
]) + ')'
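# Small illustration (added here, not in the original scraper): because
# TOKENS_REGEX is a single capturing group, re.split keeps the matched
# tokens at the odd indices and the text between tokens at the even
# indices -- exactly the layout parse_doc_str below depends on.
def _tokenize_example():
    sample = 'abs(number) -> number'
    tokens = re.split(TOKENS_REGEX, sample)
    # tokens == ['', 'abs', '', '(', '', 'number', '', ')',
    #            ' ', '->', ' ', 'number', '']
    return tokens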
def get_ret_type(ret_type, obj_class, mod):
if ret_type is not None:
if ret_type == 'copy' and obj_class is not None:
# returns a copy of self
return PythonScraper.type_to_typelist(obj_class)
else:
return [type_name_to_typeref(ret_type, mod, RETURN_TYPE_OVERRIDES)]
RETURNS_REGEX = [r'^\s*returns?[\s\-]*[a-z_]\w*\s*:\s*([a-z_]\w*)']
def update_overload_from_doc_str(overload, doc_str, obj_class, mod):
# see if we can get additional information from the doc string
if 'ret_type' not in overload:
for ret_regex in RETURNS_REGEX:
match = re.search(ret_regex, doc_str, re.MULTILINE | re.IGNORECASE)
if match:
ret_type = match.groups(0)[0]
overload['ret_type'] = get_ret_type(ret_type, obj_class, mod)
break
def parse_doc_str(input_str, module_name, mod, func_name, extra_args = [], obj_class = None):
    # We split on TOKENS_REGEX; because the pattern is one capturing
    # group, every other item in the result is a token and the rest is
    # the text between tokens. Unrecognized text (for example the
    # free-form description of the function) lands in the even
    # positions, so we join such ranges back together to recover it.
tokens = re.split(TOKENS_REGEX, input_str)
start_token = 0
last_identifier = None
cur_token = 1
overloads = []
while cur_token < len(tokens):
token = tokens[cur_token]
# see if we have modname.funcname(
if (cur_token + 10 < len(tokens) and
token == module_name and
tokens[cur_token + 2] == '.' and
tokens[cur_token + 4] == func_name and
tokens[cur_token + 6] == '('):
sig_start = cur_token
args, ret_type, cur_token = parse_args(tokens, cur_token + 8, mod)
doc_str = ''.join(tokens[start_token:sig_start])
if doc_str.find(' ') == -1:
doc_str = ''
if (not args or doc_str) and overloads:
# if we already parsed an overload, and are now getting an argless
# overload we're likely just seeing a reference to the function in
# a doc string, let's ignore that. This is betting on the idea that
# people list overloads first, then doc strings, and that people tend
# to list overloads from simplest to more complex. an example of this
# is the future_builtins.ascii doc string
# We also skip it if we have a doc string, this comes up in overloads
# like isinstance which has example calls embedded in the doc string
continue
start_token = cur_token
overload = {'args': tuple(extra_args + args), 'doc': doc_str}
ret_types = get_ret_type(ret_type, obj_class, mod)
if ret_types is not None:
overload['ret_type'] = ret_types
update_overload_from_doc_str(overload, doc_str, obj_class, mod)
overloads.append(overload)
# see if we have funcname(
elif (cur_token + 4 < len(tokens) and
token == func_name and
tokens[cur_token + 2] == '('):
sig_start = cur_token
args, ret_type, cur_token = parse_args(tokens, cur_token + 4, mod)
doc_str = ''.join(tokens[start_token:sig_start])
if doc_str.find(' ') == -1:
doc_str = ''
if (not args or doc_str) and overloads:
# if we already parsed an overload, and are now getting an argless
# overload we're likely just seeing a reference to the function in
# a doc string, let's ignore that. This is betting on the idea that
# people list overloads first, then doc strings, and that people tend
# to list overloads from simplest to more complex. an example of this
# is the future_builtins.ascii doc string
# We also skip it if we have a doc string, this comes up in overloads
# like isinstance which has example calls embedded in the doc string
continue
start_token = cur_token
overload = {'args': tuple(extra_args + args), 'doc': doc_str}
ret_types = get_ret_type(ret_type, obj_class, mod)
if ret_types is not None:
overload['ret_type'] = ret_types
update_overload_from_doc_str(overload, doc_str, obj_class, mod)
overloads.append(overload)
else:
# append to doc string
cur_token += 2
finish_doc = ''.join(tokens[start_token:cur_token])
if finish_doc:
if not overloads:
# This occurs when the docstring does not include a function spec
overloads.append({
'args': ({'name': 'args', 'arg_format': '*'}, {'name': 'kwargs', 'arg_format': '**'}),
'doc': ''
})
for overload in overloads:
overload['doc'] += finish_doc
update_overload_from_doc_str(overload, overload['doc'], obj_class, mod)
return overloads
IDENTIFIER_REGEX = re.compile('^[a-zA-Z_][a-zA-Z_0-9-]*$')
def is_identifier(token):
if IDENTIFIER_REGEX.match(token):
return True
return False
RETURN_TOKENS = set(['-->', '->', '=>', 'return'])
def parse_args(tokens, cur_token, module):
args = []
arg = []
annotation = None
default_value = None
ignore = False
arg_tokens = []
next_is_optional = False
is_optional = False
paren_nesting = 0
while cur_token < len(tokens):
token = tokens[cur_token]
cur_token += 1
if token in (',', ')') and paren_nesting == 0:
arg_tokens.append((arg, annotation, default_value, is_optional))
is_optional = False
arg = []
annotation = None
default_value = None
if token == ')':
cur_token += 1
break
elif ignore:
continue
elif token == '=':
if default_value is None:
default_value = []
else:
ignore = True
elif token == ':':
if annotation is None and default_value is None:
annotation = []
else:
ignore = True
elif default_value is not None:
default_value.append(token)
elif annotation is not None:
annotation.append(token)
elif token == '[':
next_is_optional = True
elif token in (']', ' ', ''):
pass
else:
arg.append(token)
if next_is_optional:
is_optional, next_is_optional = True, False
if token == '(':
paren_nesting += 1
elif token == ')':
paren_nesting -= 1
#from pprint import pprint; pprint(arg_tokens)
for arg, annotation, default_value, is_optional in arg_tokens:
if not arg or arg[0] == '/':
continue
arg_name = None
star_args = None
if arg[0] == '(':
names = [arg.pop(0)]
while names[-1] != ')' and arg:
names.append(arg.pop(0))
if names[-1] == ')':
names.pop()
arg_name = ', '.join(n for n in names[1:] if is_identifier(n))
elif is_identifier(arg[-1]):
arg_name = arg.pop()
elif arg[-1] == '...':
arg_name = 'args'
star_args = '*'
if not annotation and arg:
if len(arg) > 1 and arg[-1] == '*':
# C style prototype
annotation = [' '.join(a for a in arg if a != 'const')]
elif is_identifier(arg[-1]):
annotation = arg[-1:]
elif arg[-1] == ')':
annotation = [arg.pop()]
while annotation[0] != '(':
annotation.insert(0, arg.pop())
if arg and arg[0] in ('*', '**'):
star_args = arg[0]
data = { }
if arg_name:
data['name'] = arg_name
elif star_args == '*':
data['name'] = 'args'
elif star_args == '**':
data['name'] = 'kwargs'
else:
data['name'] = 'arg'
if annotation and len(annotation) == 1:
data['type'] = [type_name_to_typeref(annotation[0], module)]
if default_value:
default_value = [d for d in default_value if d]
if is_optional and default_value[-1] == ']':
default_value.pop()
data['default_value'] = ''.join(default_value).strip()
elif is_optional:
data['default_value'] = 'None'
if star_args:
data['arg_format'] = star_args
args.append(data)
# end of params, check for ret value
ret_type = None
if cur_token + 2 < len(tokens) and tokens[cur_token] in RETURN_TOKENS:
ret_type_start = cur_token + 2
# we might have a descriptive return value, 'list of fob'
while ret_type_start < len(tokens) and is_identifier(tokens[ret_type_start]):
if tokens[ret_type_start - 1].find('\n') != -1:
break
ret_type_start += 2
if ret_type_start < len(tokens) and ',' in tokens[ret_type_start]:
# fob(oar, baz) -> some info about the return, and more info, and more info.
# "some info" is unlikely to be a return type
ret_type = ''
cur_token += 2
else:
ret_type = ''.join(tokens[cur_token + 2:ret_type_start]).strip()
cur_token = ret_type_start
elif (cur_token + 4 < len(tokens) and
tokens[cur_token] == ':' and tokens[cur_token + 2] in RETURN_TOKENS):
ret_type_start = cur_token + 4
# we might have a descriptive return value, 'list of fob'
while ret_type_start < len(tokens) and is_identifier(tokens[ret_type_start]):
if tokens[ret_type_start - 1].find('\n') != -1:
break
ret_type_start += 2
if ret_type_start < len(tokens) and ',' in tokens[ret_type_start]:
# fob(oar, baz) -> some info about the return, and more info, and more info.
# "some info" is unlikely to be a return type
ret_type = ''
cur_token += 4
else:
ret_type = ''.join(tokens[cur_token + 4:ret_type_start]).strip()
cur_token = ret_type_start
return args, ret_type, cur_token
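# Hedged usage sketch (an illustrative addition, not in the original
# file): run parse_doc_str over a typical builtin doc string and get a
# list of overload dicts carrying 'args', 'doc' and, when a return
# annotation is recognized, 'ret_type'.
def _parse_doc_str_example():
    doc = 'pow(x, y[, z]) -> number\n\nWith two arguments, equivalent to x**y.'
    overloads = parse_doc_str(doc, BUILTIN, None, 'pow')
    # Expect a single overload whose args cover 'x', 'y' and an
    # optional 'z', with ret_type resolved to int via the overrides.
    return overloads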
if sys.version_info[0] >= 3:
str_types = (str, bytes)
else:
str_types = (str, unicode)
def get_overloads_from_doc_string(doc_str, mod, obj_class, func_name, extra_args = []):
if isinstance(doc_str, str_types):
decl_mod = None
if isinstance(mod, types.ModuleType):
decl_mod = mod
mod = decl_mod.__name__
elif mod is not None:
decl_mod = sys.modules.get(mod, None)
res = parse_doc_str(doc_str, mod, decl_mod, func_name, extra_args, obj_class)
if res:
for i, v in enumerate(res):
if 'ret_type' not in v or (not v['ret_type'] or v['ret_type'] == ('', '')):
alt_ret_type = v['doc'].find('returned as a ')
if alt_ret_type != -1:
last_space = v['doc'].find(' ', alt_ret_type + 14)
last_new_line = v['doc'].find('\n', alt_ret_type + 14)
if last_space == -1:
if last_new_line == -1:
last_space = None
else:
last_space = last_new_line
elif last_new_line == -1:
last_space = None
else:
last_space = last_new_line
ret_type_str = v['doc'][alt_ret_type+14:last_space]
if ret_type_str.endswith('.') or ret_type_str.endswith(','):
ret_type_str = ret_type_str[:-1]
new_ret_type = get_ret_type(ret_type_str, obj_class, decl_mod)
res[i]['ret_type'] = new_ret_type
return res
return None
def get_overloads(func, is_method = False):
if is_method:
extra_args = [{'type': PythonScraper.type_to_typelist(object), 'name': 'self'}]
else:
extra_args = []
func_doc = safe_getattr(func, '__doc__', None)
if not func_doc:
return None
return get_overloads_from_doc_string(
func_doc,
safe_getattr(func, '__module__', None),
safe_getattr(func, '__objclass__', None),
safe_getattr(func, '__name__', None),
extra_args,
)
def get_descriptor_type(descriptor):
return object
def get_new_overloads(type_obj, obj):
try:
type_doc = safe_getattr(type_obj, '__doc__', None)
type_type = type(type_obj)
except:
return None
res = get_overloads_from_doc_string(
type_doc,
safe_getattr(type_obj, '__module__', None),
type_type,
safe_getattr(type_obj, '__name__', None),
[{'type': PythonScraper.type_to_typelist(type), 'name': 'cls'}],
)
if not res:
obj_doc = safe_getattr(obj, '__doc__', None)
if not obj_doc:
return None
res = get_overloads_from_doc_string(
obj_doc,
safe_getattr(type_obj, '__module__', None),
type_type,
safe_getattr(type_obj, '__name__', None),
)
return res
def should_include_module(name):
return True
| apache-2.0 |
orwell-int/agent-server-game-python | setup.py | 1 | 1239 | #!/usr/bin/env python
import setuptools
# Hack to prevent a spurious "TypeError: 'NoneType' object is not
# callable" error in multiprocessing/util.py's _exit_function on exit
# of python setup.py test (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
setuptools.setup(
name='orwell.agent',
version='0.0.1',
description='Agent connecting to the game server.',
author='',
author_email='',
packages=setuptools.find_packages(exclude="test"),
test_suite='nose.collector',
install_requires=['pyzmq', 'cliff'],
tests_require=['nose', 'coverage', 'mock'],
entry_points={
'console_scripts': [
'thought_police = orwell.agent.main:main',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6'],
python_requires='>=3.6.0',
)
| bsd-3-clause |
Turpial/Turpial | turpial/ui/gtk/tray.py | 3 | 2115 | # -*- coding: utf-8 -*-
# GTK3 tray icon for Turpial
from gi.repository import Gtk
from turpial import DESC
from turpial.ui.lang import i18n
class TrayIcon(Gtk.StatusIcon):
def __init__(self, base):
Gtk.StatusIcon.__init__(self)
self.base = base
self.set_from_pixbuf(self.base.load_image('turpial-tray.png', True))
self.set_tooltip_text(DESC)
self.menu = Gtk.Menu()
def __build_common_menu(self):
accounts = Gtk.MenuItem(i18n.get('accounts'))
preferences = Gtk.MenuItem(i18n.get('preferences'))
sounds = Gtk.CheckMenuItem(i18n.get('enable_sounds'))
        #sounds.set_active(not self.sound._disable)
exit_ = Gtk.MenuItem(i18n.get('exit'))
self.menu.append(accounts)
self.menu.append(preferences)
self.menu.append(sounds)
self.menu.append(Gtk.SeparatorMenuItem())
self.menu.append(exit_)
accounts.connect('activate', self.base.show_accounts_dialog)
preferences.connect('activate', self.base.show_preferences_dialog)
sounds.connect('toggled', self.base.disable_sound)
exit_.connect('activate', self.base.main_quit)
def empty(self):
self.menu = Gtk.Menu()
self.__build_common_menu()
def normal(self):
self.menu = Gtk.Menu()
tweet = Gtk.MenuItem(i18n.get('new_tweet'))
tweet.connect('activate', self.base.show_update_box)
direct = Gtk.MenuItem(i18n.get('direct_message'))
direct.connect('activate', self.base.show_update_box, True)
self.menu.append(tweet)
self.menu.append(direct)
self.__build_common_menu()
def popup(self, button, activate_time):
self.menu.show_all()
self.menu.popup(None, None, None, None, button, activate_time)
return True
# Change the tray icon image to indicate updates
def notify(self):
self.set_from_pixbuf(self.base.load_image('turpial-tray-update.png', True))
# Clear the tray icon image
def clear(self):
self.set_from_pixbuf(self.base.load_image('turpial-tray.png', True))
| gpl-3.0 |
sidmitra/django_nonrel_testapp | django/forms/util.py | 311 | 1983 | from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.safestring import mark_safe
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
"""
return u''.join([u' %s="%s"' % (k, conditional_escape(v)) for k, v in attrs.items()])
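# Brief illustration (added; not part of Django's source): flatatt
# produces the leading-space attribute fragment that widgets append
# directly after a tag name.
def _flatatt_example():
    fragment = flatatt({'id': 'email', 'maxlength': '75'})
    # e.g. u' id="email" maxlength="75"' (dict ordering may vary on
    # this vintage of Python)
    return fragment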
class ErrorDict(dict, StrAndUnicode):
"""
A collection of errors that knows how to display itself in various formats.
The dictionary keys are the field names, and the values are the errors.
"""
def __unicode__(self):
return self.as_ul()
def as_ul(self):
if not self: return u''
return mark_safe(u'<ul class="errorlist">%s</ul>'
% ''.join([u'<li>%s%s</li>' % (k, force_unicode(v))
for k, v in self.items()]))
def as_text(self):
return u'\n'.join([u'* %s\n%s' % (k, u'\n'.join([u' * %s' % force_unicode(i) for i in v])) for k, v in self.items()])
class ErrorList(list, StrAndUnicode):
"""
A collection of errors that knows how to display itself in various formats.
"""
def __unicode__(self):
return self.as_ul()
def as_ul(self):
if not self: return u''
return mark_safe(u'<ul class="errorlist">%s</ul>'
% ''.join([u'<li>%s</li>' % conditional_escape(force_unicode(e)) for e in self]))
def as_text(self):
if not self: return u''
return u'\n'.join([u'* %s' % force_unicode(e) for e in self])
def __repr__(self):
return repr([force_unicode(e) for e in self])
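# Minimal sketch (an illustrative addition): how the two render methods
# of ErrorList differ for the same errors.
def _errorlist_example():
    errors = ErrorList([u'This field is required.'])
    ul = errors.as_ul()      # u'<ul class="errorlist"><li>This field is required.</li></ul>'
    text = errors.as_text()  # u'* This field is required.'
    return ul, text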
| bsd-3-clause |
robclark/chromium | third_party/tlslite/tlslite/TLSConnection.py | 6 | 71226 | """
MAIN CLASS FOR TLS LITE (START HERE!).
"""
from __future__ import generators
import socket
from utils.compat import formatExceptionTrace
from TLSRecordLayer import TLSRecordLayer
from Session import Session
from constants import *
from utils.cryptomath import getRandomBytes
from errors import *
from messages import *
from mathtls import *
from HandshakeSettings import HandshakeSettings
class TLSConnection(TLSRecordLayer):
"""
This class wraps a socket and provides TLS handshaking and data
transfer.
To use this class, create a new instance, passing a connected
socket into the constructor. Then call some handshake function.
If the handshake completes without raising an exception, then a TLS
connection has been negotiated. You can transfer data over this
connection as if it were a socket.
This class provides both synchronous and asynchronous versions of
its key functions. The synchronous versions should be used when
writing single-or multi-threaded code using blocking sockets. The
asynchronous versions should be used when performing asynchronous,
event-based I/O with non-blocking sockets.
Asynchronous I/O is a complicated subject; typically, you should
not use the asynchronous functions directly, but should use some
framework like asyncore or Twisted which TLS Lite integrates with
(see
L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or
L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}).
"""
def __init__(self, sock):
"""Create a new TLSConnection instance.
@param sock: The socket data will be transmitted on. The
socket should already be connected. It may be in blocking or
non-blocking mode.
@type sock: L{socket.socket}
"""
TLSRecordLayer.__init__(self, sock)
def handshakeClientSRP(self, username, password, session=None,
settings=None, checker=None, async=False):
"""Perform an SRP handshake in the role of client.
This function performs a TLS/SRP handshake. SRP mutually
authenticates both parties to each other using only a
username and password. This function may also perform a
combined SRP and server-certificate handshake, if the server
chooses to authenticate itself with a certificate chain in
addition to doing SRP.
TLS/SRP is non-standard. Most TLS implementations don't
support it. See
U{http://www.ietf.org/html.charters/tls-charter.html} or
U{http://trevp.net/tlssrp/} for the latest information on
TLS/SRP.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type username: str
@param username: The SRP username.
@type password: str
@param password: The SRP password.
@type session: L{tlslite.Session.Session}
@param session: A TLS session to attempt to resume. This
session must be an SRP session performed with the same username
and password as were passed in. If the resumption does not
succeed, a full SRP handshake will be performed.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(srpParams=(username, password),
session=session, settings=settings, checker=checker)
if async:
return handshaker
for result in handshaker:
pass
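    # Illustrative usage (comment-only sketch, not in the original file;
    # assumes ``sock`` is a connected socket and the server knows this
    # SRP username/password):
    #
    #   connection = TLSConnection(sock)
    #   connection.handshakeClientSRP("alice", "abra123")  # blocking form
    #   connection.write("hello")
    #
    # With async=True the caller drives the returned generator instead,
    # typically from a select() loop: a yielded 0 means "wait until the
    # socket is readable", 1 means "wait until writable", and
    # StopIteration means the handshake finished.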
def handshakeClientCert(self, certChain=None, privateKey=None,
session=None, settings=None, checker=None,
async=False):
"""Perform a certificate-based handshake in the role of client.
This function performs an SSL or TLS handshake. The server
will authenticate itself using an X.509 or cryptoID certificate
chain. If the handshake succeeds, the server's certificate
chain will be stored in the session's serverCertChain attribute.
Unless a checker object is passed in, this function does no
validation or checking of the server's certificate chain.
If the server requests client authentication, the
client will send the passed-in certificate chain, and use the
passed-in private key to authenticate itself. If no
certificate chain and private key were passed in, the client
will attempt to proceed without client authentication. The
server may or may not allow this.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: The certificate chain to be used if the
server requests client authentication.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: The private key to be used if the server
requests client authentication.
@type session: L{tlslite.Session.Session}
@param session: A TLS session to attempt to resume. If the
resumption does not succeed, a full handshake will be
performed.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(certParams=(certChain,
privateKey), session=session, settings=settings,
checker=checker)
if async:
return handshaker
for result in handshaker:
pass
def handshakeClientUnknown(self, srpCallback=None, certCallback=None,
session=None, settings=None, checker=None,
async=False):
"""Perform a to-be-determined type of handshake in the role of client.
This function performs an SSL or TLS handshake. If the server
requests client certificate authentication, the
certCallback will be invoked and should return a (certChain,
privateKey) pair. If the callback returns None, the library
will attempt to proceed without client authentication. The
server may or may not allow this.
If the server requests SRP authentication, the srpCallback
will be invoked and should return a (username, password) pair.
If the callback returns None, the local implementation will
signal a user_canceled error alert.
After the handshake completes, the client can inspect the
connection's session attribute to determine what type of
authentication was performed.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type srpCallback: callable
@param srpCallback: The callback to be used if the server
requests SRP authentication. If None, the client will not
offer support for SRP ciphersuites.
@type certCallback: callable
@param certCallback: The callback to be used if the server
requests client certificate authentication.
@type session: L{tlslite.Session.Session}
@param session: A TLS session to attempt to resume. If the
resumption does not succeed, a full handshake will be
performed.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(unknownParams=(srpCallback,
certCallback), session=session, settings=settings,
checker=checker)
if async:
return handshaker
for result in handshaker:
pass
def handshakeClientSharedKey(self, username, sharedKey, settings=None,
checker=None, async=False):
"""Perform a shared-key handshake in the role of client.
This function performs a shared-key handshake. Using shared
symmetric keys of high entropy (128 bits or greater) mutually
authenticates both parties to each other.
TLS with shared-keys is non-standard. Most TLS
implementations don't support it. See
U{http://www.ietf.org/html.charters/tls-charter.html} for the
latest information on TLS with shared-keys. If the shared-keys
        Internet-Draft changes or is superseded, TLS Lite will track
those changes, so the shared-key support in later versions of
TLS Lite may become incompatible with this version.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type username: str
@param username: The shared-key username.
@type sharedKey: str
@param sharedKey: The shared key.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type async: bool
@param async: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
@rtype: None or an iterable
@return: If 'async' is True, a generator object will be
returned.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = self._handshakeClientAsync(sharedKeyParams=(username,
sharedKey), settings=settings, checker=checker)
if async:
return handshaker
for result in handshaker:
pass
def _handshakeClientAsync(self, srpParams=(), certParams=(),
unknownParams=(), sharedKeyParams=(),
session=None, settings=None, checker=None,
recursive=False):
handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams,
certParams=certParams, unknownParams=unknownParams,
sharedKeyParams=sharedKeyParams, session=session,
settings=settings, recursive=recursive)
for result in self._handshakeWrapperAsync(handshaker, checker):
yield result
def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams,
sharedKeyParams, session, settings, recursive):
if not recursive:
self._handshakeStart(client=True)
#Unpack parameters
srpUsername = None # srpParams
password = None # srpParams
clientCertChain = None # certParams
privateKey = None # certParams
srpCallback = None # unknownParams
certCallback = None # unknownParams
#session # sharedKeyParams (or session)
#settings # settings
if srpParams:
srpUsername, password = srpParams
elif certParams:
clientCertChain, privateKey = certParams
elif unknownParams:
srpCallback, certCallback = unknownParams
elif sharedKeyParams:
session = Session()._createSharedKey(*sharedKeyParams)
if not settings:
settings = HandshakeSettings()
settings = settings._filter()
#Validate parameters
if srpUsername and not password:
raise ValueError("Caller passed a username but no password")
if password and not srpUsername:
raise ValueError("Caller passed a password but no username")
if clientCertChain and not privateKey:
raise ValueError("Caller passed a certChain but no privateKey")
if privateKey and not clientCertChain:
raise ValueError("Caller passed a privateKey but no certChain")
if clientCertChain:
foundType = False
try:
import cryptoIDlib.CertChain
if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain):
if "cryptoID" not in settings.certificateTypes:
raise ValueError("Client certificate doesn't "\
"match Handshake Settings")
settings.certificateTypes = ["cryptoID"]
foundType = True
except ImportError:
pass
if not foundType and isinstance(clientCertChain,
X509CertChain):
if "x509" not in settings.certificateTypes:
raise ValueError("Client certificate doesn't match "\
"Handshake Settings")
settings.certificateTypes = ["x509"]
foundType = True
if not foundType:
raise ValueError("Unrecognized certificate type")
if session:
if not session.valid():
session = None #ignore non-resumable sessions...
elif session.resumable and \
(session.srpUsername != srpUsername):
raise ValueError("Session username doesn't match")
#Add Faults to parameters
if srpUsername and self.fault == Fault.badUsername:
srpUsername += "GARBAGE"
if password and self.fault == Fault.badPassword:
password += "GARBAGE"
if sharedKeyParams:
identifier = sharedKeyParams[0]
sharedKey = sharedKeyParams[1]
if self.fault == Fault.badIdentifier:
identifier += "GARBAGE"
session = Session()._createSharedKey(identifier, sharedKey)
elif self.fault == Fault.badSharedKey:
sharedKey += "GARBAGE"
session = Session()._createSharedKey(identifier, sharedKey)
#Initialize locals
serverCertChain = None
cipherSuite = 0
certificateType = CertificateType.x509
premasterSecret = None
#Get client nonce
clientRandom = getRandomBytes(32)
#Initialize acceptable ciphersuites
cipherSuites = []
if srpParams:
cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames)
cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames)
elif certParams:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
elif unknownParams:
if srpCallback:
cipherSuites += \
CipherSuite.getSrpRsaSuites(settings.cipherNames)
cipherSuites += \
CipherSuite.getSrpSuites(settings.cipherNames)
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
elif sharedKeyParams:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
else:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
#Initialize acceptable certificate types
certificateTypes = settings._getCertificateTypes()
#Tentatively set the version to the client's minimum version.
#We'll use this for the ClientHello, and if an error occurs
#parsing the Server Hello, we'll use this version for the response
self.version = settings.maxVersion
#Either send ClientHello (with a resumable session)...
if session:
#If it's a resumable (i.e. not a shared-key session), then its
#ciphersuite must be one of the acceptable ciphersuites
if (not sharedKeyParams) and \
session.cipherSuite not in cipherSuites:
raise ValueError("Session's cipher suite not consistent "\
"with parameters")
else:
clientHello = ClientHello()
clientHello.create(settings.maxVersion, clientRandom,
session.sessionID, cipherSuites,
certificateTypes, session.srpUsername)
#Or send ClientHello (without)
else:
clientHello = ClientHello()
clientHello.create(settings.maxVersion, clientRandom,
createByteArraySequence([]), cipherSuites,
certificateTypes, srpUsername)
for result in self._sendMsg(clientHello):
yield result
#Get ServerHello (or missing_srp_username)
for result in self._getMsg((ContentType.handshake,
ContentType.alert),
HandshakeType.server_hello):
if result in (0,1):
yield result
else:
break
msg = result
if isinstance(msg, ServerHello):
serverHello = msg
elif isinstance(msg, Alert):
alert = msg
#If it's not a missing_srp_username, re-raise
if alert.description != AlertDescription.missing_srp_username:
self._shutdown(False)
raise TLSRemoteAlert(alert)
#If we're not in SRP callback mode, we won't have offered SRP
#without a username, so we shouldn't get this alert
if not srpCallback:
for result in self._sendError(\
AlertDescription.unexpected_message):
yield result
srpParams = srpCallback()
#If the callback returns None, cancel the handshake
if srpParams == None:
for result in self._sendError(AlertDescription.user_canceled):
yield result
#Recursively perform handshake
for result in self._handshakeClientAsyncHelper(srpParams,
None, None, None, None, settings, True):
yield result
return
#Get the server version. Do this before anything else, so any
#error alerts will use the server's version
self.version = serverHello.server_version
#Future responses from server must use this version
self._versionCheck = True
#Check ServerHello
if serverHello.server_version < settings.minVersion:
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(serverHello.server_version)):
yield result
if serverHello.server_version > settings.maxVersion:
for result in self._sendError(\
AlertDescription.protocol_version,
"Too new version: %s" % str(serverHello.server_version)):
yield result
if serverHello.cipher_suite not in cipherSuites:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server responded with incorrect ciphersuite"):
yield result
if serverHello.certificate_type not in certificateTypes:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server responded with incorrect certificate type"):
yield result
if serverHello.compression_method != 0:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server responded with incorrect compression method"):
yield result
#Get the server nonce
serverRandom = serverHello.random
#If the server agrees to resume
if session and session.sessionID and \
serverHello.session_id == session.sessionID:
#If a shared-key, we're flexible about suites; otherwise the
#server-chosen suite has to match the session's suite
if sharedKeyParams:
session.cipherSuite = serverHello.cipher_suite
elif serverHello.cipher_suite != session.cipherSuite:
for result in self._sendError(\
AlertDescription.illegal_parameter,\
"Server's ciphersuite doesn't match session"):
yield result
#Set the session for this connection
self.session = session
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._getFinished():
yield result
for result in self._sendFinished():
yield result
#Mark the connection as open
self._handshakeDone(resumed=True)
#If server DOES NOT agree to resume
else:
if sharedKeyParams:
for result in self._sendError(\
AlertDescription.user_canceled,
"Was expecting a shared-key resumption"):
yield result
#We've already validated these
cipherSuite = serverHello.cipher_suite
certificateType = serverHello.certificate_type
#If the server chose an SRP suite...
if cipherSuite in CipherSuite.srpSuites:
#Get ServerKeyExchange, ServerHelloDone
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_key_exchange, cipherSuite):
if result in (0,1):
yield result
else:
break
serverKeyExchange = result
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_hello_done):
if result in (0,1):
yield result
else:
break
serverHelloDone = result
#If the server chose an SRP+RSA suite...
elif cipherSuite in CipherSuite.srpRsaSuites:
#Get Certificate, ServerKeyExchange, ServerHelloDone
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate, certificateType):
if result in (0,1):
yield result
else:
break
serverCertificate = result
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_key_exchange, cipherSuite):
if result in (0,1):
yield result
else:
break
serverKeyExchange = result
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_hello_done):
if result in (0,1):
yield result
else:
break
serverHelloDone = result
#If the server chose an RSA suite...
elif cipherSuite in CipherSuite.rsaSuites:
#Get Certificate[, CertificateRequest], ServerHelloDone
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate, certificateType):
if result in (0,1):
yield result
else:
break
serverCertificate = result
for result in self._getMsg(ContentType.handshake,
(HandshakeType.server_hello_done,
HandshakeType.certificate_request)):
if result in (0,1):
yield result
else:
break
msg = result
certificateRequest = None
if isinstance(msg, CertificateRequest):
certificateRequest = msg
for result in self._getMsg(ContentType.handshake,
HandshakeType.server_hello_done):
if result in (0,1):
yield result
else:
break
serverHelloDone = result
elif isinstance(msg, ServerHelloDone):
serverHelloDone = msg
else:
raise AssertionError()
#Calculate SRP premaster secret, if server chose an SRP or
#SRP+RSA suite
if cipherSuite in CipherSuite.srpSuites + \
CipherSuite.srpRsaSuites:
#Get and check the server's group parameters and B value
N = serverKeyExchange.srp_N
g = serverKeyExchange.srp_g
s = serverKeyExchange.srp_s
B = serverKeyExchange.srp_B
if (g,N) not in goodGroupParameters:
for result in self._sendError(\
AlertDescription.untrusted_srp_parameters,
"Unknown group parameters"):
yield result
if numBits(N) < settings.minKeySize:
for result in self._sendError(\
AlertDescription.untrusted_srp_parameters,
"N value is too small: %d" % numBits(N)):
yield result
if numBits(N) > settings.maxKeySize:
for result in self._sendError(\
AlertDescription.untrusted_srp_parameters,
"N value is too large: %d" % numBits(N)):
yield result
if B % N == 0:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Suspicious B value"):
yield result
#Check the server's signature, if server chose an
#SRP+RSA suite
if cipherSuite in CipherSuite.srpRsaSuites:
#Hash ServerKeyExchange/ServerSRPParams
hashBytes = serverKeyExchange.hash(clientRandom,
serverRandom)
#Extract signature bytes from ServerKeyExchange
sigBytes = serverKeyExchange.signature
if len(sigBytes) == 0:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Server sent an SRP ServerKeyExchange "\
"message without a signature"):
yield result
#Get server's public key from the Certificate message
for result in self._getKeyFromChain(serverCertificate,
settings):
if result in (0,1):
yield result
else:
break
publicKey, serverCertChain = result
#Verify signature
if not publicKey.verify(sigBytes, hashBytes):
for result in self._sendError(\
AlertDescription.decrypt_error,
"Signature failed to verify"):
yield result
#Calculate client's ephemeral DH values (a, A)
a = bytesToNumber(getRandomBytes(32))
A = powMod(g, a, N)
#Calculate client's static DH values (x, v)
x = makeX(bytesToString(s), srpUsername, password)
v = powMod(g, x, N)
#Calculate u
u = makeU(N, A, B)
#Calculate premaster secret
k = makeK(N, g)
S = powMod((B - (k*v)) % N, a+(u*x), N)
if self.fault == Fault.badA:
A = N
S = 0
premasterSecret = numberToBytes(S)
#Send ClientKeyExchange
for result in self._sendMsg(\
ClientKeyExchange(cipherSuite).createSRP(A)):
yield result
#Calculate RSA premaster secret, if server chose an RSA suite
elif cipherSuite in CipherSuite.rsaSuites:
#Handle the presence of a CertificateRequest
if certificateRequest:
if unknownParams and certCallback:
certParamsNew = certCallback()
if certParamsNew:
clientCertChain, privateKey = certParamsNew
#Get server's public key from the Certificate message
for result in self._getKeyFromChain(serverCertificate,
settings):
if result in (0,1):
yield result
else:
break
publicKey, serverCertChain = result
#Calculate premaster secret
premasterSecret = getRandomBytes(48)
premasterSecret[0] = settings.maxVersion[0]
premasterSecret[1] = settings.maxVersion[1]
if self.fault == Fault.badPremasterPadding:
premasterSecret[0] = 5
if self.fault == Fault.shortPremasterSecret:
premasterSecret = premasterSecret[:-1]
#Encrypt premaster secret to server's public key
encryptedPreMasterSecret = publicKey.encrypt(premasterSecret)
#If client authentication was requested, send Certificate
#message, either with certificates or empty
if certificateRequest:
clientCertificate = Certificate(certificateType)
if clientCertChain:
#Check to make sure we have the same type of
#certificates the server requested
wrongType = False
if certificateType == CertificateType.x509:
if not isinstance(clientCertChain, X509CertChain):
wrongType = True
elif certificateType == CertificateType.cryptoID:
if not isinstance(clientCertChain,
cryptoIDlib.CertChain.CertChain):
wrongType = True
if wrongType:
for result in self._sendError(\
AlertDescription.handshake_failure,
"Client certificate is of wrong type"):
yield result
clientCertificate.create(clientCertChain)
for result in self._sendMsg(clientCertificate):
yield result
else:
#The server didn't request client auth, so we
#zeroize these so the clientCertChain won't be
#stored in the session.
privateKey = None
clientCertChain = None
#Send ClientKeyExchange
clientKeyExchange = ClientKeyExchange(cipherSuite,
self.version)
clientKeyExchange.createRSA(encryptedPreMasterSecret)
for result in self._sendMsg(clientKeyExchange):
yield result
#If client authentication was requested and we have a
#private key, send CertificateVerify
if certificateRequest and privateKey:
if self.version == (3,0):
#Create a temporary session object, just for the
#purpose of creating the CertificateVerify
session = Session()
session._calcMasterSecret(self.version,
premasterSecret,
clientRandom,
serverRandom)
verifyBytes = self._calcSSLHandshakeHash(\
session.masterSecret, "")
elif self.version in ((3,1), (3,2)):
verifyBytes = stringToBytes(\
self._handshake_md5.digest() + \
self._handshake_sha.digest())
if self.fault == Fault.badVerifyMessage:
verifyBytes[0] = ((verifyBytes[0]+1) % 256)
signedBytes = privateKey.sign(verifyBytes)
certificateVerify = CertificateVerify()
certificateVerify.create(signedBytes)
for result in self._sendMsg(certificateVerify):
yield result
#Create the session object
self.session = Session()
self.session._calcMasterSecret(self.version, premasterSecret,
clientRandom, serverRandom)
self.session.sessionID = serverHello.session_id
self.session.cipherSuite = cipherSuite
self.session.srpUsername = srpUsername
self.session.clientCertChain = clientCertChain
self.session.serverCertChain = serverCertChain
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._sendFinished():
yield result
for result in self._getFinished():
yield result
#Mark the connection as open
self.session._setResumable(True)
self._handshakeDone(resumed=False)
def handshakeServer(self, sharedKeyDB=None, verifierDB=None,
certChain=None, privateKey=None, reqCert=False,
sessionCache=None, settings=None, checker=None,
reqCAs=None, tlsIntolerant=False):
"""Perform a handshake in the role of server.
This function performs an SSL or TLS handshake. Depending on
the arguments and the behavior of the client, this function can
perform a shared-key, SRP, or certificate-based handshake. It
can also perform a combined SRP and server-certificate
handshake.
Like any handshake function, this can be called on a closed
TLS connection, or on a TLS connection that is already open.
If called on an open connection it performs a re-handshake.
This function does not send a Hello Request message before
performing the handshake, so if re-handshaking is required,
the server must signal the client to begin the re-handshake
through some other means.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
@type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB}
@param sharedKeyDB: A database of shared symmetric keys
associated with usernames. If the client performs a
shared-key handshake, the session's sharedKeyUsername
attribute will be set.
@type verifierDB: L{tlslite.VerifierDB.VerifierDB}
@param verifierDB: A database of SRP password verifiers
associated with usernames. If the client performs an SRP
handshake, the session's srpUsername attribute will be set.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: The certificate chain to be used if the
client requests server certificate authentication.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: The private key to be used if the client
requests server certificate authentication.
@type reqCert: bool
@param reqCert: Whether to request client certificate
authentication. This only applies if the client chooses server
certificate authentication; if the client chooses SRP or
shared-key authentication, this will be ignored. If the client
performs a client certificate authentication, the sessions's
clientCertChain attribute will be set.
@type sessionCache: L{tlslite.SessionCache.SessionCache}
@param sessionCache: An in-memory cache of resumable sessions.
The client can resume sessions from this cache. Alternatively,
if the client performs a full handshake, a new session will be
added to the cache.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites and SSL/TLS version chosen by the server.
@type checker: L{tlslite.Checker.Checker}
@param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.
@type reqCAs: list of L{array.array} of unsigned bytes
@param reqCAs: A collection of DER-encoded DistinguishedNames that
will be sent along with a certificate request. This does not affect
verification.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
@raise tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
for result in self.handshakeServerAsync(sharedKeyDB, verifierDB,
certChain, privateKey, reqCert, sessionCache, settings,
checker, reqCAs, tlsIntolerant):
pass
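    # Illustrative usage (comment-only sketch, not in the original file;
    # ``cert_chain`` and ``private_key`` are assumed to be previously
    # loaded X509CertChain/RSAKey objects for this server):
    #
    #   connection = TLSConnection(client_sock)
    #   connection.handshakeServer(certChain=cert_chain,
    #                              privateKey=private_key,
    #                              sessionCache=session_cache)
    #   data = connection.read()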
def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None,
certChain=None, privateKey=None, reqCert=False,
sessionCache=None, settings=None, checker=None,
reqCAs=None, tlsIntolerant=False):
"""Start a server handshake operation on the TLS connection.
This function returns a generator which behaves similarly to
handshakeServer(). Successive invocations of the generator
will return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or it will raise StopIteration
if the handshake operation is complete.
@rtype: iterable
@return: A generator; see above for details.
"""
handshaker = self._handshakeServerAsyncHelper(\
sharedKeyDB=sharedKeyDB,
verifierDB=verifierDB, certChain=certChain,
privateKey=privateKey, reqCert=reqCert,
sessionCache=sessionCache, settings=settings,
reqCAs=reqCAs,
tlsIntolerant=tlsIntolerant)
for result in self._handshakeWrapperAsync(handshaker, checker):
yield result
def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB,
certChain, privateKey, reqCert, sessionCache,
settings, reqCAs, tlsIntolerant):
self._handshakeStart(client=False)
if (not sharedKeyDB) and (not verifierDB) and (not certChain):
raise ValueError("Caller passed no authentication credentials")
if certChain and not privateKey:
raise ValueError("Caller passed a certChain but no privateKey")
if privateKey and not certChain:
raise ValueError("Caller passed a privateKey but no certChain")
if reqCAs and not reqCert:
raise ValueError("Caller passed reqCAs but not reqCert")
if not settings:
settings = HandshakeSettings()
settings = settings._filter()
#Initialize acceptable cipher suites
cipherSuites = []
if verifierDB:
if certChain:
cipherSuites += \
CipherSuite.getSrpRsaSuites(settings.cipherNames)
cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames)
if sharedKeyDB or certChain:
cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames)
#Initialize acceptable certificate type
certificateType = None
if certChain:
try:
import cryptoIDlib.CertChain
if isinstance(certChain, cryptoIDlib.CertChain.CertChain):
certificateType = CertificateType.cryptoID
except ImportError:
pass
if isinstance(certChain, X509CertChain):
certificateType = CertificateType.x509
if certificateType == None:
raise ValueError("Unrecognized certificate type")
#Initialize locals
clientCertChain = None
serverCertChain = None #We may set certChain to this later
postFinishedError = None
#Tentatively set version to most-desirable version, so if an error
#occurs parsing the ClientHello, this is what we'll use for the
#error alert
self.version = settings.maxVersion
#Get ClientHello
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_hello):
if result in (0,1):
yield result
else:
break
clientHello = result
#If client's version is too low, reject it
if clientHello.client_version < settings.minVersion:
self.version = settings.minVersion
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(clientHello.client_version)):
yield result
if tlsIntolerant and clientHello.client_version > (3, 0):
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#If client's version is too high, propose my highest version
elif clientHello.client_version > settings.maxVersion:
self.version = settings.maxVersion
else:
#Set the version to the client's version
self.version = clientHello.client_version
#Get the client nonce; create server nonce
clientRandom = clientHello.random
serverRandom = getRandomBytes(32)
#Calculate the first cipher suite intersection.
#This is the 'privileged' ciphersuite. We'll use it if we're
#doing a shared-key resumption or a new negotiation. In fact,
#the only time we won't use it is if we're resuming a non-sharedkey
#session, in which case we use the ciphersuite from the session.
#
#Given the current ciphersuite ordering, this means we prefer SRP
#over non-SRP.
for cipherSuite in cipherSuites:
if cipherSuite in clientHello.cipher_suites:
break
else:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#If resumption was requested...
if clientHello.session_id and (sharedKeyDB or sessionCache):
session = None
#Check in the sharedKeys container
if sharedKeyDB and len(clientHello.session_id)==16:
try:
#Trim off zero padding, if any
for x in range(16):
if clientHello.session_id[x]==0:
break
self.allegedSharedKeyUsername = bytesToString(\
clientHello.session_id[:x])
session = sharedKeyDB[self.allegedSharedKeyUsername]
if not session.sharedKey:
raise AssertionError()
#use privileged ciphersuite
session.cipherSuite = cipherSuite
except KeyError:
pass
#Then check in the session cache
if sessionCache and not session:
try:
session = sessionCache[bytesToString(\
clientHello.session_id)]
if session.sharedKey:
raise AssertionError()
if not session.resumable:
raise AssertionError()
#Check for consistency with ClientHello
if session.cipherSuite not in cipherSuites:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
if session.cipherSuite not in clientHello.cipher_suites:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
if clientHello.srp_username:
if clientHello.srp_username != session.srpUsername:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
except KeyError:
pass
#If a session is found..
if session:
#Set the session
self.session = session
#Send ServerHello
serverHello = ServerHello()
serverHello.create(self.version, serverRandom,
session.sessionID, session.cipherSuite,
certificateType)
for result in self._sendMsg(serverHello):
yield result
#From here on, the client's messages must have the right version
self._versionCheck = True
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._sendFinished():
yield result
for result in self._getFinished():
yield result
#Mark the connection as open
self._handshakeDone(resumed=True)
return
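# Note (added commentary, not in the original source): on resumption the
# server sends its Finished message before reading the client's -- the
# reverse of the full-handshake order below -- which matches the abbreviated
# handshake message flow defined by the TLS specification.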
#If not a resumption...
#TRICKY: we might have chosen an RSA suite that was only deemed
#acceptable because of the shared-key resumption. If the shared-
#key resumption failed, because the identifier wasn't recognized,
#we might fall through to here, where we have an RSA suite
#chosen, but no certificate.
if cipherSuite in CipherSuite.rsaSuites and not certChain:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#If an RSA suite is chosen, check for certificate type intersection
#(We do this check down here because if the mismatch occurs but the
# client is using a shared-key session, it's okay)
if cipherSuite in CipherSuite.rsaSuites + \
CipherSuite.srpRsaSuites:
if certificateType not in clientHello.certificate_types:
for result in self._sendError(\
AlertDescription.handshake_failure,
"the client doesn't support my certificate type"):
yield result
#Move certChain -> serverCertChain, now that we're using it
serverCertChain = certChain
#Create sessionID
if sessionCache:
sessionID = getRandomBytes(32)
else:
sessionID = createByteArraySequence([])
#If we've selected an SRP suite, exchange keys and calculate
#premaster secret:
if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites:
#If there's no SRP username...
if not clientHello.srp_username:
#Ask the client to re-send ClientHello with one
for result in self._sendMsg(Alert().create(\
AlertDescription.missing_srp_username,
AlertLevel.warning)):
yield result
#Get ClientHello
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_hello):
if result in (0,1):
yield result
else:
break
clientHello = result
#Check ClientHello
#If client's version is too low, reject it (COPIED CODE; BAD!)
if clientHello.client_version < settings.minVersion:
self.version = settings.minVersion
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(clientHello.client_version)):
yield result
#If client's version is too high, propose my highest version
elif clientHello.client_version > settings.maxVersion:
self.version = settings.maxVersion
else:
#Set the version to the client's version
self.version = clientHello.client_version
#Recalculate the privileged cipher suite, making sure to
#pick an SRP suite
cipherSuites = [c for c in cipherSuites if c in \
CipherSuite.srpSuites + \
CipherSuite.srpRsaSuites]
for cipherSuite in cipherSuites:
if cipherSuite in clientHello.cipher_suites:
break
else:
for result in self._sendError(\
AlertDescription.handshake_failure):
yield result
#Get the client nonce; create server nonce
clientRandom = clientHello.random
serverRandom = getRandomBytes(32)
#The username better be there, this time
if not clientHello.srp_username:
for result in self._sendError(\
AlertDescription.illegal_parameter,
"Client resent a hello, but without the SRP"\
" username"):
yield result
#Get username
self.allegedSrpUsername = clientHello.srp_username
#Get parameters from username
try:
entry = verifierDB[self.allegedSrpUsername]
except KeyError:
for result in self._sendError(\
AlertDescription.unknown_srp_username):
yield result
(N, g, s, v) = entry
#Calculate server's ephemeral DH values (b, B)
b = bytesToNumber(getRandomBytes(32))
k = makeK(N, g)
B = (powMod(g, b, N) + (k*v)) % N
#Create ServerKeyExchange, signing it if necessary
serverKeyExchange = ServerKeyExchange(cipherSuite)
serverKeyExchange.createSRP(N, g, stringToBytes(s), B)
if cipherSuite in CipherSuite.srpRsaSuites:
hashBytes = serverKeyExchange.hash(clientRandom,
serverRandom)
serverKeyExchange.signature = privateKey.sign(hashBytes)
#Send ServerHello[, Certificate], ServerKeyExchange,
#ServerHelloDone
msgs = []
serverHello = ServerHello()
serverHello.create(self.version, serverRandom, sessionID,
cipherSuite, certificateType)
msgs.append(serverHello)
if cipherSuite in CipherSuite.srpRsaSuites:
certificateMsg = Certificate(certificateType)
certificateMsg.create(serverCertChain)
msgs.append(certificateMsg)
msgs.append(serverKeyExchange)
msgs.append(ServerHelloDone())
for result in self._sendMsgs(msgs):
yield result
#From here on, the client's messages must have the right version
self._versionCheck = True
#Get and check ClientKeyExchange
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_key_exchange,
cipherSuite):
if result in (0,1):
yield result
else:
break
clientKeyExchange = result
A = clientKeyExchange.srp_A
if A % N == 0:
postFinishedError = (AlertDescription.illegal_parameter,
"Suspicious A value")
#Calculate u
u = makeU(N, A, B)
#Calculate premaster secret
S = powMod((A * powMod(v,u,N)) % N, b, N)
premasterSecret = numberToBytes(S)
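# Note (added commentary, not in the original source): the computations above
# follow SRP-6 as used by TLS-SRP. With password verifier v = g^x mod N,
# server ephemeral b and client ephemeral A, the server derives:
#   B = (g^b + k*v) mod N
#   u = H(A | B)
#   S = (A * v^u)^b mod N
# The 'A % N == 0' check above rejects degenerate client values that would
# force S to zero regardless of the password.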
#If we've selected an RSA suite, exchange keys and calculate
#premaster secret:
elif cipherSuite in CipherSuite.rsaSuites:
#Send ServerHello, Certificate[, CertificateRequest],
#ServerHelloDone
msgs = []
msgs.append(ServerHello().create(self.version, serverRandom,
sessionID, cipherSuite, certificateType))
msgs.append(Certificate(certificateType).create(serverCertChain))
if reqCert and reqCAs:
msgs.append(CertificateRequest().create([], reqCAs))
elif reqCert:
msgs.append(CertificateRequest())
msgs.append(ServerHelloDone())
for result in self._sendMsgs(msgs):
yield result
#From here on, the client's messages must have the right version
self._versionCheck = True
#Get [Certificate,] (if was requested)
if reqCert:
if self.version == (3,0):
for result in self._getMsg((ContentType.handshake,
ContentType.alert),
HandshakeType.certificate,
certificateType):
if result in (0,1):
yield result
else:
break
msg = result
if isinstance(msg, Alert):
#If it's not a no_certificate alert, re-raise
alert = msg
if alert.description != \
AlertDescription.no_certificate:
self._shutdown(False)
raise TLSRemoteAlert(alert)
elif isinstance(msg, Certificate):
clientCertificate = msg
if clientCertificate.certChain and \
clientCertificate.certChain.getNumCerts()!=0:
clientCertChain = clientCertificate.certChain
else:
raise AssertionError()
elif self.version in ((3,1), (3,2)):
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate,
certificateType):
if result in (0,1):
yield result
else:
break
clientCertificate = result
if clientCertificate.certChain and \
clientCertificate.certChain.getNumCerts()!=0:
clientCertChain = clientCertificate.certChain
else:
raise AssertionError()
#Get ClientKeyExchange
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_key_exchange,
cipherSuite):
if result in (0,1):
yield result
else:
break
clientKeyExchange = result
#Decrypt ClientKeyExchange
premasterSecret = privateKey.decrypt(\
clientKeyExchange.encryptedPreMasterSecret)
randomPreMasterSecret = getRandomBytes(48)
if not premasterSecret:
premasterSecret = randomPreMasterSecret
elif len(premasterSecret)!=48:
premasterSecret = randomPreMasterSecret
else:
versionCheck = (premasterSecret[0], premasterSecret[1]) #index only after the length check, so a failed decryption can't raise here
if versionCheck != clientHello.client_version:
if versionCheck != self.version: #Tolerate buggy IE clients
premasterSecret = randomPreMasterSecret
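# Note (added commentary, not in the original source): silently substituting
# a random premaster secret -- instead of alerting on a bad decryption or a
# version mismatch -- is the standard countermeasure against
# Bleichenbacher-style padding oracles and version-rollback probes: the
# failure only surfaces later as a Finished-message mismatch, so an attacker
# learns nothing from the decryption step itself.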
#Get and check CertificateVerify, if relevant
if clientCertChain:
if self.version == (3,0):
#Create a temporary session object, just for the purpose
#of checking the CertificateVerify
session = Session()
session._calcMasterSecret(self.version, premasterSecret,
clientRandom, serverRandom)
verifyBytes = self._calcSSLHandshakeHash(\
session.masterSecret, "")
elif self.version in ((3,1), (3,2)):
verifyBytes = stringToBytes(self._handshake_md5.digest() +\
self._handshake_sha.digest())
for result in self._getMsg(ContentType.handshake,
HandshakeType.certificate_verify):
if result in (0,1):
yield result
else:
break
certificateVerify = result
publicKey = clientCertChain.getEndEntityPublicKey()
if len(publicKey) < settings.minKeySize:
postFinishedError = (AlertDescription.handshake_failure,
"Client's public key too small: %d" % len(publicKey))
if len(publicKey) > settings.maxKeySize:
postFinishedError = (AlertDescription.handshake_failure,
"Client's public key too large: %d" % len(publicKey))
if not publicKey.verify(certificateVerify.signature,
verifyBytes):
postFinishedError = (AlertDescription.decrypt_error,
"Signature failed to verify")
#Create the session object
self.session = Session()
self.session._calcMasterSecret(self.version, premasterSecret,
clientRandom, serverRandom)
self.session.sessionID = sessionID
self.session.cipherSuite = cipherSuite
self.session.srpUsername = self.allegedSrpUsername
self.session.clientCertChain = clientCertChain
self.session.serverCertChain = serverCertChain
#Calculate pending connection states
self._calcPendingStates(clientRandom, serverRandom,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._getFinished():
yield result
#If we were holding a post-finished error until receiving the client
#finished message, send it now. We delay the call until this point
#because calling sendError() throws an exception, and our caller might
#shut down the socket upon receiving the exception. If he did, and the
#client was still sending its ChangeCipherSpec or Finished messages, it
#would cause a socket error on the client side. This is a lot of
#consideration to show to misbehaving clients, but this would also
#cause problems with fault-testing.
if postFinishedError:
for result in self._sendError(*postFinishedError):
yield result
for result in self._sendFinished():
yield result
#Add the session object to the session cache
if sessionCache and sessionID:
sessionCache[bytesToString(sessionID)] = self.session
#Mark the connection as open
self.session._setResumable(True)
self._handshakeDone(resumed=False)
def _handshakeWrapperAsync(self, handshaker, checker):
if not self.fault:
try:
for result in handshaker:
yield result
if checker:
try:
checker(self)
except TLSAuthenticationError:
alert = Alert().create(AlertDescription.close_notify,
AlertLevel.fatal)
for result in self._sendMsg(alert):
yield result
raise
except:
self._shutdown(False)
raise
else:
try:
for result in handshaker:
yield result
if checker:
try:
checker(self)
except TLSAuthenticationError:
alert = Alert().create(AlertDescription.close_notify,
AlertLevel.fatal)
for result in self._sendMsg(alert):
yield result
raise
except socket.error, e:
raise TLSFaultError("socket error!")
except TLSAbruptCloseError, e:
raise TLSFaultError("abrupt close error!")
except TLSAlert, alert:
if alert.description not in Fault.faultAlerts[self.fault]:
raise TLSFaultError(str(alert))
else:
pass
except:
self._shutdown(False)
raise
else:
raise TLSFaultError("No error!")
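# Note (added commentary, not in the original source): when self.fault is
# set, the wrapper above inverts success and failure -- a handshake that
# raises an alert listed in Fault.faultAlerts[self.fault] passes silently,
# while a handshake that completes cleanly raises TLSFaultError("No error!").
# This lets a test harness inject deliberate protocol faults and assert that
# the peer detects them.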
def _getKeyFromChain(self, certificate, settings):
#Get and check cert chain from the Certificate message
certChain = certificate.certChain
if not certChain or certChain.getNumCerts() == 0:
for result in self._sendError(AlertDescription.illegal_parameter,
"Other party sent a Certificate message without "\
"certificates"):
yield result
#Get and check public key from the cert chain
publicKey = certChain.getEndEntityPublicKey()
if len(publicKey) < settings.minKeySize:
for result in self._sendError(AlertDescription.handshake_failure,
"Other party's public key too small: %d" % len(publicKey)):
yield result
if len(publicKey) > settings.maxKeySize:
for result in self._sendError(AlertDescription.handshake_failure,
"Other party's public key too large: %d" % len(publicKey)):
yield result
yield publicKey, certChain
| bsd-3-clause |
tensorflow/examples | tensorflow_examples/lite/model_maker/demo/image_classification_demo_test.py | 1 | 2699 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from unittest.mock import patch
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import test_util
from tensorflow_examples.lite.model_maker.demo import image_classification_demo
from tflite_model_maker import image_classifier
from_folder_fn = image_classifier.DataLoader.from_folder
def patch_data_loader():
"""Patch to train partial dataset rather than all of them."""
def side_effect(*args, **kwargs):
tf.compat.v1.logging.info('Train on partial dataset')
data_loader = from_folder_fn(*args, **kwargs)
if len(data_loader) > 10: # Trim dataset to at most 10.
data_loader._size = 10
# TODO(b/171449557): Change this once the dataset is lazily loaded.
data_loader._dataset = data_loader._dataset.take(10)
return data_loader
return patch.object(
image_classifier.DataLoader, 'from_folder', side_effect=side_effect)
class ImageClassificationDemoTest(tf.test.TestCase):
def test_image_classification_demo(self):
with patch_data_loader():
with tempfile.TemporaryDirectory() as temp_dir:
# Use cached training data if exists.
data_dir = image_classification_demo.download_demo_data(
cache_dir=test_util.get_cache_dir(temp_dir, 'flower_photos.tgz'),
file_hash='6f87fb78e9cc9ab41eff2015b380011d')
tflite_filename = os.path.join(temp_dir, 'model.tflite')
label_filename = os.path.join(temp_dir, 'labels.txt')
image_classification_demo.run(
data_dir,
temp_dir,
spec='efficientnet_lite0',
epochs=1,
batch_size=1)
self.assertTrue(tf.io.gfile.exists(tflite_filename))
self.assertGreater(os.path.getsize(tflite_filename), 0)
self.assertFalse(tf.io.gfile.exists(label_filename))
if __name__ == '__main__':
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
tf.test.main()
| apache-2.0 |
takis/odoo | openerp/report/report_sxw.py | 217 | 27364 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import StringIO
import cStringIO
import base64
from datetime import datetime
import os
import re
import time
from interface import report_rml
import preprocess
import logging
import openerp.tools as tools
import zipfile
import common
import openerp
from openerp import SUPERUSER_ID
from openerp.osv.fields import float as float_field, function as function_field, datetime as datetime_field
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
rml_parents = {
'tr':1,
'li':1,
'story': 0,
'section': 0
}
rml_tag="para"
sxw_parents = {
'table-row': 1,
'list-item': 1,
'body': 0,
'section': 0,
}
html_parents = {
'tr' : 1,
'body' : 0,
'div' : 0
}
sxw_tag = "p"
rml2sxw = {
'para': 'p',
}
def get_date_length(date_format=DEFAULT_SERVER_DATE_FORMAT):
return len((datetime.now()).strftime(date_format))
class rml_parse(object):
def __init__(self, cr, uid, name, parents=rml_parents, tag=rml_tag, context=None):
if not context:
context={}
self.cr = cr
self.uid = uid
self.pool = openerp.registry(cr.dbname)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
self.localcontext = {
'user': user,
'setCompany': self.setCompany,
'repeatIn': self.repeatIn,
'setLang': self.setLang,
'setTag': self.setTag,
'removeParentNode': self.removeParentNode,
'format': self.format,
'formatLang': self.formatLang,
'lang' : user.company_id.partner_id.lang,
'translate' : self._translate,
'setHtmlImage' : self.set_html_image,
'strip_name' : self._strip_name,
'time' : time,
'display_address': self.display_address,
# more context members are setup in setCompany() below:
# - company_id
# - logo
}
self.setCompany(user.company_id)
self.localcontext.update(context)
self.name = name
self._node = None
self.parents = parents
self.tag = tag
self._lang_cache = {}
self.lang_dict = {}
self.default_lang = {}
self.lang_dict_called = False
self._transl_regex = re.compile('(\[\[.+?\]\])')
def setTag(self, oldtag, newtag, attrs=None):
return newtag, attrs
def _ellipsis(self, char, size=100, truncation_str='...'):
if not char:
return ''
if len(char) <= size:
return char
return char[:size-len(truncation_str)] + truncation_str
def setCompany(self, company_id):
if company_id:
self.localcontext['company'] = company_id
self.localcontext['logo'] = company_id.logo
self.rml_header = company_id.rml_header
self.rml_header2 = company_id.rml_header2
self.rml_header3 = company_id.rml_header3
self.logo = company_id.logo
def _strip_name(self, name, maxlen=50):
return self._ellipsis(name, maxlen)
def format(self, text, oldtag=None):
return text.strip()
def removeParentNode(self, tag=None):
raise GeneratorExit('Skip')
def set_html_image(self,id,model=None,field=None,context=None):
if not id :
return ''
if not model:
model = 'ir.attachment'
try :
id = int(id)
res = self.pool[model].read(self.cr,self.uid,id)
if field :
return res[field]
elif model =='ir.attachment' :
return res['datas']
else :
return ''
except Exception:
return ''
def setLang(self, lang):
self.localcontext['lang'] = lang
self.lang_dict_called = False
# re-evaluate self.objects in a different environment
env = self.objects.env(self.cr, self.uid, self.localcontext)
self.objects = self.objects.with_env(env)
def _get_lang_dict(self):
pool_lang = self.pool['res.lang']
lang = self.localcontext.get('lang', 'en_US') or 'en_US'
lang_ids = pool_lang.search(self.cr,self.uid,[('code','=',lang)])
if not lang_ids:
lang_ids = pool_lang.search(self.cr,self.uid,[('code','=','en_US')])
lang_obj = pool_lang.browse(self.cr,self.uid,lang_ids[0])
self.lang_dict.update({'lang_obj':lang_obj,'date_format':lang_obj.date_format,'time_format':lang_obj.time_format})
self.default_lang[lang] = self.lang_dict.copy()
return True
def digits_fmt(self, obj=None, f=None, dp=None):
digits = self.get_digits(obj, f, dp)
return "%%.%df" % (digits, )
def get_digits(self, obj=None, f=None, dp=None):
d = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = self.pool['decimal.precision']
d = decimal_precision_obj.precision_get(self.cr, self.uid, dp)
elif obj and f:
res_digits = getattr(obj._columns[f], 'digits', lambda x: ((16, DEFAULT_DIGITS)))
if isinstance(res_digits, tuple):
d = res_digits[1]
else:
d = res_digits(self.cr)[1]
elif (hasattr(obj, '_field') and\
isinstance(obj._field, (float_field, function_field)) and\
obj._field.digits):
d = obj._field.digits[1]
if not d and d != 0:
d = DEFAULT_DIGITS
return d
def formatLang(self, value, digits=None, date=False, date_time=False, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
if dp:
digits = self.get_digits(dp=dp)
else:
digits = self.get_digits(value)
if isinstance(value, (str, unicode)) and not value:
return ''
if not self.lang_dict_called:
self._get_lang_dict()
self.lang_dict_called = True
if date or date_time:
if not value:
return ''
date_format = self.lang_dict['date_format']
parse_format = DEFAULT_SERVER_DATE_FORMAT
if date_time:
value = value.split('.')[0]
date_format = date_format + " " + self.lang_dict['time_format']
parse_format = DEFAULT_SERVER_DATETIME_FORMAT
if isinstance(value, basestring):
# FIXME: the trimming is probably unreliable if format includes day/month names
# and those would need to be translated anyway.
date = datetime.strptime(value[:get_date_length(parse_format)], parse_format)
elif isinstance(value, time.struct_time):
date = datetime(*value[:6])
else:
date = datetime(*value.timetuple()[:6])
if date_time:
# Convert datetime values to the expected client/context timezone
date = datetime_field.context_timestamp(self.cr, self.uid,
timestamp=date,
context=self.localcontext)
return date.strftime(date_format.encode('utf-8'))
res = self.lang_dict['lang_obj'].format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj:
if currency_obj.position == 'after':
res = u'%s\N{NO-BREAK SPACE}%s' % (res, currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res = u'%s\N{NO-BREAK SPACE}%s' % (currency_obj.symbol, res)
return res
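# Hedged usage sketch (added commentary, not part of the original OpenERP
# source): typical calls from a report expression, assuming a parser
# instance 'p' whose localcontext lang is 'en_US':
#
#   p.formatLang(1234.5)                    # -> '1,234.50' (2 digits default)
#   p.formatLang(1234.5, dp='Account')      # digits taken from decimal.precision
#   p.formatLang('2013-05-01', date=True)   # rendered with the lang's date_format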
def display_address(self, address_record, without_company=False):
# FIXME handle `without_company`
return address_record.contact_address
def repeatIn(self, lst, name,nodes_parent=False):
ret_lst = []
for id in lst:
ret_lst.append({name:id})
return ret_lst
def _translate(self,text):
lang = self.localcontext['lang']
if lang and text and not text.isspace():
transl_obj = self.pool['ir.translation']
piece_list = self._transl_regex.split(text)
for pn in range(len(piece_list)):
if not self._transl_regex.match(piece_list[pn]):
source_string = piece_list[pn].replace('\n', ' ').strip()
if len(source_string):
translated_string = transl_obj._get_source(self.cr, self.uid, self.name, ('report', 'rml'), lang, source_string)
if translated_string:
piece_list[pn] = piece_list[pn].replace(source_string, translated_string)
text = ''.join(piece_list)
return text
def _add_header(self, rml_dom, header='external'):
if header=='internal':
rml_head = self.rml_header2
elif header=='internal landscape':
rml_head = self.rml_header3
else:
rml_head = self.rml_header
head_dom = etree.XML(rml_head)
for tag in head_dom:
found = rml_dom.find('.//'+tag.tag)
if found is not None and len(found):
if tag.get('position'):
found.append(tag)
else :
found.getparent().replace(found,tag)
return True
def set_context(self, objects, data, ids, report_type = None):
self.localcontext['data'] = data
self.localcontext['objects'] = objects
self.localcontext['digits_fmt'] = self.digits_fmt
self.localcontext['get_digits'] = self.get_digits
self.datas = data
self.ids = ids
self.objects = objects
if report_type:
if report_type=='odt' :
self.localcontext.update({'name_space' :common.odt_namespace})
else:
self.localcontext.update({'name_space' :common.sxw_namespace})
# WARNING: the object[0].exists() call below is slow but necessary because
# some broken reporting wizards pass incorrect IDs (e.g. ir.ui.menu ids)
if objects and len(objects) == 1 and \
objects[0].exists() and 'company_id' in objects[0] and objects[0].company_id:
# When we print only one record, we can auto-set the correct
# company in the localcontext. For other cases the report
# will have to call setCompany() inside the main repeatIn loop.
self.setCompany(objects[0].company_id)
class report_sxw(report_rml, preprocess.report):
"""
The register=True kwarg has been added to help remove the
openerp.netsvc.LocalService() indirection and the related
openerp.report.interface.report_int._reports dictionary:
report_sxw registered in XML with auto=False are also registered in Python.
In that case, they are registered in the above dictionary. Since
registration is automatically done upon instantiation, and that
instantiation is needed before rendering, a way was needed to
instantiate-without-register a report. In the future, no report
should be registered in the above dictionary and it will be dropped.
"""
def __init__(self, name, table, rml=False, parser=rml_parse, header='external', store=False, register=True):
report_rml.__init__(self, name, table, rml, '', register=register)
self.name = name
self.parser = parser
self.header = header
self.store = store
self.internal_header=False
if header=='internal' or header=='internal landscape':
self.internal_header=True
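# Hedged usage sketch (added commentary, not part of the original source):
# report classes are typically registered at module import time, e.g.
#
#   report_sxw('report.sale.order', 'sale.order',
#              'addons/sale/report/sale_order.rml', parser=rml_parse)
#
# The 'report.' prefix is stripped when looking up the report_name (see
# create() below); the RML path shown is illustrative only.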
def getObjects(self, cr, uid, ids, context):
table_obj = openerp.registry(cr.dbname)[self.table]
return table_obj.browse(cr, uid, ids, context=context)
def create(self, cr, uid, ids, data, context=None):
context = dict(context or {})
if self.internal_header:
context.update(internal_header=self.internal_header)
# skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
context.update(bin_raw=True)
registry = openerp.registry(cr.dbname)
ir_obj = registry['ir.actions.report.xml']
registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
report_xml_ids = ir_obj.search(cr, uid,
[('report_name', '=', self.name[7:])], context=context)
if report_xml_ids:
report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
else:
title = ''
report_file = tools.file_open(self.tmpl, subdir=None)
try:
rml = report_file.read()
report_type= data.get('report_type', 'pdf')
class a(object):
def __init__(self, *args, **argv):
for key,arg in argv.items():
setattr(self, key, arg)
report_xml = a(title=title, report_type=report_type, report_rml_content=rml, name=title, attachment=False, header=self.header)
finally:
report_file.close()
# We add an attribute on the ir.actions.report.xml instance.
# This attribute 'use_global_header' will be used by
# the create_single_XXX function of the report engine.
# This change has been done to avoid a big change of the API.
setattr(report_xml, 'use_global_header', self.header if report_xml.header else False)
report_type = report_xml.report_type
if report_type in ['sxw','odt']:
fnct = self.create_source_odt
elif report_type in ['pdf','raw','txt','html']:
fnct = self.create_source_pdf
elif report_type=='html2html':
fnct = self.create_source_html2html
elif report_type=='mako2html':
fnct = self.create_source_mako2html
else:
raise NotImplementedError(_('Unknown report type: %s') % report_type)
fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
if not fnct_ret:
return False, False
return fnct_ret
def create_source_odt(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_odt(cr, uid, ids, data, report_xml, context or {})
def create_source_html2html(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_html2html(cr, uid, ids, data, report_xml, context or {})
def create_source_mako2html(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_mako2html(cr, uid, ids, data, report_xml, context or {})
def create_source_pdf(self, cr, uid, ids, data, report_xml, context=None):
if not context:
context={}
registry = openerp.registry(cr.dbname)
attach = report_xml.attachment
if attach:
objs = self.getObjects(cr, uid, ids, context)
results = []
for obj in objs:
aname = eval(attach, {'object':obj, 'time':time})
result = False
if report_xml.attachment_use and aname and context.get('attachment_use', True):
aids = registry['ir.attachment'].search(cr, uid, [('datas_fname','=',aname+'.pdf'),('res_model','=',self.table),('res_id','=',obj.id)])
if aids:
brow_rec = registry['ir.attachment'].browse(cr, uid, aids[0])
if not brow_rec.datas:
continue
d = base64.decodestring(brow_rec.datas)
results.append((d,'pdf'))
continue
result = self.create_single_pdf(cr, uid, [obj.id], data, report_xml, context)
if not result:
return False
if aname:
try:
name = aname+'.'+result[1]
# Remove the default_type entry from the context: this
# is for instance used on the account.account_invoices
# and is thus not intended for the ir.attachment type
# field.
ctx = dict(context)
ctx.pop('default_type', None)
registry['ir.attachment'].create(cr, uid, {
'name': aname,
'datas': base64.encodestring(result[0]),
'datas_fname': name,
'res_model': self.table,
'res_id': obj.id,
}, context=ctx
)
except Exception:
#TODO: should probably raise a proper osv_except instead, shouldn't we? see LP bug #325632
_logger.error('Could not create saved report attachment', exc_info=True)
results.append(result)
if results:
if results[0][1]=='pdf':
from pyPdf import PdfFileWriter, PdfFileReader
output = PdfFileWriter()
for r in results:
reader = PdfFileReader(cStringIO.StringIO(r[0]))
for page in range(reader.getNumPages()):
output.addPage(reader.getPage(page))
s = cStringIO.StringIO()
output.write(s)
return s.getvalue(), results[0][1]
return self.create_single_pdf(cr, uid, ids, data, report_xml, context)
def create_single_pdf(self, cr, uid, ids, data, report_xml, context=None):
if not context:
context={}
logo = None
context = context.copy()
title = report_xml.name
rml = report_xml.report_rml_content
# if no rml file is found
if not rml:
return False
rml_parser = self.parser(cr, uid, self.name2, context=context)
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, report_xml.report_type)
processed_rml = etree.XML(rml)
if report_xml.use_global_header:
rml_parser._add_header(processed_rml, self.header)
processed_rml = self.preprocess_rml(processed_rml,report_xml.report_type)
if rml_parser.logo:
logo = base64.decodestring(rml_parser.logo)
create_doc = self.generators[report_xml.report_type]
pdf = create_doc(etree.tostring(processed_rml),rml_parser.localcontext,logo,title.encode('utf8'))
return pdf, report_xml.report_type
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
context = dict(context or {})
context['parents'] = sxw_parents
report_type = report_xml.report_type
binary_report_content = report_xml.report_sxw_content
if isinstance(report_xml.report_sxw_content, unicode):
# if binary content was passed as unicode, we must
# re-encode it as a 8-bit string using the pass-through
# 'latin1' encoding, to restore the original byte values.
# See also osv.fields.sanitize_binary_value()
binary_report_content = report_xml.report_sxw_content.encode("latin1")
sxw_io = StringIO.StringIO(binary_report_content)
sxw_z = zipfile.ZipFile(sxw_io, mode='r')
rml = sxw_z.read('content.xml')
meta = sxw_z.read('meta.xml')
mime_type = sxw_z.read('mimetype')
if mime_type == 'application/vnd.sun.xml.writer':
mime_type = 'sxw'
else :
mime_type = 'odt'
sxw_z.close()
rml_parser = self.parser(cr, uid, self.name2, context=context)
rml_parser.parents = sxw_parents
rml_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, mime_type)
rml_dom_meta = node = etree.XML(meta)
elements = node.findall(rml_parser.localcontext['name_space']["meta"]+"user-defined")
for pe in elements:
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name"):
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 3":
pe[0].text=data['id']
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 4":
pe[0].text=data['model']
meta = etree.tostring(rml_dom_meta, encoding='utf-8',
xml_declaration=True)
rml_dom = etree.XML(rml)
elements = []
key1 = rml_parser.localcontext['name_space']["text"]+"p"
key2 = rml_parser.localcontext['name_space']["text"]+"drop-down"
for n in rml_dom.iterdescendants():
if n.tag == key1:
elements.append(n)
if mime_type == 'odt':
for pe in elements:
e = pe.findall(key2)
for de in e:
pp=de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de:
if cnd.text or cnd.tail:
if pe.text:
pe.text += cnd.text or cnd.tail
else:
pe.text = cnd.text or cnd.tail
pp.remove(de)
else:
for pe in elements:
e = pe.findall(key2)
for de in e:
pp = de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de:
text = cnd.get("{http://openoffice.org/2000/text}value",False)
if text:
if pe.text and text.startswith('[['):
pe.text += text
elif text.startswith('[['):
pe.text = text
if de.getparent():
pp.remove(de)
rml_dom = self.preprocess_rml(rml_dom, mime_type)
create_doc = self.generators[mime_type]
odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
encoding='utf-8', xml_declaration=True)
sxw_contents = {'content.xml':odt, 'meta.xml':meta}
if report_xml.use_global_header:
#Add corporate header/footer
rml_file = tools.file_open(os.path.join('base', 'report', 'corporate_%s_header.xml' % report_type))
try:
rml = rml_file.read()
rml_parser = self.parser(cr, uid, self.name2, context=context)
rml_parser.parents = sxw_parents
rml_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, report_xml.report_type)
rml_dom = self.preprocess_rml(etree.XML(rml),report_type)
create_doc = self.generators[report_type]
odt = create_doc(rml_dom,rml_parser.localcontext)
if report_xml.use_global_header:
rml_parser._add_header(odt)
odt = etree.tostring(odt, encoding='utf-8',
xml_declaration=True)
sxw_contents['styles.xml'] = odt
finally:
rml_file.close()
#create an empty zip and write the sxw contents into it, copying the remaining entries from the template to avoid duplication
sxw_out = StringIO.StringIO()
sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w')
sxw_template_zip = zipfile.ZipFile (sxw_io, 'r')
for item in sxw_template_zip.infolist():
if item.filename not in sxw_contents:
buffer = sxw_template_zip.read(item.filename)
sxw_out_zip.writestr(item.filename, buffer)
for item_filename, buffer in sxw_contents.iteritems():
sxw_out_zip.writestr(item_filename, buffer)
sxw_template_zip.close()
sxw_out_zip.close()
final_op = sxw_out.getvalue()
sxw_io.close()
sxw_out.close()
return final_op, mime_type
def create_single_html2html(self, cr, uid, ids, data, report_xml, context=None):
context = dict(context or {})
context['parents'] = html_parents
report_type = 'html'
html = report_xml.report_rml_content
html_parser = self.parser(cr, uid, self.name2, context=context)
html_parser.parents = html_parents
html_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
html_parser.set_context(objs, data, ids, report_type)
html_dom = etree.HTML(html)
html_dom = self.preprocess_rml(html_dom,'html2html')
create_doc = self.generators['html2html']
html = etree.tostring(create_doc(html_dom, html_parser.localcontext))
return html.replace('&','&').replace('<', '<').replace('>', '>').replace('</br>',''), report_type
def create_single_mako2html(self, cr, uid, ids, data, report_xml, context=None):
mako_html = report_xml.report_rml_content
html_parser = self.parser(cr, uid, self.name2, context)
objs = self.getObjects(cr, uid, ids, context)
html_parser.set_context(objs, data, ids, 'html')
create_doc = self.generators['makohtml2html']
html = create_doc(mako_html,html_parser.localcontext)
return html,'html'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MrLucasCardoso/pycards | tests/test_amex.py | 1 | 2451 | import json
import pytest
from pycards import CreditCard
from datetime import datetime
from pycards.settings import FIXTURES_PATH
@pytest.fixture(scope="session")
def data():
with open(FIXTURES_PATH) as data_file:
return json.load(data_file)['AMEX']
def test_init(data):
assert len(data) > 0
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert len(cards) == len(data)
def test_is_valid(data):
assert all(CreditCard(card['name'], code=card['code']).is_valid for card in data)
def test_brand(data):
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert len(cards) == len([card for card in cards if card.brand == 'Amex'])
def test_cardholder(data):
cards = [CreditCard(card['name'], code=card['code'], cardholder='TESTE DADOS') for card in data]
assert len(cards) == len([card for card in cards if card.cardholder == 'TESTE DADOS'])
def test_number(data):
numbers = [card['name'] for card in data]
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert all([True for c in cards if c.number in numbers]) and any([True for c in cards if c.number in numbers])
def test_expires(data):
cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
assert all(True for c in cards if type(c.expires) == datetime)
def test_expires_string(data):
cards = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data]
assert all(True for c in cards if c.expires_string == '07/21') and any(True for c in cards if c.expires_string == '07/21')
def test_is_not_expired(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2021') for card in data][0]
assert not card.is_expired
def test_is_expired(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
assert card.is_expired
def test_code_name(data):
card = [CreditCard(card['name'], code=card['code'], expire_month='7', expire_year='2016') for card in data][0]
assert card.code_name == 'CVV'
def test_code(data):
codes = [card['code'] for card in data]
cards = [CreditCard(card['name'], code=card['code']) for card in data]
assert all([True for c in cards if c.code in codes]) and any([True for c in cards if c.code in codes])
| mit |
steveklabnik/servo | components/script/dom/bindings/codegen/parser/tests/test_extended_attributes.py | 149 | 2846 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
[NoInterfaceObject]
interface TestExtendedAttr {
[Unforgeable] readonly attribute byte b;
};
""")
results = parser.finish()
parser = parser.reset()
parser.parse("""
[Pref="foo.bar",Pref=flop]
interface TestExtendedAttr {
[Pref="foo.bar"] attribute byte b;
};
""")
results = parser.finish()
parser = parser.reset()
parser.parse("""
interface TestLenientThis {
[LenientThis] attribute byte b;
};
""")
results = parser.finish()
harness.ok(results[0].members[0].hasLenientThis(),
"Should have a lenient this")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestLenientThis2 {
[LenientThis=something] attribute byte b;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[LenientThis] must take no arguments")
parser = parser.reset()
parser.parse("""
interface TestClamp {
void testClamp([Clamp] long foo);
void testNotClamp(long foo);
};
""")
results = parser.finish()
# Pull out the first argument out of the arglist of the first (and
# only) signature.
harness.ok(results[0].members[0].signatures()[0][1][0].clamp,
"Should be clamped")
harness.ok(not results[0].members[1].signatures()[0][1][0].clamp,
"Should not be clamped")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestClamp2 {
void testClamp([Clamp=something] long foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[Clamp] must take no arguments")
parser = parser.reset()
parser.parse("""
interface TestEnforceRange {
void testEnforceRange([EnforceRange] long foo);
void testNotEnforceRange(long foo);
};
""")
results = parser.finish()
# Pull out the first argument out of the arglist of the first (and
# only) signature.
harness.ok(results[0].members[0].signatures()[0][1][0].enforceRange,
"Should be enforceRange")
harness.ok(not results[0].members[1].signatures()[0][1][0].enforceRange,
"Should not be enforceRange")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestEnforceRange2 {
void testEnforceRange([EnforceRange=something] long foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[EnforceRange] must take no arguments")
| mpl-2.0 |
lichengshuang/createvhost | others/webvirtmgr/delServer.py | 1 | 1305 | #!/usr/bin/python
#-*-encoding:utf-8-*-
#author: asher
#date: 20160429 on train D909
# this script is used to bulk-remove server IP entries from webvirtmgr
# without it, each server must be removed through the website, which is slow and tedious
# use this to save yourself the trouble
import sqlite3
try:
conn = sqlite3.connect('../webvirtmgr.sqlite3')
cur = conn.cursor()
print "Input the server IP prefix, for example:"
ips = raw_input("Ips 172.23.32:").strip()
ips1 = int(raw_input("Input the starting last-octet number, e.g. 1:>").strip())
ips2 = int(raw_input("Input the ending last-octet number, e.g. 100:>").strip())
# jifang = str(raw_input("DataCenter like:jxq:>").strip())
# login = str(raw_input("User:admin or others:>").strip())
# password = str(raw_input("Password:>").strip())
while True:
if ips1 <= ips2:
ips1 = str(ips1)
newip = ips + "." + ips1
# jifang1 = jifang + "_" + newip
print "Deleting %s from database\n" % newip
cur.execute("delete from servers_compute where hostname == '%s'" % newip)
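# Note (added commentary, not in the original script): interpolating the IP
# straight into the SQL string is injection-prone; sqlite3 parameter binding
# would be safer, e.g.:
#   cur.execute("delete from servers_compute where hostname = ?", (newip,))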
ips1 = int(ips1)
ips1 += 1
conn.commit()
else:
break
finally:
allservers = cur.execute("select id,name,hostname,login,type from servers_compute").fetchall()
for i in allservers:
print i
conn.close()
| apache-2.0 |
esikachev/sahara-backup | sahara/tests/unit/swift/test_utils.py | 7 | 1447 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.swift import utils
from sahara.tests.unit import base as testbase
class SwiftUtilsTest(testbase.SaharaTestCase):
def setUp(self):
super(SwiftUtilsTest, self).setUp()
self.override_config('use_identity_api_v3', True)
@mock.patch('sahara.utils.openstack.base.url_for')
def test_retrieve_auth_url(self, url_for_mock):
correct = "https://127.0.0.1:8080/v2.0/"
def _assert(uri):
url_for_mock.return_value = uri
self.assertEqual(correct, utils.retrieve_auth_url())
_assert("%s/" % correct)
_assert("https://127.0.0.1:8080")
_assert("https://127.0.0.1:8080/")
_assert("https://127.0.0.1:8080/v2.0")
_assert("https://127.0.0.1:8080/v2.0/")
_assert("https://127.0.0.1:8080/v42/")
_assert("https://127.0.0.1:8080/foo")
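# Note (added commentary, not in the original test): the assertions above
# document that retrieve_auth_url() is expected to normalize whatever URL the
# service catalog returns -- any version suffix or extra path -- to the v2.0
# endpoint with a trailing slash.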
| apache-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/core/files/base.py | 147 | 4653 | from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.utils.encoding import smart_text
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import force_bytes, python_2_unicode_compatible
@python_2_unicode_compatible
class File(FileProxyMixin):
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file, name=None):
self.file = file
if name is None:
name = getattr(file, 'name', None)
self.name = name
if hasattr(file, 'mode'):
self.mode = file.mode
def __str__(self):
return smart_text(self.name or '')
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self or "None")
def __bool__(self):
return bool(self.name)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __len__(self):
return self.size
def _get_size(self):
if not hasattr(self, '_size'):
if hasattr(self.file, 'size'):
self._size = self.file.size
elif hasattr(self.file, 'name') and os.path.exists(self.file.name):
self._size = os.path.getsize(self.file.name)
elif hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
self._size = self.file.tell()
self.file.seek(pos)
else:
raise AttributeError("Unable to determine the file's size.")
return self._size
def _set_size(self, size):
self._size = size
size = property(_get_size, _set_size)
def _get_closed(self):
return not self.file or self.file.closed
closed = property(_get_closed)
def chunks(self, chunk_size=None):
"""
Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
try:
self.seek(0)
except (AttributeError, UnsupportedOperation):
pass
while True:
data = self.read(chunk_size)
if not data:
break
yield data
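# Hedged usage sketch (added commentary, not part of the original Django
# source): chunks() lets callers stream large files instead of reading them
# into memory at once, e.g. hashing an upload:
#
#   import hashlib
#   digest = hashlib.md5()
#   for chunk in uploaded_file.chunks():
#       digest.update(chunk)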
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.size > chunk_size
def __iter__(self):
# Iterate over this file-like object by newlines
buffer_ = None
for chunk in self.chunks():
chunk_buffer = BytesIO(chunk)
for line in chunk_buffer:
if buffer_:
line = buffer_ + line
buffer_ = None
# If this is the end of a line, yield
# otherwise, wait for the next round
if line[-1] in ('\n', '\r'):
yield line
else:
buffer_ = line
if buffer_ is not None:
yield buffer_
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and os.path.exists(self.name):
self.file = open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
"""
A File-like object that takes just raw content, rather than an actual file.
"""
def __init__(self, content, name=None):
if six.PY3:
stream_class = StringIO if isinstance(content, six.text_type) else BytesIO
else:
stream_class = BytesIO
content = force_bytes(content)
super(ContentFile, self).__init__(stream_class(content), name=name)
self.size = len(content)
def __str__(self):
return 'Raw content'
def __bool__(self):
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def open(self, mode=None):
self.seek(0)
def close(self):
pass
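# Hedged usage sketch (added commentary, not part of the original Django
# source): ContentFile wraps raw bytes/text so it can be passed anywhere a
# File is expected, e.g.:
#
#   f = ContentFile(b"hello world", name="hello.txt")
#   f.size    # -> 11
#   f.read()  # -> b'hello world'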
| mit |
goofwear/raspberry_pwn | src/pentest/fasttrack/setup.py | 16 | 11757 | #!/usr/bin/env python
import os
import sys
import time
import subprocess
import re
def get_basepath():
basepath = os.getcwd()
return basepath
definepath=get_basepath()
try:
if sys.argv[1]=='install':
print """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~ *** Fast-Track Setup *** ~
~ *** Install Fast-Track dependencies *** ~
~ *** Version 2.1 *** ~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fast-Track initial setup menu. You should re-run this whenever you update
Fast-Track, since new application modules can introduce additional
dependencies.
Before running Fast-Track, note that you must install the following yourself:
Metasploit (for autopwn, and mass client attack)
SQLite3 (for autopwn)
There are other requirements; however, Fast-Track will check for them and, if
you're missing any, Fast-Track will install them for you.
NOTE: pymssql is currently built against an older release; newer releases
completely break Fast-Track at this point. We are working on a solution to
fix the overall issues.
"""
# Check if we're root
if os.geteuid() != 0:
print "\nFast-Track v4 - A new beginning...\n\n"
print "Fast-Track Setup is not running under root. Please re-run the tool under root...\n"
sys.exit(1)
#print "Metasploit directory example: /pentest/exploits/framework3/"
#print "\nMake sure you do /folder/ with the / at the end or it'll\njack some stuff up."
try:
print "[*] Ensure that you configure the Metasploit path in bin/config/config\n"
#metasploitpath=raw_input("\nEnter the path to the metasploit directory\nHit enter for default (/pentest/exploits/framework3/): ")
#if metasploitpath=='':
# metasploitpath="/pentest/exploits/framework3/"
#if os.path.isfile("%smsfconsole" % (metasploitpath)): print "Metasploit files detected, moving on..."
#if not os.path.isfile("%smsfconsole" % (metasploitpath)): print "Metasploit not detected in path specified. You should re-run setup and specify the correct path."
#writefile=file("%s/bin/setup/metasploitconfig.file" % (definepath),'w')
#writefile.write("%s" % (metasploitpath))
#writefile.close()
#print "*** Metasploit directory set..... ***\n"
print "There is no guarantee this will install all dependencies correctly;\nyou may need to install some manually.\n\nDifferent Linux distributions require different things to work.\n"
installstuff=raw_input("Would you like to attempt to install all dependencies, yes or no: ")
# Thanks to swc|666 for the help below
if installstuff=='yes':
print '[-] Installing requirements needed for Fast-Track.. [-]'
print '\n[-] Detecting Linux version... [-]'
time.sleep(2)
if os.path.isfile("/etc/apt/sources.list"):
### Not every sources.list file presence indicates Ubuntu (this works on all flavors of Ubuntu, Debian and Sidux @least)
if os.path.isfile("/etc/lsb-release"):
pat=re.compile("=|\"",re.M|re.DOTALL)
distro=open("/etc/lsb-release").read()
distro=pat.sub("",distro).split("\n")
distro=[i.strip() for i in distro if i.strip() != '' ]
for n,items in enumerate(distro):
if "DISTRIB_DESCRIPTION" in items:
d1 = distro[n+0]
d2 = d1.replace("DISTRIB_DESCRIPTION", "")  # replace(), not strip(): strip() treats the argument as a character set and can eat letters of the distro name
d3 = "\n[-] " "%s " "Detected [-]\n" % (d2)
#print d3
print "[-] Installing requirements to run on " "%s" "! [-]" % (d2)
# else:
### A sources.list and not a lsb-release file? >.<
print '\n[-] Debian-Based OS Detected [-]\n'
print '[-] Installing requirements! [-]'
print "Installing Subversion, Build-Essential, VNCviewer, Nmap, Python-ClientForm, Python2.6-Dev, Python-PExpect, and Python-Setuptools through Apt, please wait.."
subprocess.Popen("apt-get --force-yes -y install subversion build-essential vncviewer nmap python-clientform python2.6-dev python-pexpect python-setuptools", shell=True).wait()
subprocess.Popen("wget http://ibiblio.org/pub/Linux/ALPHA/freetds/stable/freetds-stable.tgz", shell=True).wait()
subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
subprocess.Popen("tar -zxvf freetds-stable.tgz;tar -zxvf pymssql-0.8.0.tar.gz;cd freetds-0.*;./configure --enable-msdblib --with-tdsver=8.0 && make && make install; cd ..;cd pymssql-0.8.0;ln -s /usr/local/lib/libsybdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf freetds*;rm -rf pymssql*", shell=True).wait()
print '[-] Running ldconfig.... [-]'
subprocess.Popen("ldconfig", shell=True).wait()
subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
subprocess.Popen("tar -zxvf pymssql-0.8.0.tar.gz;cd pymssql-0.8.0;ln -s /usr/local/lib/libsybdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf pymssql*", shell=True).wait()
subprocess.Popen('wget http://pypi.inqbus.de/pymills/pymills-3.4.tar.gz#md5=5741d4a5c30aaed5def2f4b4f86e92a9;tar -zxvf pymills-3.4.tar.gz;mv pymills-3.4 pymills;cd pymills/; python setup.py install', shell=True).wait()
subprocess.Popen('rm -rf pymills; rm -rf pymills-3.4.tar.gz', shell=True).wait()
print "Installing BeautifulSoup Python Module"
subprocess.Popen("wget http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz;tar -zxvf BeautifulSoup.tar.gz;cd BeautifulSoup*;python setup.py install;cd ..;rm -rf BeautifulSoup*", shell=True).wait()
print "BeautifulSoup Installed."
# Taken from http://wiredbytes.com/node/5
metasploitinstall=raw_input("\nWould you like Fast-Track to install Metasploit 3 for you (experimental)? yes or no: ")
if metasploitinstall == 'yes':
subprocess.Popen("apt-get install build-essential ruby libruby rdoc libyaml-ruby libzlib-ruby libopenssl-ruby libdl-ruby libreadline-ruby libiconv-ruby libgtk2-ruby libglade2-ruby subversion sqlite3 libsqlite3-ruby irb", shell=True).wait()
subprocess.Popen("wget -c http://rubyforge.org/frs/download.php/70696/rubygems-1.3.7.tgz;tar -xvzf rubygems-1.3.7.tgz -C /tmp/;cd /tmp/rubygems-1.3.7/;ruby setup.rb", shell=True).wait()
subprocess.Popen("/usr/bin/gem1.8 install rails", shell=True).wait()
subprocess.Popen("rm rubygems-1.3.7.tgz", shell=True).wait()
subprocess.Popen("mkdir /pentest/exploits/framework3;cd /pentest/exploits/framework/;svn co http://metasploit.com/svn/framework3/trunk/ ." , shell=True).wait()
print "Metasploit should have been installed..running ldconfig"
ldconfig=subprocess.Popen("ldconfig").wait()
else:
print "[-] Generic Linux OS detected! [-] \n[-] Performing a vanilla installation of dependencies [-]"
print '[-] Installing FreeTDS and PYMSSQL [-]'
subprocess.Popen("wget http://ibiblio.org/pub/Linux/ALPHA/freetds/stable/freetds-stable.tgz", shell=True).wait()
subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
subprocess.Popen("tar -zxvf freetds-stable.tgz;tar -zxvf pymssql-0.8.0.tar.gz;cd freetds-0.*;./configure --enable-msdblib --with-tdsver=8.0 && make && make install; cd ..;cd pymssql-0.8.0;ln -s /usr/local/lib/libsybdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf freetds*;rm -rf pymssql*", shell=True).wait()
print '[-] Running ldconfig.... [-]'
subprocess.Popen("ldconfig", shell=True).wait()
print '[-] Finished..moving on.. [-]'
time.sleep(2)
print 'Installing the Python module "pexpect"'
subprocess.Popen('wget http://downloads.sourceforge.net/pexpect/pexpect-2.3.tar.gz;tar -zxvf pexpect-2.3.tar.gz;cd pexpect-2.3;python setup.py install;cd ..;rm -rf pexpect-2.3;rm pexpect-2.3.tar.gz', shell=True).wait()
print 'Installed! Moving on...'
print 'Installing SQLite3'
subprocess.Popen('cd /usr/local/bin/;ln -s tclsh8.4 tclsh', shell=True).wait()
subprocess.Popen('wget http://www.sqlite.org/sqlite-3.7.0.1.tar.gz;tar -zxvf sqlite-3.7.0.1.tar.gz;cd sqlite-3.7.0.1;./configure --prefix=/usr/local && make && make install;cd ..;rm sqlite-3.7.0.1.tar.gz;rm -rf sqlite-3.7.0.1', shell=True).wait()
subprocess.Popen('wget http://rubyforge.org/frs/download.php/2820/sqlite-ruby-2.2.3.tar.gz;tar -zxvf sqlite-ruby-2.2.3.tar.gz;cd sqlite-ruby-2.2.3;ruby setup.rb config;ruby setup.rb setup;ruby setup.rb install;cd ..;rm sqlite-ruby-2.2.3.tar.gz;rm -rf sqlite-ruby-2.2.3', shell=True).wait()
print 'SQLite3 installed..Moving on...'
print "Installing ClientForm Python Module"
subprocess.Popen("svn co http://codespeak.net/svn/wwwsearch/ClientForm/trunk ClientForm;cd ClientForm;python setup.py install;cd ..;rm -rf ClientForm", shell=True).wait()
print "ClientForm Installed, moving on.."
print "Installing PROFTPD"
subprocess.Popen("""wget ftp://ftp.proftpd.org/distrib/source/proftpd-1.3.3a.tar.gz;tar -zxvf proftpd-1.3.3a.tar.gz;cd proftpd-1.3.*/;./configure && make && make install;cd ..;rm -rf proftpd*;echo "UseReverseDNS off" >> /usr/local/etc/proftpd.conf;echo "IdentLookups off" >> /usr/local/etc/proftpd.conf;killall proftpd""", shell=True).wait()
print "PROFRPD installed..Moving on..."
print "Installing PyMills"
subprocess.Popen('python setuptools.py;wget http://pypi.inqbus.de/pymills/pymills-3.4.tar.gz;tar -zxvf pymills-3.4.tar.gz;mv pymills-3.4 pymills;cd pymills/;python setup.py install;cd ..;rm -rf pymills*', shell=True).wait()
print "PyMills installed..Moving on..."
print "Installing BeautifulSoup..."
subprocess.Popen("wget http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz;tar -zxvf BeautifulSoup.tar.gz;cd BeautifulSoup*;python setup.py install;cd ..;rm -rf BeautifulSoup*", shell=True).wait()
print "BeautifulSoup installed..Moving on..."
print "Finished with installations..."
print "Running ldconfig to wrap up everything..."
subprocess.Popen("ldconfig", shell=True).wait()
print "\n[-] Finished with setup [-]\n[-] Try running Fast-Track now. [-]\n[-] If unsucessful, manually compile from source the deps. [-]"
print "[-] Re-checking dependencies... [-]"
try:
sys.path.append("%s/bin/setup/" % (definepath))
import depend
print "\n"
print "Finished..running ldconfig to wrap everything up...\n"
ldconfig=subprocess.Popen("ldconfig", shell=True)
print "Fast-Track setup exiting...\n"
except ImportError:
print "Error importing dependancy checker."
except KeyboardInterrupt:
print "\n\nExiting Fast-Track setup...\n"
except IndexError:
print """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~ ~
~ Fast-Track Setup and Installation ~
~ ~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This script will allow you to install the required
dependencies needed for Fast-Track to function
correctly. Note this does not install Metasploit for
you. If you want to use the automated autopwn
functionality within Metasploit, you will need to
install that yourself.
Usage: python setup.py install
"""
| gpl-3.0 |
StephanieMak/hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py | 323 | 3444 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
pat = re.compile('(?P<name>[^=]+)="(?P<value>[^"]*)" *')
counterPat = re.compile('(?P<name>[^:]+):(?P<value>[^,]*),?')
def parse(tail):
result = {}
for n,v in re.findall(pat, tail):
result[n] = v
return result
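# Hedged illustration (an addition, not in the upstream script): parse() turns
# the name="value" tail of a job-history line into a dict keyed by attribute:
assert parse('TASKID="task_0001" START_TIME="1000"') == \
{'TASKID': 'task_0001', 'START_TIME': '1000'}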
mapStartTime = {}
mapEndTime = {}
reduceStartTime = {}
reduceShuffleTime = {}
reduceSortTime = {}
reduceEndTime = {}
reduceBytes = {}
for line in sys.stdin:
words = line.split(" ",1)
event = words[0]
attrs = parse(words[1])
if event == 'MapAttempt':
if attrs.has_key("START_TIME"):
mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000
elif attrs.has_key("FINISH_TIME"):
mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
elif event == 'ReduceAttempt':
if attrs.has_key("START_TIME"):
reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000
elif attrs.has_key("FINISH_TIME"):
reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000
reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000
reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
elif event == 'Task':
if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"):
for n,v in re.findall(counterPat, attrs["COUNTERS"]):
if n == "File Systems.HDFS bytes written":
reduceBytes[attrs["TASKID"]] = int(v)
runningMaps = {}
shufflingReduces = {}
sortingReduces = {}
runningReduces = {}
startTime = min(reduce(min, mapStartTime.values()),
reduce(min, reduceStartTime.values()))
endTime = max(reduce(max, mapEndTime.values()),
reduce(max, reduceEndTime.values()))
reduces = reduceBytes.keys()
reduces.sort()
print "Name reduce-output-bytes shuffle-finish reduce-finish"
for r in reduces:
print r, reduceBytes[r], reduceShuffleTime[r] - startTime,
print reduceEndTime[r] - startTime
print
for t in range(startTime, endTime):
runningMaps[t] = 0
shufflingReduces[t] = 0
sortingReduces[t] = 0
runningReduces[t] = 0
for m in mapStartTime.keys():
for t in range(mapStartTime[m], mapEndTime[m]):
runningMaps[t] += 1
for red in reduceStartTime.keys():
for t in range(reduceStartTime[red], reduceShuffleTime[red]):
shufflingReduces[t] += 1
for t in range(reduceShuffleTime[red], reduceSortTime[red]):
sortingReduces[t] += 1
for t in range(reduceSortTime[red], reduceEndTime[red]):
runningReduces[t] += 1
print "time maps shuffle merge reduce"
for t in range(startTime, endTime):
print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t],
print runningReduces[t]
| apache-2.0 |
fengbaicanhe/intellij-community | python/lib/Lib/xml/FtCore.py | 132 | 1806 | """
Contains various definitions common to modules acquired from 4Suite
"""
__all__ = ["FtException", "get_translator"]
class FtException(Exception):
def __init__(self, errorCode, messages, args):
# By defining __str__, args will be available. Otherwise
# the __init__ of Exception sets it to the passed in arguments.
self.params = args
self.errorCode = errorCode
self.message = messages[errorCode] % args
Exception.__init__(self, self.message, args)
def __str__(self):
return self.message
# What follows is used to provide support for I18N in the rest of the
# 4Suite-derived packages in PyXML.
#
# Each sub-package of the top-level "xml" package that contains 4Suite
# code is really a separate text domain, but they're all called
# '4Suite'. For each domain, a translation object is provided using
# message catalogs stored inside the package. The code below defines
# a get_translator() function that returns an appropriate gettext
# function to be used as _() in the sub-package named by the
# parameter. This handles all the compatibility issues related to
# Python versions (whether the gettext module can be found) and
# whether the message catalogs can actually be found.
def _(msg):
return msg
try:
import gettext
except (ImportError, IOError):
def get_translator(pkg):
return _
else:
import os
_cache = {}
_top = os.path.dirname(os.path.abspath(__file__))
def get_translator(pkg):
if not _cache.has_key(pkg):
locale_dir = os.path.join(_top, pkg.replace(".", os.sep))
try:
f = gettext.translation('4Suite', locale_dir).gettext
except IOError:
f = _
_cache[pkg] = f
return _cache[pkg]
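# Hedged usage sketch (an addition; "dom" is a hypothetical sub-package name).
# A 4Suite-derived sub-package would fetch its translator once at import time
# and then use it as _():
#
#   _ = get_translator("dom")
#   msg = _("unbalanced markup")  # translated only if a '4Suite' catalog exists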
| apache-2.0 |
louisq/staticguru | utility/artifact_archiver.py | 1 | 4495 | """
The MIT License (MIT)
Copyright (c) 2016 Louis-Philippe Querel [email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import glob
import os
import shutil
from Logging import logger
"""
The purpose of this utility is to clone the artifacts that were generated by the build process, so that they can be preserved.
This version will probably only work for Maven-run projects.
"""
FILTERED_EXTENSIONS = ('*.jar', '*.tar.*', '*.zip', '*.rpm')
# todo replace with an abstract solution that could be reused for the other modules to log the version that was run
artifact_archiver_version = 1
def archive(repo_path, archive_path, repo_id, commit, filter_extensions=True):
# Determine if we can access the path where the archive should be
if not _determine_access(archive_path):
logger.error("Failed to save to archive %s" % archive_path)
return False
temp_archive = os.path.join(repo_path, "%s-temp" % commit)
temp_archive_compress_file_no_ext = os.path.join(temp_archive, commit)
temp_archive_compress_file = "%s.tar.gz" % temp_archive_compress_file_no_ext
archive_repo_path = os.path.join(archive_path, repo_id)
archive_compress_file = "%s.tar.gz" % os.path.join(archive_repo_path, commit)
_clear_archive(temp_archive, archive_compress_file)
target_directories = _identify_target_directories(repo_path)
_clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions=filter_extensions)
_compress_files(temp_archive, temp_archive_compress_file_no_ext)
_move_compress_file_to_archive(archive_repo_path, temp_archive_compress_file)
# Delete the temporary folder
_clear_archive_temp(temp_archive)
return True
def _determine_access(archive_path):
return os.path.exists(archive_path)
def _clear_archive(archive_temp, archive_compress_file):
_clear_archive_temp(archive_temp)
if os.path.exists(archive_compress_file):
os.remove(archive_compress_file)
def _clear_archive_temp(temp_archive):
if os.path.exists(temp_archive):
shutil.rmtree(temp_archive)
def _identify_target_directories(repo_path):
folder = "target"
nesting = "**/"
target_directories = glob.glob(r'%s%s' % (repo_path, folder))
compound_nesting = ""
# We need to navigate the repository to find project target folders
for count in range(5):
compound_nesting += nesting
target_directories += glob.glob(r'%s%s%s' % (repo_path, compound_nesting, folder))
return target_directories
def _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions):
# Determine if we need to filter any of the files
if filter_extensions:
ignore = shutil.ignore_patterns(*FILTERED_EXTENSIONS)
else:
ignore = None
for path in target_directories:
folder = path[len(repo_path):]
shutil.copytree(path, "%s/%s" % (temp_archive, folder), ignore=ignore, symlinks=True)
def _compress_files(archive_temp, temp_archive_compress_file_no_ext):
# If the compression is changed the file extension needs to be changed as well in the parent method
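# shutil._make_tarball is a private CPython helper; shutil.make_archive with
# format "gztar" appears to be the public equivalent should this ever break.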
shutil._make_tarball(temp_archive_compress_file_no_ext, archive_temp, compress="gzip")
def _move_compress_file_to_archive(repo_archive_path, temp_archive_compress_file):
if not os.path.exists(repo_archive_path):
os.makedirs(repo_archive_path)
shutil.move(temp_archive_compress_file, repo_archive_path)
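# Hedged usage sketch (an addition; the paths, repo id and commit hash are
# hypothetical):
#
#   from utility.artifact_archiver import archive
#   ok = archive("/tmp/builds/myrepo/", "/mnt/archive", "myrepo-42",
#                "1a2b3c4d", filter_extensions=True)
#   if not ok:
#       logger.error("archive path /mnt/archive unavailable")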
| mit |
digetx/picasso_upstream_support | Documentation/target/tcm_mod_builder.py | 215 | 36866 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
buf += " &tpg->se_tpg, tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = " + fabric_mod_name + ",\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
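# Hedged illustration (an addition): each entry appended to fabric_ops is a
# function-pointer line from struct target_core_fabric_ops, shaped like
#   int (*write_pending)(struct se_cmd *);
# The dump pass below keys off those names with re.search().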
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
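# Hedged invocation sketch (an addition; the module name is hypothetical).
# Run from Documentation/target/ so the tcm_dir = os.getcwd() + "/../../"
# assumption above resolves to the kernel tree root:
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI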
| gpl-2.0 |
s0enke/boto | boto/logs/layer1.py | 146 | 22588 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.logs import exceptions
from boto.compat import json
class CloudWatchLogsConnection(AWSQueryConnection):
"""
Amazon CloudWatch Logs Service API Reference
This is the Amazon CloudWatch Logs API Reference . Amazon
CloudWatch Logs is a managed service for real time monitoring and
archival of application logs. This guide provides detailed
information about Amazon CloudWatch Logs actions, data types,
parameters, and errors. For detailed information about Amazon
CloudWatch Logs features and their associated API calls, go to the
`Amazon CloudWatch Logs Developer Guide`_.
Use the following links to get started using the Amazon CloudWatch
API Reference :
+ `Actions`_: An alphabetical list of all Amazon CloudWatch Logs
actions.
+ `Data Types`_: An alphabetical list of all Amazon CloudWatch
Logs data types.
+ `Common Parameters`_: Parameters that all Query actions can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
+ `Regions and Endpoints`_: Itemized regions and endpoints for all
AWS products.
In addition to using the Amazon CloudWatch Logs API, you can also
use the following SDKs and third-party libraries to access Amazon
CloudWatch Logs programmatically.
+ `AWS SDK for Java Documentation`_
+ `AWS SDK for .NET Documentation`_
+ `AWS SDK for PHP Documentation`_
+ `AWS SDK for Ruby Documentation`_
Developers in the AWS developer community also provide their own
libraries, which you can find at the following AWS developer
centers:
+ `AWS Java Developer Center`_
+ `AWS PHP Developer Center`_
+ `AWS Python Developer Center`_
+ `AWS Ruby Developer Center`_
+ `AWS Windows and .NET Developer Center`_
"""
APIVersion = "2014-03-28"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "logs.us-east-1.amazonaws.com"
ServiceName = "CloudWatchLogs"
TargetPrefix = "Logs_20140328"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"DataAlreadyAcceptedException": exceptions.DataAlreadyAcceptedException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ServiceUnavailableException": exceptions.ServiceUnavailableException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ResourceAlreadyExistsException": exceptions.ResourceAlreadyExistsException,
"OperationAbortedException": exceptions.OperationAbortedException,
"InvalidSequenceTokenException": exceptions.InvalidSequenceTokenException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudWatchLogsConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_log_group(self, log_group_name):
"""
Creates a new log group with the specified name. The name of
the log group must be unique within a region for an AWS
account. You can create up to 100 log groups per account.
You must use the following guidelines when naming a log group:
+ Log group names can be between 1 and 512 characters long.
+ Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
(hyphen), '/' (forward slash), and '.' (period).
Log groups are created with a default retention of 14 days.
The retention attribute allows you to configure the number of
days you want to retain log events in the specified log group.
See the `SetRetention` operation on how to modify the
retention of your log groups.
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='CreateLogGroup',
body=json.dumps(params))
def create_log_stream(self, log_group_name, log_stream_name):
"""
Creates a new log stream in the specified log group. The name
of the log stream must be unique within the log group. There
is no limit on the number of log streams that can exist in a
log group.
You must use the following guidelines when naming a log
stream:
+ Log stream names can be between 1 and 512 characters long.
+ The ':' colon character is not allowed.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
return self.make_request(action='CreateLogStream',
body=json.dumps(params))
def delete_log_group(self, log_group_name):
"""
Deletes the log group with the specified name. Amazon
CloudWatch Logs will delete a log group only if there are no
log streams and no metric filters associated with the log
group. If this condition is not satisfied, the request will
fail and the log group will not be deleted.
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='DeleteLogGroup',
body=json.dumps(params))
def delete_log_stream(self, log_group_name, log_stream_name):
"""
Deletes a log stream and permanently deletes all the archived
log events associated with it.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
return self.make_request(action='DeleteLogStream',
body=json.dumps(params))
def delete_metric_filter(self, log_group_name, filter_name):
"""
Deletes a metric filter associated with the specified log
group.
:type log_group_name: string
:param log_group_name:
:type filter_name: string
:param filter_name: The name of the metric filter.
"""
params = {
'logGroupName': log_group_name,
'filterName': filter_name,
}
return self.make_request(action='DeleteMetricFilter',
body=json.dumps(params))
def delete_retention_policy(self, log_group_name):
"""
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='DeleteRetentionPolicy',
body=json.dumps(params))
def describe_log_groups(self, log_group_name_prefix=None,
next_token=None, limit=None):
"""
Returns all the log groups that are associated with the AWS
account making the request. The list returned in the response
is ASCII-sorted by log group name.
By default, this operation returns up to 50 log groups. If
there are more log groups to list, the response would contain
a `nextToken` value in the response body. You can also limit
the number of log groups returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name_prefix: string
:param log_group_name_prefix:
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeLogGroups` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {}
if log_group_name_prefix is not None:
params['logGroupNamePrefix'] = log_group_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeLogGroups',
body=json.dumps(params))
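# Hedged usage sketch (an addition, assuming the usual boto.logs
# connect_to_region helper and the documented 'logGroups'/'nextToken'
# response keys):
#
#   import boto.logs
#   conn = boto.logs.connect_to_region('us-east-1')
#   token, groups = None, []
#   while True:
#       resp = conn.describe_log_groups(next_token=token)
#       groups.extend(resp.get('logGroups', []))
#       token = resp.get('nextToken')
#       if token is None:
#           break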
def describe_log_streams(self, log_group_name,
log_stream_name_prefix=None, next_token=None,
limit=None):
"""
Returns all the log streams that are associated with the
specified log group. The list returned in the response is
ASCII-sorted by log stream name.
By default, this operation returns up to 50 log streams. If
there are more log streams to list, the response would contain
a `nextToken` value in the response body. You can also limit
the number of log streams returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type log_stream_name_prefix: string
:param log_stream_name_prefix:
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeLogStreams` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {'logGroupName': log_group_name, }
if log_stream_name_prefix is not None:
params['logStreamNamePrefix'] = log_stream_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeLogStreams',
body=json.dumps(params))
def describe_metric_filters(self, log_group_name,
filter_name_prefix=None, next_token=None,
limit=None):
"""
        Returns all the metric filters associated with the specified
log group. The list returned in the response is ASCII-sorted
by filter name.
By default, this operation returns up to 50 metric filters. If
there are more metric filters to list, the response would
contain a `nextToken` value in the response body. You can also
limit the number of metric filters returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type filter_name_prefix: string
        :param filter_name_prefix: A prefix to match against metric filter names.
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeMetricFilters` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {'logGroupName': log_group_name, }
if filter_name_prefix is not None:
params['filterNamePrefix'] = filter_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeMetricFilters',
body=json.dumps(params))
def get_log_events(self, log_group_name, log_stream_name,
start_time=None, end_time=None, next_token=None,
limit=None, start_from_head=None):
"""
Retrieves log events from the specified log stream. You can
provide an optional time range to filter the results on the
event `timestamp`.
        By default, this operation returns as many log events as can
fit in a response size of 1MB, up to 10,000 log events. The
response will always include a `nextForwardToken` and a
`nextBackwardToken` in the response body. You can use any of
these tokens in subsequent `GetLogEvents` requests to paginate
through events in either forward or backward direction. You
can also limit the number of log events returned in the
response by specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
:type start_time: long
        :param start_time: A point in time expressed as the number of
            milliseconds since Jan 1, 1970 00:00:00 UTC.
:type end_time: long
        :param end_time: A point in time expressed as the number of
            milliseconds since Jan 1, 1970 00:00:00 UTC.
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
`nextForwardToken` or `nextBackwardToken` fields in the response of
the previous `GetLogEvents` request.
:type limit: integer
:param limit: The maximum number of log events returned in the
response. If you don't specify a value, the request would return as
            many log events as can fit in a response size of 1MB, up to 10,000
log events.
:type start_from_head: boolean
:param start_from_head:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
if start_time is not None:
params['startTime'] = start_time
if end_time is not None:
params['endTime'] = end_time
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
if start_from_head is not None:
params['startFromHead'] = start_from_head
return self.make_request(action='GetLogEvents',
body=json.dumps(params))
def put_log_events(self, log_group_name, log_stream_name, log_events,
sequence_token=None):
"""
Uploads a batch of log events to the specified log stream.
Every PutLogEvents request must include the `sequenceToken`
obtained from the response of the previous request. An upload
in a newly created log stream does not require a
`sequenceToken`.
The batch of events must satisfy the following constraints:
+ The maximum batch size is 32,768 bytes, and this size is
calculated as the sum of all event messages in UTF-8, plus 26
bytes for each log event.
+ None of the log events in the batch can be more than 2 hours
in the future.
+ None of the log events in the batch can be older than 14
days or the retention period of the log group.
        + The log events in the batch must be in chronological order,
        sorted by their `timestamp`.
+ The maximum number of log events in a batch is 1,000.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
:type log_events: list
:param log_events: A list of events belonging to a log stream.
:type sequence_token: string
:param sequence_token: A string token that must be obtained from the
response of the previous `PutLogEvents` request.
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'logEvents': log_events,
}
if sequence_token is not None:
params['sequenceToken'] = sequence_token
return self.make_request(action='PutLogEvents',
body=json.dumps(params))
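    # A minimal usage sketch (illustrative only; ``conn`` is an assumed
    # connection instance, and the ``nextSequenceToken`` key follows the
    # CloudWatch Logs JSON response):
    #
    #   events = [{'timestamp': ts_millis, 'message': 'hello'}]
    #   resp = conn.put_log_events('my-group', 'my-stream', events,
    #                              sequence_token=token)
    #   token = resp.get('nextSequenceToken')  # feed into the next request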
def put_metric_filter(self, log_group_name, filter_name, filter_pattern,
metric_transformations):
"""
Creates or updates a metric filter and associates it with the
specified log group. Metric filters allow you to configure
rules to extract metric data from log events ingested through
`PutLogEvents` requests.
:type log_group_name: string
:param log_group_name:
:type filter_name: string
:param filter_name: The name of the metric filter.
:type filter_pattern: string
:param filter_pattern:
:type metric_transformations: list
:param metric_transformations:
"""
params = {
'logGroupName': log_group_name,
'filterName': filter_name,
'filterPattern': filter_pattern,
'metricTransformations': metric_transformations,
}
return self.make_request(action='PutMetricFilter',
body=json.dumps(params))
def put_retention_policy(self, log_group_name, retention_in_days):
"""
:type log_group_name: string
:param log_group_name:
:type retention_in_days: integer
:param retention_in_days: Specifies the number of days you want to
retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
"""
params = {
'logGroupName': log_group_name,
'retentionInDays': retention_in_days,
}
return self.make_request(action='PutRetentionPolicy',
body=json.dumps(params))
def set_retention(self, log_group_name, retention_in_days):
"""
Sets the retention of the specified log group. Log groups are
created with a default retention of 14 days. The retention
        attribute allows you to configure the number of days you want
to retain log events in the specified log group.
:type log_group_name: string
:param log_group_name:
:type retention_in_days: integer
:param retention_in_days: Specifies the number of days you want to
retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
"""
params = {
'logGroupName': log_group_name,
'retentionInDays': retention_in_days,
}
return self.make_request(action='SetRetention',
body=json.dumps(params))
def test_metric_filter(self, filter_pattern, log_event_messages):
"""
Tests the filter pattern of a metric filter against a sample
of log event messages. You can use this operation to validate
the correctness of a metric filter pattern.
:type filter_pattern: string
:param filter_pattern:
:type log_event_messages: list
:param log_event_messages:
"""
params = {
'filterPattern': filter_pattern,
'logEventMessages': log_event_messages,
}
return self.make_request(action='TestMetricFilter',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
MCFlowMace/Wordom | src/setup.py | 1 | 1423 | #! /usr/bin/env python
# System imports
from distutils.core import *
from distutils import sysconfig
# Third-party modules - we depend on numpy
import numpy
# in order to check whether lapack are present ...
import numpy.distutils.system_info as sysinfo
# Obtain the numpy include directory. This works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# wordom extension module
if len(sysinfo.get_info('lapack')) == 0:
_wordom = Extension("_wordom",
["wordom.i","fileio.c","tools.c","qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
)
else:
_wordom = Extension("_wordom",
["wordom.i","fileio.c","tools.c","qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
include_dirs = [numpy_include],
extra_compile_args = ["-D LAPACK"],
libraries = [ 'lapack', 'blas' ]
)
# NumyTypemapTests setup
setup( name = "wordom",
description = "wordom is a molecular structure and data manipulation program/library",
author = "Michele Seeber & colleagues",
url = "http://wordom.sf.net",
author_email= "[email protected]",
license = "GPL",
version = "0.23",
ext_modules = [_wordom],
py_modules = ['wordom']
)
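# A typical build/install sketch (standard distutils invocations; SWIG must
# be available, since the extension sources list wordom.i):
#   python setup.py build_ext --inplace
#   python setup.py install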
| gpl-3.0 |
flask-admin/flask-admin | flask_admin/model/ajax.py | 53 | 1076 | DEFAULT_PAGE_SIZE = 10
class AjaxModelLoader(object):
"""
Ajax related model loader. Override this to implement custom loading behavior.
"""
def __init__(self, name, options):
"""
Constructor.
:param name:
Field name
"""
self.name = name
self.options = options
def format(self, model):
"""
Return (id, name) tuple from the model.
"""
raise NotImplementedError()
def get_one(self, pk):
"""
Find model by its primary key.
:param pk:
Primary key value
"""
raise NotImplementedError()
def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
"""
Return models that match `query`.
:param query:
Query string
:param offset:
Offset
:param limit:
Limit
"""
raise NotImplementedError()
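# A minimal subclass sketch (the model class, its fields, and the query API
# are assumptions for illustration, not part of this module):
#
# class UserAjaxLoader(AjaxModelLoader):
#     def format(self, model):
#         return (model.id, model.name) if model else None
#
#     def get_one(self, pk):
#         return User.query.get(pk)
#
#     def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
#         return (User.query.filter(User.name.contains(query))
#                 .offset(offset).limit(limit).all())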
| bsd-3-clause |
psdh/servo | components/script/dom/bindings/codegen/parser/tests/test_builtins.py | 276 | 1798 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestBuiltins {
attribute boolean b;
attribute byte s8;
attribute octet u8;
attribute short s16;
attribute unsigned short u16;
attribute long s32;
attribute unsigned long u32;
attribute long long s64;
attribute unsigned long long u64;
attribute DOMTimeStamp ts;
};
""")
results = parser.finish()
harness.ok(True, "TestBuiltins interface parsed without error.")
harness.check(len(results), 1, "Should be one production")
harness.ok(isinstance(results[0], WebIDL.IDLInterface),
"Should be an IDLInterface")
iface = results[0]
harness.check(iface.identifier.QName(), "::TestBuiltins", "Interface has the right QName")
harness.check(iface.identifier.name, "TestBuiltins", "Interface has the right name")
harness.check(iface.parent, None, "Interface has no parent")
members = iface.members
    harness.check(len(members), 10, "Should be 10 members")
names = ["b", "s8", "u8", "s16", "u16", "s32", "u32", "s64", "u64", "ts"]
types = ["Boolean", "Byte", "Octet", "Short", "UnsignedShort", "Long", "UnsignedLong", "LongLong", "UnsignedLongLong", "UnsignedLongLong"]
for i in range(10):
attr = members[i]
harness.ok(isinstance(attr, WebIDL.IDLAttribute), "Should be an IDLAttribute")
harness.check(attr.identifier.QName(), "::TestBuiltins::" + names[i], "Attr has correct QName")
harness.check(attr.identifier.name, names[i], "Attr has correct name")
harness.check(str(attr.type), types[i], "Attr type is the correct name")
harness.ok(attr.type.isPrimitive(), "Should be a primitive type")
| mpl-2.0 |
benob/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/classify/__init__.py | 9 | 3871 | # Natural Language Toolkit: Classifiers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for labeling tokens with category labels (or
X{class labels}). Typically, labels are represented with strings
(such as C{'health'} or C{'sports'}). Classifiers can be used to
perform a wide range of classification tasks. For example,
classifiers can be used...
- to classify documents by topic.
- to classify ambiguous words by which word sense is intended.
- to classify acoustic signals by which phoneme they represent.
- to classify sentences by their author.
Features
--------
In order to decide which category label is appropriate for a given
token, classifiers examine one or more 'features' of the token. These
X{features} are typically chosen by hand, and indicate which aspects
of the token are relevant to the classification decision. For
example, a document classifier might use a separate feature for each
word, recording how often that word occurred in the document.
Featuresets
-----------
The features describing a token are encoded using a X{featureset},
which is a dictionary that maps from X{feature names} to X{feature
values}. Feature names are unique strings that indicate what aspect
of the token is encoded by the feature. Examples include
C{'prevword'}, for a feature whose value is the previous word; and
C{'contains-word(library)'} for a feature that is true when a document
contains the word C{'library'}. Feature values are typically
booleans, numbers, or strings, depending on which feature they
describe.
Featuresets are typically constructed using a X{feature
extraction function}, which takes a token as its input, and returns a
featuresets describing that token. This feature extraction
function is applied to each token before it is fed to the classifier:
>>> # Define a feature extraction function.
>>> def document_features(document):
... return dict([('contains-word(%s)'%w,True) for w in document])
>>> # Classify each Gutenberg document.
>>> for file in gutenberg.files():
... doc = gutenberg.tokenized(file)
... print file, classifier.classify(document_features(doc))
Training Classifiers
--------------------
Most classifiers are built by training them on a list of hand-labeled
examples, known as the X{training set}. Training sets are represented
as lists of C{(featuredict, label)} tuples.
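
A minimal training sketch (C{labeled_docs} is an assumed list of
C{(document, label)} pairs; C{NaiveBayesClassifier.train} is the
trainer exported by this package):

>>> train = [(document_features(d), c) for (d, c) in labeled_docs]
>>> classifier = NaiveBayesClassifier.train(train)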
"""
from api import *
from util import *
from naivebayes import *
from decisiontree import *
from weka import *
from nltk.internals import deprecated, Deprecated
__all__ = [
# Classifier Interfaces
'ClassifierI', 'MultiClassifierI',
# Classifiers
'NaiveBayesClassifier', 'DecisionTreeClassifier', 'WekaClassifier',
# Utility functions. Note that accuracy() is intentionally
# omitted -- it should be accessed as nltk.classify.accuracy();
# similarly for log_likelihood() and attested_labels().
'config_weka',
# Demos -- not included.
]
try:
import numpy
from maxent import *
__all__ += ['ConditionalExponentialClassifier', 'train_maxent_classifier',]
except ImportError:
pass
######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class ClassifyI(ClassifierI, Deprecated):
"""Use nltk.ClassifierI instead."""
@deprecated("Use nltk.classify.accuracy() instead.")
def classifier_accuracy(classifier, gold):
return accuracy(classifier, gold)
@deprecated("Use nltk.classify.log_likelihood() instead.")
def classifier_log_likelihood(classifier, gold):
return log_likelihood(classifier, gold)
| gpl-3.0 |
LoHChina/nova | nova/tests/functional/v3/test_migrate_server.py | 27 | 3462 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.conductor import manager as conductor_manager
from nova import db
from nova.tests.functional.v3 import test_servers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
extension_name = "os-migrate-server"
ctype = 'json'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(MigrateServerSamplesJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.admin_actions.'
'Admin_actions')
return f
def setUp(self):
"""setUp Method for MigrateServer api samples extension
This method creates the server that will be used in each tests
"""
super(MigrateServerSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
@mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
def test_post_migrate(self, mock_cold_migrate):
# Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'migrate-server', {})
self.assertEqual(202, response.status_code)
def test_post_live_migrate_server(self):
# Get api samples to server live migrate request.
def fake_live_migrate(_self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
self.assertEqual(self.uuid, instance["uuid"])
host = scheduler_hint["host"]
self.assertEqual(self.compute.host, host)
self.stubs.Set(conductor_manager.ComputeTaskManager,
'_live_migrate',
fake_live_migrate)
def fake_get_compute(context, host):
service = dict(host=host,
binary='nova-compute',
topic='compute',
report_count=1,
updated_at='foo',
hypervisor_type='bar',
hypervisor_version=utils.convert_version_to_int(
'1.0'),
disabled=False)
return {'compute_node': [service]}
self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'live-migrate-server',
{'hostname': self.compute.host})
self.assertEqual(202, response.status_code)
| apache-2.0 |
montefra/dodocs | dodocs/__init__.py | 1 | 1068 | """Main function
Copyright (c) 2015 Francesco Montesano
MIT Licence
"""
import os
import sys
from dodocs.cmdline import parse
import dodocs.logger as dlog
__version__ = "0.0.1"
def main(argv=None):
"""
Main code
Parameters
----------
argv : list of strings, optional
command line arguments
"""
args = parse(argv=argv)
dlog.setLogger(args)
# make sure to reset the subcommand name
log = dlog.getLogger()
if "func" in args:
args.func(args)
log.debug("Finished")
return 0
else:
# defaults profile to list
if args.subparser_name == 'profile' and args.profile_cmd is None:
            return main(sys.argv[1:] + ["list"])
else:
# in the other cases suggest to run -h
msg = ("Please provide a valid command.\n"
"Type\n " + os.path.split(sys.argv[0])[1])
if args.subparser_name is not None:
msg += " " + args.subparser_name
msg += ' -h'
log.error(msg)
return 1
| mit |
mozilla/kitsune | kitsune/wiki/permissions.py | 1 | 4844 | import logging
from django.conf import settings
log = logging.getLogger("k.wiki")
# Why is this a mixin if it can only be used for the Document model?
# Good question! My only good reason is to keep the permission related
# code organized and contained in one place.
class DocumentPermissionMixin(object):
"""Adds of permission checking methods to the Document model."""
def allows(self, user, action):
"""Check if the user has the permission on the document."""
# If this is kicking up a KeyError it's probably because you typoed!
return getattr(self, "_allows_%s" % action)(user)
def _allows_create_revision(self, user):
"""Can the user create a revision for the document?"""
# For now (ever?), creating revisions isn't restricted at all.
return True
def _allows_edit(self, user):
"""Can the user edit the document?"""
# Document editing isn't restricted until it has an approved
# revision.
if not self.current_revision:
return True
# Locale leaders and reviewers can edit in their locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# And finally, fallback to the actual django permission.
return user.has_perm("wiki.change_document")
def _allows_delete(self, user):
"""Can the user delete the document?"""
# Locale leaders can delete documents in their locale.
locale = self.locale
if _is_leader(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.delete_document")
def _allows_archive(self, user):
"""Can the user archive the document?"""
# Just use the django permission.
return user.has_perm("wiki.archive_document")
def _allows_edit_keywords(self, user):
"""Can the user edit the document's keywords?"""
# If the document is in the default locale, just use the
# django permission.
# Editing keywords isn't restricted in other locales.
return self.locale != settings.WIKI_DEFAULT_LANGUAGE or user.has_perm("wiki.edit_keywords")
def _allows_edit_needs_change(self, user):
"""Can the user edit the needs change fields for the document?"""
# If the document is in the default locale, just use the
# django permission.
# Needs change isn't used for other locales (yet?).
return self.locale == settings.WIKI_DEFAULT_LANGUAGE and user.has_perm(
"wiki.edit_needs_change"
)
def _allows_mark_ready_for_l10n(self, user):
""""Can the user mark the document as ready for localization?"""
# If the document is localizable and the user has the django
# permission, then the user can mark as ready for l10n.
return self.is_localizable and user.has_perm("wiki.mark_ready_for_l10n")
def _allows_review_revision(self, user):
"""Can the user review a revision for the document?"""
# Locale leaders and reviewers can review revisions in their
# locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.review_revision")
def _allows_delete_revision(self, user):
"""Can the user delete a document's revisions?"""
# Locale leaders and reviewers can delete revisions in their
# locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.delete_revision")
def _is_leader(locale, user):
"""Checks if the user is a leader for the given locale.
    Returns False if the locale doesn't exist. This should only happen
if we forgot to insert a new locale when enabling it or during testing.
"""
from kitsune.wiki.models import Locale
try:
locale_team = Locale.objects.get(locale=locale)
except Locale.DoesNotExist:
log.warning("Locale not created for %s" % locale)
return False
return user in locale_team.leaders.all()
def _is_reviewer(locale, user):
"""Checks if the user is a reviewer for the given locale.
    Returns False if the locale doesn't exist. This should only happen
if we forgot to insert a new locale when enabling it or during testing.
"""
from kitsune.wiki.models import Locale
try:
locale_team = Locale.objects.get(locale=locale)
except Locale.DoesNotExist:
log.warning("Locale not created for %s" % locale)
return False
return user in locale_team.reviewers.all()
| bsd-3-clause |
dwaynebailey/translate | translate/lang/zh_tw.py | 3 | 1116 | # -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Chinese language (traditional).
.. seealso:: http://en.wikipedia.org/wiki/Chinese_language
"""
from __future__ import unicode_literals
from translate.lang.zh import zh
class zh_tw(zh):
specialchars = "←→↔×÷©…—‘’“”「」『』【】《》"
ignoretests = {
'all': ["acronyms", "simplecaps", "startcaps"],
}
| gpl-2.0 |
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
try:
_big_number = sys.maxsize
except NameError:
_big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
"""Use parse_version from pkg_resources or distutils as available."""
global parse_version
try:
from pkg_resources import parse_version
except ImportError:
from distutils.version import LooseVersion as parse_version
return parse_version(version)
class BadWheelFile(ValueError):
pass
class WheelFile(object):
"""Parse wheel-specific attributes from a wheel (.whl) file and offer
basic installation and verification support.
WheelFile can be used to simply parse a wheel filename by avoiding the
methods that require the actual file contents."""
WHEEL_INFO = "WHEEL"
RECORD = "RECORD"
def __init__(self,
filename,
fp=None,
append=False,
context=get_supported):
"""
:param fp: A seekable file-like object or None to open(filename).
:param append: Open archive in append mode.
:param context: Function returning list of supported tags. Wheels
must have the same context to be sortable.
"""
self.filename = filename
self.fp = fp
self.append = append
self.context = context
basename = os.path.basename(filename)
self.parsed_filename = WHEEL_INFO_RE(basename)
if not basename.endswith('.whl') or self.parsed_filename is None:
raise BadWheelFile("Bad filename '%s'" % filename)
def __repr__(self):
return self.filename
@property
def distinfo_name(self):
return "%s.dist-info" % self.parsed_filename.group('namever')
@property
def datadir_name(self):
return "%s.data" % self.parsed_filename.group('namever')
@property
def record_name(self):
return "%s/%s" % (self.distinfo_name, self.RECORD)
@property
def wheelinfo_name(self):
return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)
@property
def tags(self):
"""A wheel file is compatible with the Cartesian product of the
period-delimited tags in its filename.
To choose a wheel file among several candidates having the same
distribution version 'ver', an installer ranks each triple of
(pyver, abi, plat) that its Python installation can run, sorting
the wheels by the best-ranked tag it supports and then by their
arity which is just len(list(compatibility_tags)).
"""
tags = self.parsed_filename.groupdict()
for pyver in tags['pyver'].split('.'):
for abi in tags['abi'].split('.'):
for plat in tags['plat'].split('.'):
yield (pyver, abi, plat)
compatibility_tags = tags
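    # For example (a sketch; the filename is hypothetical), a wheel named
    # 'mypkg-1.0-py2.py3-none-any.whl' declares two compatibility tags:
    #   [('py2', 'none', 'any'), ('py3', 'none', 'any')]
    # which is what list(WheelFile(...).tags) would yield.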
@property
def arity(self):
"""The number of compatibility tags the wheel declares."""
return len(list(self.compatibility_tags))
@property
def rank(self):
"""
Lowest index of any of this wheel's tags in self.context(), and the
arity e.g. (0, 1)
"""
return self.compatibility_rank(self.context())
@property
def compatible(self):
return self.rank[0] != _big_number # bad API!
# deprecated:
def compatibility_rank(self, supported):
"""Rank the wheel against the supported tags. Smaller ranks are more
compatible!
:param supported: A list of compatibility tags that the current
            Python implementation can run.
"""
preferences = []
for tag in self.compatibility_tags:
try:
preferences.append(supported.index(tag))
# Tag not present
except ValueError:
pass
if len(preferences):
return (min(preferences), self.arity)
return (_big_number, 0)
# deprecated
def supports_current_python(self, x):
assert self.context == x, 'context mismatch'
return self.compatible
# Comparability.
# Wheels are equal if they refer to the same file.
# If two wheels are not equal, compare based on (in this order):
# 1. Name
# 2. Version
# 3. Compatibility rank
# 4. Filename (as a tiebreaker)
@property
def _sort_key(self):
return (self.parsed_filename.group('name'),
parse_version(self.parsed_filename.group('ver')),
tuple(-x for x in self.rank),
self.filename)
def __eq__(self, other):
return self.filename == other.filename
def __ne__(self, other):
return self.filename != other.filename
def __lt__(self, other):
if self.context != other.context:
raise TypeError("{0}.context != {1}.context".format(self, other))
return self._sort_key < other._sort_key
def __gt__(self, other):
return other < self
def __le__(self, other):
return self == other or self < other
def __ge__(self, other):
return self == other or other < self
#
# Methods using the file's contents:
#
@reify
def zipfile(self):
mode = "r"
if self.append:
mode = "a"
vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
if not self.append:
self.verify(vzf)
return vzf
@reify
def parsed_wheel_info(self):
"""Parse wheel metadata (the .data/WHEEL file)"""
return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))
def check_version(self):
version = self.parsed_wheel_info['Wheel-Version']
if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
raise ValueError("Wheel version is too high")
@reify
def install_paths(self):
"""
Consult distutils to get the install paths for our dist. A dict with
('purelib', 'platlib', 'headers', 'scripts', 'data').
We use the name from our filename as the dist name, which means headers
could be installed in the wrong place if the filesystem-escaped name
is different than the Name. Who cares?
"""
name = self.parsed_filename.group('name')
return get_install_paths(name)
def install(self, force=False, overrides={}):
"""
Install the wheel into site-packages.
"""
# Utility to get the target directory for a particular key
def get_path(key):
return overrides.get(key) or self.install_paths[key]
# The base target location is either purelib or platlib
if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
root = get_path('purelib')
else:
root = get_path('platlib')
# Parse all the names in the archive
name_trans = {}
for info in self.zipfile.infolist():
name = info.filename
# Zip files can contain entries representing directories.
# These end in a '/'.
# We ignore these, as we create directories on demand.
if name.endswith('/'):
continue
# Pathnames in a zipfile namelist are always /-separated.
# In theory, paths could start with ./ or have other oddities
# but this won't happen in practical cases of well-formed wheels.
# We'll cover the simple case of an initial './' as it's both easy
# to do and more common than most other oddities.
if name.startswith('./'):
name = name[2:]
# Split off the base directory to identify files that are to be
# installed in non-root locations
basedir, sep, filename = name.partition('/')
if sep and basedir == self.datadir_name:
# Data file. Target destination is elsewhere
key, sep, filename = filename.partition('/')
if not sep:
raise ValueError("Invalid filename in wheel: {0}".format(name))
target = get_path(key)
else:
# Normal file. Target destination is root
key = ''
target = root
filename = name
# Map the actual filename from the zipfile to its intended target
# directory and the pathname relative to that directory.
dest = os.path.normpath(os.path.join(target, filename))
name_trans[info] = (key, target, filename, dest)
# We're now ready to start processing the actual install. The process
# is as follows:
# 1. Prechecks - is the wheel valid, is its declared architecture
# OK, etc. [[Responsibility of the caller]]
# 2. Overwrite check - do any of the files to be installed already
# exist?
# 3. Actual install - put the files in their target locations.
# 4. Update RECORD - write a suitably modified RECORD file to
# reflect the actual installed paths.
if not force:
for info, v in name_trans.items():
k = info.filename
key, target, filename, dest = v
if os.path.exists(dest):
raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))
# Get the name of our executable, for use when replacing script
# wrapper hashbang lines.
# We encode it using getfilesystemencoding, as that is "the name of
# the encoding used to convert Unicode filenames into system file
# names".
exename = sys.executable.encode(sys.getfilesystemencoding())
record_data = []
record_name = self.distinfo_name + '/RECORD'
for info, (key, target, filename, dest) in name_trans.items():
name = info.filename
source = self.zipfile.open(info)
# Skip the RECORD file
if name == record_name:
continue
ddir = os.path.dirname(dest)
if not os.path.isdir(ddir):
os.makedirs(ddir)
destination = HashingFile(open(dest, 'wb'))
if key == 'scripts':
hashbang = source.readline()
if hashbang.startswith(b'#!python'):
hashbang = b'#!' + exename + binary(os.linesep)
destination.write(hashbang)
shutil.copyfileobj(source, destination)
reldest = os.path.relpath(dest, root)
reldest.replace(os.sep, '/')
record_data.append((reldest, destination.digest(), destination.length))
destination.close()
source.close()
# preserve attributes (especially +x bit for scripts)
attrs = info.external_attr >> 16
if attrs: # tends to be 0 if Windows.
os.chmod(dest, info.external_attr >> 16)
record_name = os.path.join(root, self.record_name)
writer = csv.writer(open_for_csv(record_name, 'w+'))
for reldest, digest, length in sorted(record_data):
writer.writerow((reldest, digest, length))
writer.writerow((self.record_name, '', ''))
def verify(self, zipfile=None):
"""Configure the VerifyingZipFile `zipfile` by verifying its signature
and setting expected hashes for every hash in RECORD.
Caller must complete the verification process by completely reading
every file in the archive (e.g. with extractall)."""
sig = None
if zipfile is None:
zipfile = self.zipfile
zipfile.strict = True
record_name = '/'.join((self.distinfo_name, 'RECORD'))
sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
# tolerate s/mime signatures:
smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
zipfile.set_expected_hash(record_name, None)
zipfile.set_expected_hash(sig_name, None)
zipfile.set_expected_hash(smime_sig_name, None)
record = zipfile.read(record_name)
record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
try:
sig = from_json(native(zipfile.read(sig_name)))
except KeyError: # no signature
pass
if sig:
headers, payload = signatures.verify(sig)
if payload['hash'] != "sha256=" + native(record_digest):
msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
raise BadWheelFile(msg.format(payload['hash'],
native(record_digest)))
reader = csv.reader((native(r) for r in record.splitlines()))
for row in reader:
filename = row[0]
hash = row[1]
if not hash:
if filename not in (record_name, sig_name):
sys.stderr.write("%s has no hash!\n" % filename)
continue
algo, data = row[1].split('=', 1)
assert algo == "sha256", "Unsupported hash algorithm"
zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
class VerifyingZipFile(zipfile.ZipFile):
"""ZipFile that can assert that each of its extracted contents matches
an expected sha256 hash. Note that each file must be completly read in
order for its hash to be checked."""
def __init__(self, file, mode="r",
compression=zipfile.ZIP_STORED,
allowZip64=False):
zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)
self.strict = False
self._expected_hashes = {}
self._hash_algorithm = hashlib.sha256
def set_expected_hash(self, name, hash):
"""
:param name: name of zip entry
:param hash: bytes of hash (or None for "don't care")
"""
self._expected_hashes[name] = hash
def open(self, name_or_info, mode="r", pwd=None):
"""Return file-like object for 'name'."""
# A non-monkey-patched version would contain most of zipfile.py
ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
if isinstance(name_or_info, zipfile.ZipInfo):
name = name_or_info.filename
else:
name = name_or_info
        if (name in self._expected_hashes
            and self._expected_hashes[name] is not None):
expected_hash = self._expected_hashes[name]
try:
_update_crc_orig = ef._update_crc
except AttributeError:
warnings.warn('Need ZipExtFile._update_crc to implement '
'file hash verification (in Python >= 2.7)')
return ef
running_hash = self._hash_algorithm()
if hasattr(ef, '_eof'): # py33
def _update_crc(data):
_update_crc_orig(data)
running_hash.update(data)
if ef._eof and running_hash.digest() != expected_hash:
raise BadWheelFile("Bad hash for file %r" % ef.name)
else:
def _update_crc(data, eof=None):
_update_crc_orig(data, eof=eof)
running_hash.update(data)
if eof and running_hash.digest() != expected_hash:
raise BadWheelFile("Bad hash for file %r" % ef.name)
ef._update_crc = _update_crc
elif self.strict and name not in self._expected_hashes:
raise BadWheelFile("No expected hash for file %r" % ef.name)
return ef
def pop(self):
"""Truncate the last file off this zipfile.
Assumes infolist() is in the same order as the files (true for
ordinary zip files created by Python)"""
if not self.fp:
raise RuntimeError(
"Attempt to pop from ZIP archive that was already closed")
last = self.infolist().pop()
del self.NameToInfo[last.filename]
self.fp.seek(last.header_offset, os.SEEK_SET)
self.fp.truncate()
self._didModify = True
| mit |
aussendorf/bareos-fd-python-plugins | plugin/BareosFdPluginBaseclass.py | 1 | 5778 | #This file is now part of the main Bareos repo. Do not use this version, use the package bareos-filedaemon-python-plugin instead
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Baseclass for Bareos python plugins
# Functions taken and adapted from bareos-fd.py
# (c) Bareos GmbH & Co. KG, Maik Aussendorf
# AGPL v.3
from bareosfd import *
from bareos_fd_consts import *
from io import open
from os import O_WRONLY, O_CREAT
class BareosFdPluginBaseclass:
''' Bareos python plugin base class '''
def __init__(self, context, plugindef):
DebugMessage(context, 100, "Constructor called in module " + __name__ + "\n");
events = [];
events.append(bEventType['bEventJobEnd']);
events.append(bEventType['bEventEndBackupJob']);
events.append(bEventType['bEventEndFileSet']);
events.append(bEventType['bEventHandleBackupFile']);
RegisterEvents(context, events);
# get some static Bareos values
self.fdname = GetValue(context, bVariable['bVarFDName']);
self.jobId = GetValue(context, bVariable['bVarJobId']);
self.client = GetValue(context, bVariable['bVarClient']);
self.level = GetValue(context, bVariable['bVarLevel']);
self.jobName = GetValue(context, bVariable['bVarJobName']);
self.workingdir = GetValue(context, bVariable['bVarWorkingDir']);
DebugMessage(context, 100, "FDName = " + self.fdname + " - BareosFdPluginBaseclass\n");
DebugMessage(context, 100, "WorkingDir = " + self.workingdir + " jobId: " + str(self.jobId) + "\n");
def parse_plugin_definition(self,context, plugindef):
DebugMessage(context, 100, "plugin def parser called with " + plugindef + "\n");
# Parse plugin options into a dict
self.options = dict();
plugin_options = plugindef.split(":");
for current_option in plugin_options:
key,sep,val = current_option.partition("=");
DebugMessage(context, 100, "key:val: " + key + ':' + val + "\n");
if val == '':
continue;
else:
self.options[key] = val;
# you should overload this method with your own and do option checking here, return bRCs['bRC_Error'], if options are not ok
# or better call super.parse_plugin_definition in your own class and make sanity check on self.options afterwards
return bRCs['bRC_OK'];
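    # Example (a sketch with made-up paths): a plugin definition such as
    #   "python:module_path=/usr/lib/bareos/plugins:module_name=bareos-fd-mock-test"
    # leaves self.options == {'module_path': '/usr/lib/bareos/plugins',
    # 'module_name': 'bareos-fd-mock-test'}; the bare "python" token is
    # skipped because it carries no '=value' part.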
def plugin_io(self, context, IOP):
DebugMessage(context, 100, "plugin_io called with " + str(IOP) + "\n");
FNAME = IOP.fname;
if IOP.func == bIOPS['IO_OPEN']:
try:
if IOP.flags & (O_CREAT | O_WRONLY):
self.file = open(FNAME, 'wb');
else:
self.file = open(FNAME, 'rb');
except:
IOP.status = -1;
return bRCs['bRC_Error'];
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_CLOSE']:
self.file.close();
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_SEEK']:
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_READ']:
IOP.buf = bytearray(IOP.count);
IOP.status = self.file.readinto(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_WRITE']:
IOP.status = self.file.write(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
def handle_plugin_event(self, context, event):
if event == bEventType['bEventJobEnd']:
DebugMessage(context, 100, "handle_plugin_event called with bEventJobEnd\n");
elif event == bEventType['bEventEndBackupJob']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndBackupJob\n");
elif event == bEventType['bEventEndFileSet']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndFileSet\n");
else:
DebugMessage(context, 100, "handle_plugin_event called with event" + str(event) + "\n");
return bRCs['bRC_OK'];
def start_backup_file(self,context, savepkt):
DebugMessage(context, 100, "start_backup called\n");
# Base method, we do not add anything, overload this method with your implementation to add files to backup fileset
return bRCs['bRC_Skip'];
def end_backup_file(self, context):
DebugMessage(context, 100, "end_backup_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def start_restore_file(self, context, cmd):
DebugMessage(context, 100, "start_restore_file() entry point in Python called with" + str(cmd) + "\n")
return bRCs['bRC_OK'];
def end_restore_file(self,context):
DebugMessage(context, 100, "end_restore_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def restore_object_data(self, context, ROP):
DebugMessage(context, 100, "restore_object_data called with " + str(ROP) + "\n");
return bRCs['bRC_OK'];
def create_file(self,context, restorepkt):
DebugMessage(context, 100, "create_file() entry point in Python called with" + str(restorepkt) + "\n")
restorepkt.create_status = bCFs['CF_EXTRACT'];
return bRCs['bRC_OK'];
def check_file(self,context, fname):
DebugMessage(context, 100, "check_file() entry point in Python called with" + str(fname) + "\n")
return bRCs['bRC_OK'];
def handle_backup_file(self,context, savepkt):
DebugMessage(context, 100, "handle_backup_file called with " + str(savepkt) + "\n");
return bRCs['bRC_OK'];
# vim: ts=4 tabstop=4 expandtab shiftwidth=4 softtabstop=4
| agpl-3.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/nbformat/v4/nbjson.py | 11 | 1921 | """Read and write notebooks in JSON format."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import copy
import json
from ipython_genutils import py3compat
from .nbbase import from_dict
from .rwbase import (
NotebookReader, NotebookWriter, rejoin_lines, split_lines, strip_transient
)
class BytesEncoder(json.JSONEncoder):
"""A JSON encoder that accepts b64 (and other *ascii*) bytestrings."""
def default(self, obj):
if isinstance(obj, bytes):
return obj.decode('ascii')
return json.JSONEncoder.default(self, obj)
class JSONReader(NotebookReader):
def reads(self, s, **kwargs):
"""Read a JSON string into a Notebook object"""
nb = json.loads(s, **kwargs)
nb = self.to_notebook(nb, **kwargs)
return nb
def to_notebook(self, d, **kwargs):
"""Convert a disk-format notebook dict to in-memory NotebookNode
handles multi-line values as strings, scrubbing of transient values, etc.
"""
nb = from_dict(d)
nb = rejoin_lines(nb)
nb = strip_transient(nb)
return nb
class JSONWriter(NotebookWriter):
def writes(self, nb, **kwargs):
"""Serialize a NotebookNode object as a JSON string"""
kwargs['cls'] = BytesEncoder
kwargs['indent'] = 1
kwargs['sort_keys'] = True
kwargs['separators'] = (',',': ')
kwargs.setdefault('ensure_ascii', False)
# don't modify in-memory dict
nb = copy.deepcopy(nb)
if kwargs.pop('split_lines', True):
nb = split_lines(nb)
nb = strip_transient(nb)
return py3compat.cast_unicode_py2(json.dumps(nb, **kwargs), 'utf-8')
_reader = JSONReader()
_writer = JSONWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
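# A minimal round-trip sketch (``s`` is an assumed v4 notebook JSON string;
# ``reads``/``writes`` are the module-level helpers bound above):
#   nb = reads(s)    # disk JSON -> NotebookNode, multi-line strings rejoined
#   s2 = writes(nb)  # NotebookNode -> disk JSON, strings split into lines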
| mit |
bcornwellmott/frappe | frappe/commands/docs.py | 7 | 2238 | from __future__ import unicode_literals, absolute_import
import click
import os
import frappe
from frappe.commands import pass_context
@click.command('write-docs')
@pass_context
@click.argument('app')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
def write_docs(context, app, target=None, local=False):
"Setup docs in target folder of target app"
from frappe.utils.setup_docs import setup_docs
if not target:
target = os.path.abspath(os.path.join("..", "docs", app))
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
make = setup_docs(app)
make.make_docs(target, local)
finally:
frappe.destroy()
@click.command('build-docs')
@pass_context
@click.argument('app')
@click.option('--docs-version', default='current')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
@click.option('--watch', default=False, is_flag=True, help='Watch for changes and rewrite')
def build_docs(context, app, docs_version="current", target=None, local=False, watch=False):
"Setup docs in target folder of target app"
from frappe.utils import watch as start_watch
if not target:
target = os.path.abspath(os.path.join("..", "docs", app))
for site in context.sites:
_build_docs_once(site, app, docs_version, target, local)
if watch:
def trigger_make(source_path, event_type):
if "/templates/autodoc/" in source_path:
_build_docs_once(site, app, docs_version, target, local)
elif ("/docs.css" in source_path
or "/docs/" in source_path
or "docs.py" in source_path):
_build_docs_once(site, app, docs_version, target, local, only_content_updated=True)
apps_path = frappe.get_app_path(app, "..", "..")
start_watch(apps_path, handler=trigger_make)
def _build_docs_once(site, app, docs_version, target, local, only_content_updated=False):
from frappe.utils.setup_docs import setup_docs
try:
frappe.init(site=site)
frappe.connect()
make = setup_docs(app)
if not only_content_updated:
make.build(docs_version)
make.make_docs(target, local)
finally:
frappe.destroy()
commands = [
build_docs,
write_docs,
]
| mit |
uclouvain/osis_louvain | manage.py | 1 | 1269 | #!/usr/bin/env python
import os
import sys
import dotenv
if __name__ == "__main__":
if 'test' in sys.argv:
os.environ.setdefault('TESTING', 'True')
dotenv.read_dotenv()
SETTINGS_FILE = os.environ.get('DJANGO_SETTINGS_MODULE', 'backoffice.settings.local')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", SETTINGS_FILE)
from django.core.management import execute_from_command_line
try:
execute_from_command_line(sys.argv)
except KeyError as ke:
print("Error loading application.")
print("The following environment var is not defined : {}".format(str(ke)))
print("Check the following possible causes :")
print(" - You don't have a .env file. You can copy .env.example to .env to use default")
print(" - Mandatory variables are not defined in your .env file.")
sys.exit("SettingsKeyError")
except ImportError as ie:
print("Error loading application : {}".format(str(ie)))
print("Check the following possible causes :")
print(" - The DJANGO_SETTINGS_MODULE defined in your .env doesn't exist")
print(" - No DJANGO_SETTINGS_MODULE is defined and the default 'backoffice.settings.local' doesn't exist ")
sys.exit("DjangoSettingsError")
| agpl-3.0 |
pforret/python-for-android | python-modules/twisted/twisted/web/rewrite.py | 57 | 1862 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.web import resource
class RewriterResource(resource.Resource):
def __init__(self, orig, *rewriteRules):
resource.Resource.__init__(self)
self.resource = orig
self.rewriteRules = list(rewriteRules)
def _rewrite(self, request):
for rewriteRule in self.rewriteRules:
rewriteRule(request)
def getChild(self, path, request):
request.postpath.insert(0, path)
request.prepath.pop()
self._rewrite(request)
path = request.postpath.pop(0)
request.prepath.append(path)
return self.resource.getChildWithDefault(path, request)
def render(self, request):
self._rewrite(request)
return self.resource.render(request)
def tildeToUsers(request):
if request.postpath and request.postpath[0][:1]=='~':
request.postpath[:1] = ['users', request.postpath[0][1:]]
request.path = '/'+'/'.join(request.prepath+request.postpath)
def alias(aliasPath, sourcePath):
"""
I am not a very good aliaser. But I'm the best I can be. If I'm
aliasing to a Resource that generates links, and it uses any parts
of request.prepath to do so, the links will not be relative to the
    aliased path, but rather to the aliased-to path. So I can't
    alias static.File directory listings all that nicely. However, I can
still be useful, as many resources will play nice.
"""
sourcePath = sourcePath.split('/')
aliasPath = aliasPath.split('/')
def rewriter(request):
if request.postpath[:len(aliasPath)] == aliasPath:
after = request.postpath[len(aliasPath):]
request.postpath = sourcePath + after
request.path = '/'+'/'.join(request.prepath+request.postpath)
return rewriter
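# A usage sketch (``originalRoot`` is an assumed existing resource): serve
# /foo/* from /bar/baz/* and map /~user to /users/user:
#   root = RewriterResource(originalRoot, alias('foo', 'bar/baz'), tildeToUsers)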
| apache-2.0 |
vgrachev8/youtube-dl | youtube_dl/extractor/radiofrance.py | 2 | 2024 | # coding: utf-8
import re
from .common import InfoExtractor
class RadioFranceIE(InfoExtractor):
_VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
IE_NAME = u'radiofrance'
_TEST = {
u'url': u'http://maison.radiofrance.fr/radiovisions/one-one',
u'file': u'one-one.ogg',
u'md5': u'bdbb28ace95ed0e04faab32ba3160daf',
u'info_dict': {
u"title": u"One to one",
u"description": u"Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
u"uploader": u"Thomas Hercouët",
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, u'title')
description = self._html_search_regex(
r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
webpage, u'description', fatal=False)
uploader = self._html_search_regex(
r'<div class="credit"> © (.*?)</div>',
webpage, u'uploader', fatal=False)
formats_str = self._html_search_regex(
r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
webpage, u'audio URLs')
formats = [
{
'format_id': fm[0],
'url': fm[1],
'vcodec': 'none',
}
for fm in
re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)
]
# No sorting, we don't know any more about these formats
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'uploader': uploader,
}
| unlicense |
walksun/robotframework-selenium2library | test/unit/locators/test_elementfinder.py | 35 | 14209 | import unittest
import os
from Selenium2Library.locators import ElementFinder
from mockito import *
class ElementFinderTests(unittest.TestCase):
def test_find_with_invalid_prefix(self):
finder = ElementFinder()
browser = mock()
        try:
            finder.find(browser, "something=test1")
            self.fail("ValueError should have been raised")
        except ValueError as e:
            self.assertEqual(e.message, "Element locator with prefix 'something' is not supported")
def test_find_with_null_browser(self):
finder = ElementFinder()
self.assertRaises(AssertionError,
finder.find, None, "id=test1")
def test_find_with_null_locator(self):
finder = ElementFinder()
browser = mock()
self.assertRaises(AssertionError,
finder.find, browser, None)
def test_find_with_empty_locator(self):
finder = ElementFinder()
browser = mock()
self.assertRaises(AssertionError,
finder.find, browser, "")
def test_find_with_no_tag(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1")
verify(browser).find_elements_by_xpath("//*[(@id='test1' or @name='test1')]")
def test_find_with_tag(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='div')
verify(browser).find_elements_by_xpath("//div[(@id='test1' or @name='test1')]")
def test_find_with_locator_with_apos(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test '1'")
verify(browser).find_elements_by_xpath("//*[(@id=\"test '1'\" or @name=\"test '1'\")]")
def test_find_with_locator_with_quote(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test \"1\"")
verify(browser).find_elements_by_xpath("//*[(@id='test \"1\"' or @name='test \"1\"')]")
def test_find_with_locator_with_quote_and_apos(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test \"1\" and '2'")
verify(browser).find_elements_by_xpath(
"//*[(@id=concat('test \"1\" and ', \"'\", '2', \"'\", '') or @name=concat('test \"1\" and ', \"'\", '2', \"'\", ''))]")
def test_find_with_a(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='a')
verify(browser).find_elements_by_xpath(
"//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")
def test_find_with_link_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='link')
verify(browser).find_elements_by_xpath(
"//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")
def test_find_with_img(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='img')
verify(browser).find_elements_by_xpath(
"//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")
def test_find_with_image_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='image')
verify(browser).find_elements_by_xpath(
"//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")
def test_find_with_input(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='input')
verify(browser).find_elements_by_xpath(
"//input[(@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_radio_button_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='radio button')
verify(browser).find_elements_by_xpath(
"//input[@type='radio' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_checkbox_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='checkbox')
verify(browser).find_elements_by_xpath(
"//input[@type='checkbox' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_file_upload_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='file upload')
verify(browser).find_elements_by_xpath(
"//input[@type='file' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_text_field_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='text field')
verify(browser).find_elements_by_xpath(
"//input[@type='text' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_button(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='button')
verify(browser).find_elements_by_xpath(
"//button[(@id='test1' or @name='test1' or @value='test1' or normalize-space(descendant-or-self::text())='test1')]")
def test_find_with_select(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='select')
verify(browser).find_elements_by_xpath(
"//select[(@id='test1' or @name='test1')]")
def test_find_with_list_synonym(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='list')
verify(browser).find_elements_by_xpath(
"//select[(@id='test1' or @name='test1')]")
def test_find_with_implicit_xpath(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)
result = finder.find(browser, "//*[(@test='1')]")
self.assertEqual(result, elements)
result = finder.find(browser, "//*[(@test='1')]", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_identifier(self):
finder = ElementFinder()
browser = mock()
id_elements = self._make_mock_elements('div', 'a')
name_elements = self._make_mock_elements('span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(list(id_elements)).thenReturn(list(id_elements))
when(browser).find_elements_by_name("test1").thenReturn(list(name_elements)).thenReturn(list(name_elements))
all_elements = list(id_elements)
all_elements.extend(name_elements)
result = finder.find(browser, "identifier=test1")
self.assertEqual(result, all_elements)
result = finder.find(browser, "identifier=test1", tag='a')
self.assertEqual(result, [id_elements[1], name_elements[1]])
def test_find_by_id(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_name(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_name("test1").thenReturn(elements)
result = finder.find(browser, "name=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "name=test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_xpath(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)
result = finder.find(browser, "xpath=//*[(@test='1')]")
self.assertEqual(result, elements)
result = finder.find(browser, "xpath=//*[(@test='1')]", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_dom(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).execute_script("return document.getElementsByTagName('a');").thenReturn(
[elements[1], elements[3]])
result = finder.find(browser, "dom=document.getElementsByTagName('a')")
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_link_text(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_link_text("my link").thenReturn(elements)
result = finder.find(browser, "link=my link")
self.assertEqual(result, elements)
result = finder.find(browser, "link=my link", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_css_selector(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_css_selector("#test1").thenReturn(elements)
result = finder.find(browser, "css=#test1")
self.assertEqual(result, elements)
result = finder.find(browser, "css=#test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_tag_name(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_tag_name("div").thenReturn(elements)
result = finder.find(browser, "tag=div")
self.assertEqual(result, elements)
result = finder.find(browser, "tag=div", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_with_sloppy_prefix(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "ID=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "iD=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, " id =test1")
self.assertEqual(result, elements)
def test_find_with_sloppy_criteria(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id= test1 ")
self.assertEqual(result, elements)
def test_find_by_id_with_synonym_and_constraints(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'input', 'span', 'input', 'a', 'input', 'div', 'input')
elements[1].set_attribute('type', 'radio')
elements[3].set_attribute('type', 'checkbox')
elements[5].set_attribute('type', 'text')
elements[7].set_attribute('type', 'file')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1", tag='input')
self.assertEqual(result, [elements[1], elements[3], elements[5], elements[7]])
result = finder.find(browser, "id=test1", tag='radio button')
self.assertEqual(result, [elements[1]])
result = finder.find(browser, "id=test1", tag='checkbox')
self.assertEqual(result, [elements[3]])
result = finder.find(browser, "id=test1", tag='text field')
self.assertEqual(result, [elements[5]])
result = finder.find(browser, "id=test1", tag='file upload')
self.assertEqual(result, [elements[7]])
def _make_mock_elements(self, *tags):
elements = []
for tag in tags:
element = self._make_mock_element(tag)
elements.append(element)
return elements
def _make_mock_element(self, tag):
element = mock()
element.tag_name = tag
element.attributes = {}
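        # mockito mocks don't persist attribute state on their own, so
        # emulate a minimal set/get_attribute pair backed by a plain dict
        # (enough for the synonym/constraint tests above).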
def set_attribute(name, value):
element.attributes[name] = value
element.set_attribute = set_attribute
def get_attribute(name):
return element.attributes[name]
element.get_attribute = get_attribute
return element
| apache-2.0 |
yuyichao/pyscical | pyscical/utils.py | 1 | 1930 | # Copyright (C) 2012~2014 by Yichao Yu
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def cffi_ptr(obj, _ffi, writable=False, retain=False):
if isinstance(obj, bytes):
if writable:
# bytes is not writable
raise TypeError('expected an object with a writable '
'buffer interface.')
if retain:
buf = _ffi.new('char[]', obj)
return (buf, len(obj), buf)
return (obj, len(obj), obj)
elif isinstance(obj, np.ndarray):
# numpy array
return (_ffi.cast('void*', obj.__array_interface__['data'][0]),
obj.nbytes, obj)
elif isinstance(obj, np.generic):
if writable or retain:
raise TypeError('expected an object with a writable '
'buffer interface.')
# numpy scalar
#
# * obj.__array_interface__ exists in CPython although requires
# holding a reference to the dynamically created
# __array_interface__ object
#
# * does not exist (yet?) in numpypy.
s_array = obj[()]
return (_ffi.cast('void*', s_array.__array_interface__['data'][0]),
s_array.nbytes, s_array)
raise TypeError("Only numpy arrays and bytes can be converted")
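if __name__ == '__main__':
    # Minimal usage sketch of cffi_ptr (assumes the third-party cffi
    # package is installed; cffi.FFI(), ffi.new() and ffi.cast() are
    # standard cffi entry points).
    import cffi
    ffi = cffi.FFI()
    arr = np.arange(4, dtype=np.float64)
    ptr, nbytes, keepalive = cffi_ptr(arr, ffi)
    # ptr is a void* into arr's buffer; keep `keepalive` referenced for as
    # long as the pointer is in use.
    print(ptr, nbytes)  # e.g. <cdata 'void *' 0x...> 32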
| gpl-3.0 |
potzenheimer/meetshaus | src/meetshaus.sitetheme/meetshaus/sitetheme/tests.py | 1 | 1419 | import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import meetshaus.sitetheme
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(meetshaus.sitetheme)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='meetshaus.sitetheme',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='meetshaus.sitetheme.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='meetshaus.sitetheme',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='meetshaus.sitetheme',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| mit |
yglazner/jy-dev | jy_dev/docopt.py | 11 | 19946 | """Pythonic command-line interface parser that will make you smile.
* http://docopt.org
* Repository and issue-tracker: https://github.com/docopt/docopt
* Licensed under terms of MIT license (see LICENSE-MIT)
* Copyright (c) 2013 Vladimir Keleshev, [email protected]
"""
import sys
import re
__all__ = ['docopt']
__version__ = '0.6.1'
class DocoptLanguageError(Exception):
"""Error in construction of usage-message by developer."""
class DocoptExit(SystemExit):
"""Exit in case user invoked program with incorrect arguments."""
usage = ''
def __init__(self, message=''):
SystemExit.__init__(self, (message + '\n' + self.usage).strip())
class Pattern(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def fix(self):
self.fix_identities()
self.fix_repeating_arguments()
return self
def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, c in enumerate(self.children):
if not hasattr(c, 'children'):
assert c in uniq
self.children[i] = uniq[uniq.index(c)]
else:
c.fix_identities(uniq)
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(c.children) for c in self.either.children]
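        # Elements repeated within a single usage case accumulate:
        # arguments and valued options collect into lists, while commands
        # and plain flags become occurrence counters (so `-v -v` yields 2).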
for case in either:
for e in [c for c in case if case.count(c) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self
@property
def either(self):
"""Transform pattern into an equivalent, with only top-level Either."""
# Currently the pattern will not be equivalent, but more "narrow",
# although good enough to reason about list arguments.
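        # e.g. Required(Optional(o), Argument('a')) flattens to
        # Either(Required(o, Argument('a'))): optional children are pulled
        # up unconditionally, which is enough for fix_repeating_arguments()
        # to spot duplicated elements.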
ret = []
groups = [[self]]
while groups:
children = groups.pop(0)
types = [type(c) for c in children]
if Either in types:
either = [c for c in children if type(c) is Either][0]
children.pop(children.index(either))
for c in either.children:
groups.append([c] + children)
elif Required in types:
required = [c for c in children if type(c) is Required][0]
children.pop(children.index(required))
groups.append(list(required.children) + children)
elif Optional in types:
optional = [c for c in children if type(c) is Optional][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif AnyOptions in types:
optional = [c for c in children if type(c) is AnyOptions][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif OneOrMore in types:
oneormore = [c for c in children if type(c) is OneOrMore][0]
children.pop(children.index(oneormore))
groups.append(list(oneormore.children) * 2 + children)
else:
ret.append(children)
return Either(*[Required(*e) for e in ret])
class ChildPattern(Pattern):
def __init__(self, name, value=None):
self.name = name
self.value = value
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
def flat(self, *types):
return [self] if not types or type(self) in types else []
def match(self, left, collected=None):
collected = [] if collected is None else collected
pos, match = self.single_match(left)
if match is None:
return False, left, collected
left_ = left[:pos] + left[pos + 1:]
same_name = [a for a in collected if a.name == self.name]
if type(self.value) in (int, list):
if type(self.value) is int:
increment = 1
else:
increment = ([match.value] if type(match.value) is str
else match.value)
if not same_name:
match.value = increment
return True, left_, collected + [match]
same_name[0].value += increment
return True, left_, collected
return True, left_, collected + [match]
class ParentPattern(Pattern):
def __init__(self, *children):
self.children = list(children)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(repr(a) for a in self.children))
def flat(self, *types):
if type(self) in types:
return [self]
return sum([c.flat(*types) for c in self.children], [])
class Argument(ChildPattern):
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
return n, Argument(self.name, p.value)
return None, None
@classmethod
def parse(class_, source):
        name = re.findall(r'(<\S*?>)', source)[0]
        value = re.findall(r'\[default: (.*)\]', source, flags=re.I)
return class_(name, value[0] if value else None)
class Command(Argument):
def __init__(self, name, value=False):
self.name = name
self.value = value
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
if p.value == self.name:
return n, Command(self.name, True)
else:
break
return None, None
class Option(ChildPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
assert argcount in (0, 1)
self.short, self.long = short, long
self.argcount, self.value = argcount, value
self.value = None if value is False and argcount else value
@classmethod
def parse(class_, option_description):
short, long, argcount, value = None, None, 0, False
options, _, description = option_description.strip().partition(' ')
options = options.replace(',', ' ').replace('=', ' ')
for s in options.split():
if s.startswith('--'):
long = s
elif s.startswith('-'):
short = s
else:
argcount = 1
if argcount:
            matched = re.findall(r'\[default: (.*)\]', description, flags=re.I)
value = matched[0] if matched else None
return class_(short, long, argcount, value)
def single_match(self, left):
for n, p in enumerate(left):
if self.name == p.name:
return n, p
return None, None
@property
def name(self):
return self.long or self.short
def __repr__(self):
return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
self.argcount, self.value)
class Required(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
l = left
c = collected
for p in self.children:
matched, l, c = p.match(l, c)
if not matched:
return False, left, collected
return True, l, c
class Optional(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
for p in self.children:
m, left, collected = p.match(left, collected)
return True, left, collected
class AnyOptions(Optional):
"""Marker/placeholder for [options] shortcut."""
class OneOrMore(ParentPattern):
def match(self, left, collected=None):
assert len(self.children) == 1
collected = [] if collected is None else collected
l = left
c = collected
l_ = None
matched = True
times = 0
while matched:
# could it be that something didn't match but changed l or c?
matched, l, c = self.children[0].match(l, c)
times += 1 if matched else 0
if l_ == l:
break
l_ = l
if times >= 1:
return True, l, c
return False, left, collected
class Either(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
outcomes = []
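        # Try every alternative and prefer the match that consumes the
        # most input, i.e. leaves the fewest tokens unmatched.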
for p in self.children:
matched, _, _ = outcome = p.match(left, collected)
if matched:
outcomes.append(outcome)
if outcomes:
return min(outcomes, key=lambda outcome: len(outcome[1]))
return False, left, collected
class TokenStream(list):
def __init__(self, source, error):
self += source.split() if hasattr(source, 'split') else source
self.error = error
def move(self):
return self.pop(0) if len(self) else None
def current(self):
return self[0] if len(self) else None
def parse_long(tokens, options):
"""long ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
long, eq, value = tokens.move().partition('=')
assert long.startswith('--')
value = None if eq == value == '' else value
similar = [o for o in options if o.long == long]
if tokens.error is DocoptExit and similar == []: # if no exact match
similar = [o for o in options if o.long and o.long.startswith(long)]
if len(similar) > 1: # might be simply specified ambiguously 2+ times?
raise tokens.error('%s is not a unique prefix: %s?' %
(long, ', '.join(o.long for o in similar)))
elif len(similar) < 1:
argcount = 1 if eq == '=' else 0
o = Option(None, long, argcount)
options.append(o)
if tokens.error is DocoptExit:
o = Option(None, long, argcount, value if argcount else True)
else:
o = Option(similar[0].short, similar[0].long,
similar[0].argcount, similar[0].value)
if o.argcount == 0:
if value is not None:
raise tokens.error('%s must not have an argument' % o.long)
else:
if value is None:
if tokens.current() is None:
raise tokens.error('%s requires argument' % o.long)
value = tokens.move()
if tokens.error is DocoptExit:
o.value = value if value is not None else True
return [o]
def parse_shorts(tokens, options):
"""shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
token = tokens.move()
assert token.startswith('-') and not token.startswith('--')
left = token.lstrip('-')
parsed = []
while left != '':
short, left = '-' + left[0], left[1:]
similar = [o for o in options if o.short == short]
if len(similar) > 1:
raise tokens.error('%s is specified ambiguously %d times' %
(short, len(similar)))
elif len(similar) < 1:
o = Option(short, None, 0)
options.append(o)
if tokens.error is DocoptExit:
o = Option(short, None, 0, True)
else: # why copying is necessary here?
o = Option(short, similar[0].long,
similar[0].argcount, similar[0].value)
value = None
if o.argcount != 0:
if left == '':
if tokens.current() is None:
raise tokens.error('%s requires argument' % short)
value = tokens.move()
else:
value = left
left = ''
if tokens.error is DocoptExit:
o.value = value if value is not None else True
parsed.append(o)
return parsed
def parse_pattern(source, options):
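    # Pad (, ), [, ], | and ... with spaces so a plain str.split() can
    # tokenize the usage pattern; the whole token stream must be consumed.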
tokens = TokenStream(re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source),
DocoptLanguageError)
result = parse_expr(tokens, options)
if tokens.current() is not None:
raise tokens.error('unexpected ending: %r' % ' '.join(tokens))
return Required(*result)
def parse_expr(tokens, options):
"""expr ::= seq ( '|' seq )* ;"""
seq = parse_seq(tokens, options)
if tokens.current() != '|':
return seq
result = [Required(*seq)] if len(seq) > 1 else seq
while tokens.current() == '|':
tokens.move()
seq = parse_seq(tokens, options)
result += [Required(*seq)] if len(seq) > 1 else seq
return [Either(*result)] if len(result) > 1 else result
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result
def parse_atom(tokens, options):
"""atom ::= '(' expr ')' | '[' expr ']' | 'options'
| long | shorts | argument | command ;
"""
token = tokens.current()
result = []
if token in '([':
tokens.move()
matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token]
result = pattern(*parse_expr(tokens, options))
if tokens.move() != matching:
raise tokens.error("unmatched '%s'" % token)
return [result]
elif token == 'options':
tokens.move()
return [AnyOptions()]
elif token.startswith('--') and token != '--':
return parse_long(tokens, options)
elif token.startswith('-') and token not in ('-', '--'):
return parse_shorts(tokens, options)
elif token.startswith('<') and token.endswith('>') or token.isupper():
return [Argument(tokens.move())]
else:
return [Command(tokens.move())]
def parse_argv(tokens, options, options_first=False):
"""Parse command-line argument vector.
If options_first:
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
"""
parsed = []
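    # With options_first=True the first positional argument ends option
    # parsing, so e.g. `prog -v cmd --x` leaves --x as a plain argument
    # for a sub-command to handle.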
while tokens.current() is not None:
if tokens.current() == '--':
return parsed + [Argument(None, v) for v in tokens]
elif tokens.current().startswith('--'):
parsed += parse_long(tokens, options)
elif tokens.current().startswith('-') and tokens.current() != '-':
parsed += parse_shorts(tokens, options)
elif options_first:
return parsed + [Argument(None, v) for v in tokens]
else:
parsed.append(Argument(None, tokens.move()))
return parsed
def parse_defaults(doc):
# in python < 2.7 you can't pass flags=re.MULTILINE
    split = re.split(r'\n *(<\S+?>|-\S+?)', doc)[1:]
split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])]
options = [Option.parse(s) for s in split if s.startswith('-')]
#arguments = [Argument.parse(s) for s in split if s.startswith('<')]
#return options, arguments
return options
def printable_usage(doc):
# in python < 2.7 you can't pass flags=re.IGNORECASE
usage_split = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc)
if len(usage_split) < 3:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_split) > 3:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
return re.split(r'\n\s*\n', ''.join(usage_split[1:]))[0].strip()
def formal_usage(printable_usage):
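    # e.g. 'usage: prog a b\n       prog c' -> '( a b ) | ( c )': every
    # recurrence of the program name starts a new alternative.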
pu = printable_usage.split()[1:] # split and drop "usage:"
return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )'
def extras(help, version, options, doc):
if help and any((o.name in ('-h', '--help')) and o.value for o in options):
print(doc.strip("\n"))
sys.exit()
if version and any(o.name == '--version' and o.value for o in options):
print(version)
sys.exit()
class Dict(dict):
def __repr__(self):
return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items()))
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
        Set to True to require that options precede positional arguments,
        i.e. to forbid intermixing options and positional arguments.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
if argv is None:
argv = sys.argv[1:]
DocoptExit.usage = printable_usage(doc)
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(TokenStream(argv, DocoptExit), list(options),
options_first)
pattern_options = set(pattern.flat(Option))
for ao in pattern.flat(AnyOptions):
doc_options = parse_defaults(doc)
ao.children = list(set(doc_options) - pattern_options)
#if any_options:
# ao.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched and left == []: # better error message if left?
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit()
| mit |
mikedh/trimesh | examples/voxel.py | 2 | 4350 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import inspect
import trimesh
from trimesh.exchange.binvox import voxelize_mesh
from trimesh import voxel as v
dir_current = os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe())))
# the absolute path for our reference models
dir_models = os.path.abspath(
os.path.join(dir_current, '..', 'models'))
def show(chair_mesh, chair_voxels, colors=(1, 1, 1, 0.3)):
scene = chair_mesh.scene()
scene.add_geometry(chair_voxels.as_boxes(colors=colors))
scene.show()
if __name__ == '__main__':
base_name = 'chair_model'
chair_mesh = trimesh.load(os.path.join(dir_models, '%s.obj' % base_name))
if isinstance(chair_mesh, trimesh.scene.Scene):
chair_mesh = trimesh.util.concatenate([
trimesh.Trimesh(mesh.vertices, mesh.faces)
for mesh in chair_mesh.geometry.values()])
binvox_path = os.path.join(dir_models, '%s.binvox' % base_name)
chair_voxels = trimesh.load(binvox_path)
chair_voxels = v.VoxelGrid(chair_voxels.encoding.dense, chair_voxels.transform)
print('white: voxelized chair (binvox, exact)')
show(chair_mesh, voxelize_mesh(chair_mesh, exact=True), colors=(1, 1, 1, 0.3))
print('red: binvox-loaded chair')
show(chair_mesh, chair_voxels, colors=(1, 0, 0, 0.3))
voxelized_chair_mesh = chair_mesh.voxelized(np.max(chair_mesh.extents) / 32)
print('green: voxelized chair (default).')
show(chair_mesh, voxelized_chair_mesh, colors=(0, 1, 0, 0.3))
shape = (50, 17, 63)
revox = chair_voxels.revoxelized(shape)
print('cyan: revoxelized.')
show(chair_mesh, revox, colors=(0, 1, 1, 0.3))
values = chair_voxels.encoding.dense.copy()
values[:values.shape[0] // 2] = 0
stripped = v.VoxelGrid(values, chair_voxels.transform.copy()).strip()
print('yellow: stripped halved voxel grid. Transform is updated appropriately')
show(chair_mesh, stripped, colors=(1, 1, 0, 0.3))
transform = np.eye(4)
transform[:3] += np.random.normal(size=(3, 4)) * 0.2
transformed_chair_mesh = chair_mesh.copy().apply_transform(transform)
print('original transform volume: %s'
% str(chair_voxels.element_volume))
chair_voxels.apply_transform(transform)
print('warped transform volume: %s' % str(chair_voxels.element_volume))
print('blue: transformed voxels. Transformation is lazy, and each voxel is '
'no longer a cube.')
show(transformed_chair_mesh, chair_voxels, colors=(0, 0, 1, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='subdivide').fill()
print('green: subdivided')
show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='ray')
print('red: ray. Poor performance on thin structures')
show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox')
print('red: binvox (default). Poor performance on thin structures')
show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', wireframe=True)
print('green: binvox (wireframe). Still doesn\'t capture all thin structures')
show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True)
print('blue: binvox (exact). Does a good job')
show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3))
voxelized = chair_mesh.voxelized(
pitch=0.02,
method='binvox',
exact=True,
downsample_factor=2,
downsample_threshold=1)
print('red: binvox (exact downsampled) surface')
show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))
chair_voxels = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True)
voxelized = chair_voxels.copy().fill(method='base')
print('blue: binvox (exact) filled (base). Gets a bit overly excited')
show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3))
voxelized = chair_voxels.copy().fill(method='orthographic')
print('green: binvox (exact) filled (orthographic). '
'Doesn\'t do much as should be expected')
show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
| mit |
zyga/ubuntu-make | tests/large/test_ide.py | 6 | 18449 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
# Tin Tvrtković
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for the IDE category"""
import logging
import platform
import subprocess
import os
import pexpect
from tests.large import LargeFrameworkTests
from tests.tools import UMAKE
logger = logging.getLogger(__name__)
class EclipseIDETests(LargeFrameworkTests):
"""The Eclipse distribution from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/eclipse")
self.desktop_filename = "eclipse.desktop"
@property
    def arch_option(self):
        """Return the expected architecture string used on the command line."""
        return platform.machine()
def test_default_eclipse_ide_install(self):
"""Install eclipse from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide eclipse'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# on 64 bits, there is a java subprocess, we kill that one with SIGKILL (eclipse isn't reliable on SIGTERM)
if self.arch_option == "x86_64":
self.check_and_kill_process(["java", self.arch_option, self.installed_path],
wait_before=self.TIMEOUT_START, send_sigkill=True)
else:
self.check_and_kill_process([self.exec_path],
wait_before=self.TIMEOUT_START, send_sigkill=True)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide eclipse'.format(UMAKE)))
        self.expect_and_no_warn(r"Eclipse is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class IdeaIDETests(LargeFrameworkTests):
"""IntelliJ Idea from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/idea")
self.desktop_filename = 'jetbrains-idea.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide idea'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide idea'.format(UMAKE)))
        self.expect_and_no_warn(r"Idea is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class IdeaUltimateIDETests(LargeFrameworkTests):
"""IntelliJ Idea Ultimate from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/idea-ultimate")
self.desktop_filename = 'jetbrains-idea.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide idea-ultimate'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide idea-ultimate'.format(UMAKE)))
        self.expect_and_no_warn(r"Idea Ultimate is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PyCharmIDETests(LargeFrameworkTests):
"""PyCharm from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/pycharm")
self.desktop_filename = 'jetbrains-pycharm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide pycharm'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide pycharm'.format(UMAKE)))
        self.expect_and_no_warn(r"PyCharm is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PyCharmEducationalIDETests(LargeFrameworkTests):
"""PyCharm Educational from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/pycharm-educational")
self.desktop_filename = 'jetbrains-pycharm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide pycharm-educational'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide pycharm-educational'.format(UMAKE)))
        self.expect_and_no_warn(r"PyCharm Educational is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PyCharmProfessionalIDETests(LargeFrameworkTests):
"""PyCharm Professional from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/pycharm-professional")
self.desktop_filename = 'jetbrains-pycharm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide pycharm-professional'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide pycharm-professional'.format(UMAKE)))
        self.expect_and_no_warn(r"PyCharm Professional is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class RubyMineIDETests(LargeFrameworkTests):
"""RubyMine from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/rubymine")
self.desktop_filename = 'jetbrains-rubymine.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide rubymine'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide rubymine'.format(UMAKE)))
        self.expect_and_no_warn(r"RubyMine is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class WebStormIDETests(LargeFrameworkTests):
"""WebStorm from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/webstorm")
self.desktop_filename = 'jetbrains-webstorm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide webstorm'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide webstorm'.format(UMAKE)))
        self.expect_and_no_warn(r"WebStorm is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PhpStormIDETests(LargeFrameworkTests):
"""PhpStorm from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/phpstorm")
self.desktop_filename = 'jetbrains-phpstorm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide phpstorm'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide phpstorm'.format(UMAKE)))
        self.expect_and_no_warn(r"PhpStorm is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class ArduinoIDETests(LargeFrameworkTests):
"""The Arduino Software distribution from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/arduino")
self.desktop_filename = "arduino.desktop"
@property
    def arch_option(self):
        """Return the expected architecture string used on the command line."""
        return platform.machine()
def test_default_install(self):
"""Install the distribution from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide arduino'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
self.assertTrue(self.is_in_group("dialout"))
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", "processing.app.Base"], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide arduino'.format(UMAKE)))
        self.expect_and_no_warn(r"Arduino is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
| gpl-3.0 |
cloudnautique/cloud-cattle | code/agent/src/agents/pyagent/cattle/plugins/core/event_handlers.py | 11 | 2000 | import os
import subprocess
from cattle import utils
from cattle import Config
from cattle.type_manager import types
from cattle.progress import Progress
def _should_handle(handler, event):
name = event.name.split(';', 1)[0]
if name not in handler.events() or event.replyTo is None:
return False
return True
class PingHandler:
def __init__(self):
pass
def events(self):
return ['ping']
def execute(self, event):
if not _should_handle(self, event):
return
resp = utils.reply(event)
if Config.do_ping():
for type in types():
if hasattr(type, 'on_ping'):
type.on_ping(event, resp)
return resp
class ConfigUpdateHandler:
def __init__(self):
pass
def events(self):
return ['config.update']
def execute(self, event):
if not _should_handle(self, event):
return
if len(event.data.items) == 0:
return utils.reply(event)
item_names = []
for item in event.data.items:
# For development, don't let the server kill your agent
if item.name != 'pyagent' or Config.config_update_pyagent():
item_names.append(item.name)
home = Config.home()
env = dict(os.environ)
env['CATTLE_ACCESS_KEY'] = Config.access_key()
env['CATTLE_SECRET_KEY'] = Config.secret_key()
env['CATTLE_CONFIG_URL'] = Config.config_url()
env['CATTLE_HOME'] = home
args = [Config.config_sh()] + item_names
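        # config.sh receives the item names as arguments and the agent
        # credentials via its environment; a non-zero exit code is reported
        # back through the Progress update below.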
try:
output = utils.get_command_output(args, cwd=home, env=env)
return utils.reply(event, {
'exitCode': 0,
'output': output
})
except subprocess.CalledProcessError as e:
Progress(event).update('Update Failed', data={
'exitCode': e.returncode,
'output': e.output
})
| apache-2.0 |
capchu/TextRPGOnline | rpgonline/env/lib/python2.7/site-packages/pip/vendor/html5lib/treebuilders/dom.py | 249 | 11328 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return list(self.element.attributes.items()).__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(list(self.element.attributes.items()))
def items(self):
return [(item[0], item[1]) for item in
list(self.element.attributes.items())]
def keys(self):
return list(self.element.attributes.keys())
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = data
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
                    if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
def dom2sax(node, handler, nsmap={'xml': XML_NAMESPACE}):
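        # Replay a (mini)dom tree as SAX events on `handler`; xmlns
        # attributes become startPrefixMapping/endPrefixMapping calls
        # instead of being reported as ordinary attributes.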
if node.nodeType == Node.ELEMENT_NODE:
if not nsmap:
handler.startElement(node.nodeName, node.attributes)
for child in node.childNodes:
dom2sax(child, handler, nsmap)
handler.endElement(node.nodeName)
else:
attributes = dict(node.attributes.itemsNS())
# gather namespace declarations
prefixes = []
for attrname in list(node.attributes.keys()):
attr = node.getAttributeNode(attrname)
if (attr.namespaceURI == XMLNS_NAMESPACE or
(attr.namespaceURI is None and attr.nodeName.startswith('xmlns'))):
prefix = (attr.nodeName != 'xmlns' and attr.nodeName or None)
handler.startPrefixMapping(prefix, attr.nodeValue)
prefixes.append(prefix)
nsmap = nsmap.copy()
nsmap[prefix] = attr.nodeValue
del attributes[(attr.namespaceURI, attr.nodeName)]
# apply namespace declarations
for attrname in list(node.attributes.keys()):
attr = node.getAttributeNode(attrname)
if attr.namespaceURI is None and ':' in attr.nodeName:
prefix = attr.nodeName.split(':')[0]
if prefix in nsmap:
del attributes[(attr.namespaceURI, attr.nodeName)]
attributes[(nsmap[prefix], attr.nodeName)] = attr.nodeValue
# SAX events
ns = node.namespaceURI or nsmap.get(None, None)
handler.startElementNS((ns, node.nodeName), node.nodeName, attributes)
for child in node.childNodes:
dom2sax(child, handler, nsmap)
handler.endElementNS((ns, node.nodeName), node.nodeName)
for prefix in prefixes:
handler.endPrefixMapping(prefix)
elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
handler.characters(node.nodeValue)
elif node.nodeType == Node.DOCUMENT_NODE:
handler.startDocument()
for child in node.childNodes:
dom2sax(child, handler, nsmap)
handler.endDocument()
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
for child in node.childNodes:
dom2sax(child, handler, nsmap)
else:
# ATTRIBUTE_NODE
# ENTITY_NODE
# PROCESSING_INSTRUCTION_NODE
# COMMENT_NODE
# DOCUMENT_TYPE_NODE
# NOTATION_NODE
pass
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
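# Hypothetical usage sketch (the exact entry point depends on the caller):
# domModule = getDomModule(minidom); domModule.TreeBuilder would then be the
# DOM-backed tree builder. moduleFactoryFactory is expected to memoize the
# generated module per DOM implementation, so repeated calls are cheap.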
| gpl-3.0 |
mwstobo/marshmallow | tests/test_exceptions.py | 7 | 2753 | # -*- coding: utf-8 -*-
import pytest
from marshmallow.exceptions import ValidationError, MarshallingError, UnmarshallingError
from marshmallow import fields, Schema
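# These tests pin down the exception API: ValidationError normalizes its
# messages into lists/dicts and optionally records field names, while the
# legacy MarshallingError/UnmarshallingError classes are deprecated but can
# still be raised from custom fields and validators.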
class TestValidationError:
def test_stores_message_in_list(self):
err = ValidationError('foo')
assert err.messages == ['foo']
def test_can_pass_list_of_messages(self):
err = ValidationError(['foo', 'bar'])
assert err.messages == ['foo', 'bar']
def test_stores_dictionaries(self):
messages = {'user': {'email': ['email is invalid']}}
err = ValidationError(messages)
assert err.messages == messages
def test_can_store_field_names(self):
err = ValidationError('invalid email', field_names='email')
assert err.field_names == ['email']
err = ValidationError('invalid email', field_names=['email'])
assert err.field_names == ['email']
def test_str(self):
err = ValidationError('invalid email')
assert str(err) == 'invalid email'
err2 = ValidationError('invalid email', 'email')
assert str(err2) == 'invalid email'
class TestMarshallingError:
def test_deprecated(self):
pytest.deprecated_call(MarshallingError, 'foo')
def test_can_store_field_and_field_name(self):
field_name = 'foo'
field = fields.Str()
err = MarshallingError('something went wrong', fields=[field],
field_names=[field_name])
assert err.fields == [field]
assert err.field_names == [field_name]
def test_can_be_raised_by_custom_field(self):
class MyField(fields.Field):
def _serialize(self, val, attr, obj):
raise MarshallingError('oops')
class MySchema(Schema):
foo = MyField()
s = MySchema()
result = s.dump({'foo': 42})
assert 'foo' in result.errors
assert result.errors['foo'] == ['oops']
class TestUnmarshallingError:
def test_deprecated(self):
pytest.deprecated_call(UnmarshallingError, 'foo')
def test_can_store_field_and_field_name(self):
field_name = 'foo'
field = fields.Str()
err = UnmarshallingError('something went wrong', fields=[field],
field_names=[field_name])
assert err.fields == [field]
assert err.field_names == [field_name]
def test_can_be_raised_by_validator(self):
def validator(val):
raise UnmarshallingError('oops')
class MySchema(Schema):
foo = fields.Field(validate=[validator])
s = MySchema()
result = s.load({'foo': 42})
assert 'foo' in result.errors
assert result.errors['foo'] == ['oops']
| mit |
juanka1331/VAN-applied-to-Nifti-images | final_scripts/tests_over_3dmask_generator.py | 1 | 1589 | import sys
import os
# Extend sys.path before importing from lib, so the package resolves when
# the script is run from inside final_scripts.
sys.path.append(os.path.dirname(os.getcwd()))
from lib.data_loader import utils_mask3d
from lib.utils import output_utils
from lib.data_loader import mri_atlas
from lib.data_loader import pet_atlas
from lib.data_loader import PET_stack_NORAD
from lib.data_loader import MRI_stack_NORAD
from lib.utils.os_aux import create_directories
import settings
region = 75
#images = "MRI"
images = "PET"
path_folder3D = os.path.join(settings.path_to_project, "folder3D")
path_folder_masks3d = os.path.join(path_folder3D, "masks3D")
path_mask = os.path.join(
path_folder_masks3d, "{1}_region:{0}".format(region, images))
create_directories([path_folder3D, path_folder_masks3d])
atlas = None
reshape_kind = None
colour_kind = None
stack_dict = None
if images == "MRI":
stack_dict = MRI_stack_NORAD.get_gm_stack()
reshape_kind = "A"
colour_kind = "Greys"
atlas = mri_atlas.load_atlas_mri()
elif images == "PET":
stack_dict = PET_stack_NORAD.get_full_stack()
reshape_kind = "F"
colour_kind = "jet"
total_size = stack_dict['total_size']
imgsize = stack_dict['imgsize']
voxels_index = stack_dict['voxel_index']
map_region_voxels = atlas[region] # indices referred to non-background voxels
no_bg_region_voxels_index = voxels_index[map_region_voxels]
mask3d = utils_mask3d.generate_region_3dmaskatlas(
no_bg_region_voxels_index=no_bg_region_voxels_index,
reshape_kind=reshape_kind,
imgsize=imgsize,
totalsize=total_size)
output_utils.from_3d_image_to_nifti_file(
path_to_save=path_mask,
image3d=mask3d)
| gpl-2.0 |
boberfly/gaffer | python/GafferUI/Button.py | 7 | 6312 | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import Gaffer
import GafferUI
from Qt import QtGui
from Qt import QtWidgets
from Qt import QtCore
class Button( GafferUI.Widget ) :
__palette = None
def __init__( self, text="", image=None, hasFrame=True, highlightOnOver=True, **kw ) :
GafferUI.Widget.__init__( self, QtWidgets.QPushButton(), **kw )
self.__highlightForHover = False
self._qtWidget().setAttribute( QtCore.Qt.WA_LayoutUsesWidgetRect )
# allow return and enter keys to click button
self._qtWidget().setAutoDefault( True )
self.setText( text )
self.setImage( image )
self.setHasFrame( hasFrame )
# using a WeakMethod to avoid circular references which would otherwise
# never be broken.
self._qtWidget().clicked.connect( Gaffer.WeakMethod( self.__clicked ) )
self.__clickedSignal = GafferUI.WidgetSignal()
# buttons appear to totally ignore the etch-disabled-text stylesheet option,
# and we really don't like the etching. the only effective way of disabling it
# seems to be to apply this palette which makes the etched text transparent.
if Button.__palette is None :
Button.__palette = QtGui.QPalette( QtWidgets.QApplication.instance().palette( self._qtWidget() ) )
Button.__palette.setColor( QtGui.QPalette.Disabled, QtGui.QPalette.Light, QtGui.QColor( 0, 0, 0, 0 ) )
self._qtWidget().setPalette( Button.__palette )
if highlightOnOver :
self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
def setHighlighted( self, highlighted ) :
GafferUI.Widget.setHighlighted( self, highlighted )
self.__updateIcon()
def setText( self, text ) :
assert( isinstance( text, six.string_types ) )
self._qtWidget().setText( text )
def getText( self ) :
return self._qtWidget().text()
def setImage( self, imageOrImageFileName ) :
assert( isinstance( imageOrImageFileName, ( six.string_types, GafferUI.Image, type( None ) ) ) )
if isinstance( imageOrImageFileName, six.string_types ) :
self.__image = GafferUI.Image( imageOrImageFileName )
else :
self.__image = imageOrImageFileName
self.__updateIcon()
def getImage( self ) :
return self.__image
def setHasFrame( self, hasFrame ) :
self._qtWidget().setProperty( "gafferWithFrame", hasFrame )
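# Frameless buttons get a Fixed horizontal size policy so they hug their
# icon/text rather than stretching with the surrounding layout.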
self._qtWidget().setSizePolicy(
QtWidgets.QSizePolicy.Minimum if hasFrame else QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed
)
self._repolish()
def getHasFrame( self ) :
return self._qtWidget().property( "gafferWithFrame" )
def setEnabled( self, enabled ) :
# Once we're disabled, mouse leave events will be skipped, and we'll
# remain in a highlighted state once re-enabled.
if not enabled and self.__highlightForHover :
self.__highlightForHover = False
self.__updateIcon()
GafferUI.Widget.setEnabled( self, enabled )
def clickedSignal( self ) :
return self.__clickedSignal
def __clicked( self, *unusedArgs ) : # currently PyQt passes a "checked" argument and PySide doesn't
# workaround problem whereby not all text fields will have committed their contents
# into plugs when the button is pressed - this occurs particularly in the OpDialogue, and causes
# the op to run without the values the user sees in the ui. normally editingFinished is emitted by
# the text widget itself on a loss of focus, but unfortunately clicking on a button doesn't cause that
# focus loss. so we helpfully emit the signal ourselves here.
focusWidget = GafferUI.Widget._owner( QtWidgets.QApplication.focusWidget() )
if focusWidget is not None and hasattr( focusWidget, "editingFinishedSignal" ) :
focusWidget.editingFinishedSignal()( focusWidget )
self.clickedSignal()( self )
def __updateIcon( self ) :
if self.__image is None :
self._qtWidget().setIcon( QtGui.QIcon() )
return
# Qt's built-in disabled state generation doesn't work well with dark schemes.
# There is no built-in support for QtGui.QIcon.Active in the default
# painter, which is why we have to juggle it here.
icon = self.__image._qtIcon( highlighted = self.getHighlighted() or self.__highlightForHover )
self._qtWidget().setIcon( icon )
self._qtWidget().setIconSize( self.__image._qtPixmap().size() )
def __enter( self, widget ) :
self.__highlightForHover = True
self.__updateIcon()
def __leave( self, widget ) :
self.__highlightForHover = False
self.__updateIcon()
| bsd-3-clause |
xxd3vin/spp-sdk | opt/Python27/Lib/site-packages/numpy/ma/core.py | 22 | 226541 | """
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray',
'bool_',
'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue',
'amax', 'amin', 'anom', 'anomalies', 'any', 'arange',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2',
'arctanh', 'argmax', 'argmin', 'argsort', 'around',
'array', 'asarray', 'asanyarray',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
'ceil', 'choose', 'clip', 'common_fill_value', 'compress',
'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh',
'count', 'cumprod', 'cumsum',
'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump',
'dumps',
'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide',
'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex',
'fromfunction',
'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal',
'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct',
'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2',
'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or',
'masked', 'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid',
'negative', 'nomask', 'nonzero', 'not_equal',
'ones', 'outer', 'outerproduct',
'power', 'prod', 'product', 'ptp', 'put', 'putmask',
'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
'right_shift', 'round_', 'round',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue',
'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes',
'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where',
'zeros']
import cPickle
import numpy as np
from numpy import ndarray, amax, amin, iscomplexobj, bool_
from numpy import array as narray
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy.compat import getargspec, formatargspec
from numpy import expand_dims as n_expand_dims
import warnings
import sys
if sys.version_info[0] >= 3:
from functools import reduce
MaskType = np.bool_
nomask = MaskType(0)
def doc_note(initialdoc, note):
"""
Adds a Notes section to an existing docstring.
"""
if initialdoc is None:
return
if note is None:
return initialdoc
newdoc = """
%s
Notes
-----
%s
"""
return newdoc % (initialdoc, note)
def get_object_signature(obj):
"""
Get the signature from obj
"""
try:
sig = formatargspec(*getargspec(obj))
except TypeError, errmsg:
sig = ''
# msg = "Unable to retrieve the signature of %s '%s'\n"\
# "(Initial error message: %s)"
# warnings.warn(msg % (type(obj),
# getattr(obj, '__name__', '???'),
# errmsg))
return sig
#####--------------------------------------------------------------------------
#---- --- Exceptions ---
#####--------------------------------------------------------------------------
class MAError(Exception):
"""Class for masked array related errors."""
pass
class MaskError(MAError):
"Class for mask related errors."
pass
#####--------------------------------------------------------------------------
#---- --- Filling options ---
#####--------------------------------------------------------------------------
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c' : 1.e20 + 0.0j,
'f' : 1.e20,
'i' : 999999,
'O' : '?',
'S' : 'N/A',
'u' : 999999,
'V' : '???',
'U' : 'N/A',
}
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
if hasattr(obj, 'dtype'):
defval = _check_fill_value(None, obj.dtype)
elif isinstance(obj, np.dtype):
if obj.subdtype:
defval = default_filler.get(obj.subdtype[0].kind, '?')
else:
defval = default_filler.get(obj.kind, '?')
elif isinstance(obj, float):
defval = default_filler['f']
elif isinstance(obj, int) or isinstance(obj, long):
defval = default_filler['i']
elif isinstance(obj, str):
defval = default_filler['S']
elif isinstance(obj, unicode):
defval = default_filler['U']
elif isinstance(obj, complex):
defval = default_filler['c']
else:
defval = default_filler['O']
return defval
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray or dtype
An object that can be queried for its numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
errmsg = "Unsuitable type for calculating minimum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, min_filler)
elif isinstance(obj, float):
return min_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return min_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return min_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return min_filler[obj]
else:
raise TypeError(errmsg)
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : {ndarray, dtype}
An object that can be queried for its numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
errmsg = "Unsuitable type for calculating maximum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, max_filler)
elif isinstance(obj, float):
return max_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return max_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return max_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return max_filler[obj]
else:
raise TypeError(errmsg)
def _recursive_set_default_fill_value(dtypedescr):
deflist = []
for currentdescr in dtypedescr:
currenttype = currentdescr[1]
if isinstance(currenttype, list):
deflist.append(tuple(_recursive_set_default_fill_value(currenttype)))
else:
deflist.append(default_fill_value(np.dtype(currenttype)))
return tuple(deflist)
def _recursive_set_fill_value(fillvalue, dtypedescr):
fillvalue = np.resize(fillvalue, len(dtypedescr))
output_value = []
for (fval, descr) in zip(fillvalue, dtypedescr):
cdtype = descr[1]
if isinstance(cdtype, list):
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype
if this latter is standard (no fields). If the datatype is flexible (named
fields), fill_value is set to a tuple whose elements are the default fill
values corresponding to each field.
If fill_value is not None, its value is forced to the given dtype.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
if fields:
descr = ndtype.descr
fill_value = np.array(_recursive_set_default_fill_value(descr),
dtype=ndtype,)
else:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
descr = ndtype.descr
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
dtype=ndtype)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'):
fill_value = default_fill_value(ndtype)
else:
# In case we want to convert 1e+20 to int...
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)#.item()
except OverflowError:
fill_value = default_fill_value(ndtype)
return np.array(fill_value)
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
maximum_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = range(5)
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
return
def get_fill_value(a):
"""
Return the filling value of a, if any; otherwise, return the
default filling value for that type.
"""
if isinstance(a, MaskedArray):
result = a.fill_value
else:
result = default_fill_value(a)
return result
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
#####--------------------------------------------------------------------------
def filled(a, fill_value=None):
"""
Return input as an array with masked data replaced by a fill value.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : scalar, optional
Filling value. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
#####--------------------------------------------------------------------------
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
#####--------------------------------------------------------------------------
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=False, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data = [-- -1.0 nan inf],
mask = [ True False False False],
fill_value = 1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data = [-- -1.0 -- --],
mask = [ True False True True],
fill_value = 1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
1.00000000e+20])
>>> x.data
array([ 1., -1., NaN, Inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
#invalid = (numpy.isnan(a._data) | numpy.isinf(a._data))
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
#####--------------------------------------------------------------------------
#---- --- Ufuncs ---
#####--------------------------------------------------------------------------
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that :
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__ (self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater (x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""Define a valid interval for the `tan` function, so that:
``domain_tan(eps)(x) == True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__ (self, x):
"Executes the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""Define a domain for safe division."""
def __init__ (self, tolerance=None):
self.tolerance = tolerance
def __call__ (self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
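# Comparing |a| * tol >= |b| rather than |b| / |a| <= tol means the
# domain check itself can never divide by zero or overflow.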
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""DomainGreater(v)(x) is True where x <= v."""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""DomainGreaterEqual(v)(x) is True where x < v."""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less(x, self.critical_value)
#..............................................................................
class _MaskedUnaryOperation:
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
"""
def __init__ (self, mufunc, fill=0, domain=None):
""" _MaskedUnaryOperation(aufunc, fill=0, domain=None)
aufunc(fill) must be defined
self(x) returns aufunc(x)
with masked values where domain(x) is true or getmask(x) is true.
"""
self.f = mufunc
self.fill = fill
self.domain = domain
self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
#
def __call__ (self, a, *args, **kwargs):
"Execute the call behavior."
d = getdata(a)
# Case 1.1. : Domained function
if self.domain is not None:
# Save the error status
err_status_ini = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = self.f(d, *args, **kwargs)
finally:
np.seterr(**err_status_ini)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
# Case 1.2. : Function without a domain
else:
# Get the result and the mask
result = self.f(d, *args, **kwargs)
m = getmask(a)
# Case 2.1. : The result is scalar
if not result.ndim:
if m:
return masked
return result
# Case 2.2. The result is an array
# We need to fill the invalid data back w/ the input
# Now, that's plain silly: in C, we would just skip the element and keep
# the original, but we do have to do it that way in Python
if m is not nomask:
# In case result has a lower dtype than the inputs (as in equal)
try:
np.putmask(result, m, d)
except TypeError:
pass
# Transform the result to the appropriate (subclass of) MaskedArray
if isinstance(a, MaskedArray):
subtype = type(a)
else:
subtype = MaskedArray
result = result.view(subtype)
result._mask = m
result._update_from(a)
return result
#
def __str__ (self):
return "Masked version of %s. [Invalid values are masked]" % str(self.f)
class _MaskedBinaryOperation:
"""
Define masked version of binary operations, where invalid
values are pre-masked.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_MaskedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__ (self, mbfunc, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = mbfunc
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
def __call__ (self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data, as ndarray
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
# Get the mask
(ma, mb) = (getmask(a), getmask(b))
if ma is nomask:
if mb is nomask:
m = nomask
else:
m = umath.logical_or(getmaskarray(a), mb)
elif mb is nomask:
m = umath.logical_or(ma, getmaskarray(b))
else:
m = umath.logical_or(ma, mb)
# Get the result
err_status_ini = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
finally:
np.seterr(**err_status_ini)
# Case 1. : scalar
if not result.ndim:
if m:
return masked
return result
# Case 2. : array
# Revert result to da where masked
if m.any():
np.putmask(result, m, 0)
# This only makes sense if the operation preserved the dtype
if result.dtype == da.dtype:
result += m * da
# Transforms to a (subclass of) MaskedArray
result = result.view(get_masked_subclass(a, b))
result._mask = m
# Update the optional info from the inputs
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def reduce(self, target, axis=0, dtype=None):
"""Reduce `target` along the given `axis`."""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=1)
m.shape = (1,)
if m is nomask:
return self.f.reduce(t, axis).view(tclass)
t = t.view(tclass)
t._mask = m
tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype)
mr = umath.logical_and.reduce(m, axis)
tr = tr.view(tclass)
if mr.ndim > 0:
tr._mask = mr
return tr
elif mr:
return masked
return tr
def outer (self, a, b):
"""Return the function applied to the outer product of a and b.
"""
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = umath.logical_or.outer(ma, mb)
if (not m.ndim) and m:
return masked
(da, db) = (getdata(a), getdata(b))
d = self.f.outer(da, db)
if m is not nomask:
np.putmask(d, m, da)
if d.shape:
d = d.view(get_masked_subclass(a, b))
d._mask = m
return d
def accumulate (self, target, axis=0):
"""Accumulate `target` along `axis` after filling with y fill
value.
"""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
t = filled(target, self.filly)
return self.f.accumulate(t, axis).view(tclass)
def __str__ (self):
return "Masked version of " + str(self.f)
class _DomainedBinaryOperation:
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__ (self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = dbfunc
self.domain = domain
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data and the mask
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
(ma, mb) = (getmask(a), getmask(b))
# Get the result
err_status_ini = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
finally:
np.seterr(**err_status_ini)
# Get the mask as a combination of ma, mb and invalid
m = ~umath.isfinite(result)
m |= ma
m |= mb
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= filled(domain(da, db), True)
# Take care of the scalar case first
if (not m.ndim):
if m:
return masked
else:
return result
# When the mask is True, put back da
np.putmask(result, m, 0)
result += m * da
result = result.view(get_masked_subclass(a, b))
result._mask = m
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def __str__ (self):
return "Masked version of " + str(self.f)
#..............................................................................
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs .......................................................
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
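# Note that the arctanh domain is pulled in by ~1e-15 on each side, so
# inputs that round to exactly +/-1 become masked entries instead of
# producing infinities.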
# Binary ufuncs ...............................................................
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs ......................................................
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
_DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
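# Behaviour sketch for the domained ufuncs (illustrative, not a doctest):
# np.ma.divide(np.ma.array([1., 2.]), [1., 0.]) yields data [1.0, --]
# with the second entry masked rather than raising a divide-by-zero
# warning.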
#####--------------------------------------------------------------------------
#---- --- Mask creation functions ---
#####--------------------------------------------------------------------------
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some name fields ?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = newtype
return tuple(mdescr)
else:
return newtype
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
<type 'numpy.bool_'>
"""
# Make sure we do have a dtype
if not isinstance(ndtype, np.dtype):
ndtype = np.dtype(ndtype)
return np.dtype(_recursive_make_descr(ndtype, np.bool))
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmask(a)
array([[False, True],
[False, False]], dtype=bool)
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]], dtype=bool)
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]], dtype=bool)
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]], dtype=bool)
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getdata(arr).dtype)
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False], dtype=bool)
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
'formats':[np.bool, np.bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
dtype=dtype)
>>> m
array([(True, False), (False, True), (True, False)],
dtype=[('monty', '|b1'), ('pithon', '|b1')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. The contents need not be 0s and 1s: values
of 0 are interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has
a dtype of MaskType (bool). If the dtype is flexible, each field
has a boolean dtype.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
'formats':[np.int, np.int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
elif isinstance(m, ndarray):
# We won't return after this point to make sure we can shrink the mask
# Fill the mask in case there are missing data
m = filled(m, True)
# Make sure the input dtype is valid
dtype = make_mask_descr(dtype)
if m.dtype == dtype:
if copy:
result = m.copy()
else:
result = m
else:
result = np.array(m, dtype=dtype, copy=copy)
else:
result = np.array(filled(m, True), dtype=MaskType)
# Off with the masks! Shrink an all-False plain mask down to nomask.
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a structured dtype is specified,
the type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype: {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False], dtype=bool)
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def mask_or (m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False], dtype=bool)
"""
def _recursive_mask_or(m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
return
#
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if (dtype1 != dtype2):
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names:
newmask = np.empty_like(m1)
_recursive_mask_or(m1, m2, newmask)
return newmask
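# Plain (non-structured) masks: defer to make_mask for the usual
# copy/shrink semantics.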
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> mask = np.array([0, 0, 1], dtype=np.bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> flatten_mask(mask)
array([False, False, False, True], dtype=bool)
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> flatten_mask(mask)
array([False, False, False, False, False, True], dtype=bool)
"""
#
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
#
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
for f in _flatsequence(element):
yield f
else:
yield element
except TypeError:
yield sequence
#
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
return np.array([_ for _ in flattened], dtype=bool)
def _check_mask_axis(mask, axis):
"Check whether there are masked values along the given axis"
if mask is not nomask:
return mask.all(axis=axis)
return nomask
#####--------------------------------------------------------------------------
#--- --- Masking functions ---
#####--------------------------------------------------------------------------
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistant shape between the condition and the input"\
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
result._mask = cond
return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data = [0 1 2 --],
mask = [False False False True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data = [0 1 -- --],
mask = [False False True True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data = [-- -- 2 3],
mask = [ True True False False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data = [-- -- 2 --],
mask = [ True True False True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
"""
# An alternative implementation relies on filling first: probably not needed.
# d = filled(x, 0)
# c = umath.equal(d, value)
# m = mask_or(c, getmask(x))
# return array(d, mask=m, copy=copy)
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
    copy : bool, optional
    Whether to return a copy of `x`.
    shrink : bool, optional
    Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> print eat
[-- ham]
    >>> # plain ol' ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> print eat
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
>>> eat
masked_array(data = [cheese ham pineapple],
mask = False,
fill_value=?)
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, i.e. where the following condition is True
(abs(x - value) <= atol+rtol*abs(value))
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible. For integers, consider using ``masked_equal``.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
    rtol : float, optional
    Relative tolerance parameter.
    atol : float, optional
    Absolute tolerance parameter. Default is 1e-8.
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data = [1.0 -- 2.0 -- 3.0],
mask = [False True False True False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data = [ 1. 1.1 2. 1.1 3. ],
mask = False,
fill_value=1.5)
For integers, the fill value will be different in general to the
result of ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=999999)
"""
mabs = umath.absolute
xnew = filled(x, value)
if issubclass(xnew.dtype.type, np.floating):
condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value))
mask = getattr(x, '_mask', nomask)
else:
condition = umath.equal(xnew, value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(xnew, mask=mask, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=np.float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., NaN, Inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data = [0.0 1.0 -- -- 4.0],
mask = [False False True True False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
#####--------------------------------------------------------------------------
#---- --- Printing options ---
#####--------------------------------------------------------------------------
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
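    A short sketch of typical use, via the module-level
    `masked_print_option` instance defined below (output formatting may
    differ slightly between versions):
    >>> print np.ma.array([1, 2], mask=[False, True])
    [1 --]
    >>> np.ma.masked_print_option.set_display('X')
    >>> print np.ma.array([1, 2], mask=[False, True])
    [1 X]
    >>> np.ma.masked_print_option.set_display('--')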
"""
def __init__ (self, display):
"Create the masked_print_option object."
self._display = display
self._enabled = True
def display(self):
"Display the string to print for masked values."
return self._display
def set_display (self, s):
"Set the string to print for masked values."
self._display = s
def enabled(self):
"Is the use of the display value enabled?"
return self._enabled
def enable(self, shrink=1):
"Set the enabling shrink to `shrink`."
self._enabled = shrink
def __str__ (self):
return str(self._display)
__repr__ = __str__
# If you index a single masked location, you get this object.
masked_print_option = _MaskedPrintOption('--')
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.putmask(curdata, curmask, printopt)
return
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
#####--------------------------------------------------------------------------
#---- --- MaskedArray class ---
#####--------------------------------------------------------------------------
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
Private function
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.putmask(current, mask[name], fill_value[name])
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
#
def flatten_sequence(iterable):
"""Flattens a compound of nested iterables."""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
for f in flatten_sequence(elm):
yield f
else:
yield elm
#
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
class _arraymethod(object):
"""
Define a wrapper for basic array methods.
Upon call, returns a masked array, where the new ``_data`` array is
the output of the corresponding method called on the original
``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Attributes
----------
_onmask : bool
Holds the `onmask` parameter.
obj : object
The object calling `_arraymethod`.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must be processed also (True) or left
alone (False). Default is True. Make available as `_onmask`
attribute.
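    As a rough illustration, the `repeat` method defined from this
    wrapper further below repeats the mask along with the data:
    >>> x = np.ma.array([1, 2], mask=[False, True])
    >>> print x.repeat(2)
    [1 1 -- --]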
"""
def __init__(self, funcname, onmask=True):
self.__name__ = funcname
self._onmask = onmask
self.obj = None
self.__doc__ = self.getdoc()
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
methdoc = getattr(ndarray, self.__name__, None) or \
getattr(np, self.__name__, None)
if methdoc is not None:
return methdoc.__doc__
#
def __get__(self, obj, objtype=None):
self.obj = obj
return self
#
def __call__(self, *args, **params):
methodname = self.__name__
instance = self.obj
# Fallback : if the instance has not been initialized, use the first arg
if instance is None:
args = list(args)
instance = args.pop(0)
data = instance._data
mask = instance._mask
cls = type(instance)
result = getattr(data, methodname)(*args, **params).view(cls)
result._update_from(instance)
if result.ndim:
if not self._onmask:
result.__setmask__(mask)
elif mask is not nomask:
result.__setmask__(getattr(mask, methodname)(*args, **params))
else:
if mask.ndim and (not mask.dtype.names and mask.all()):
return masked
return result
class MaskedIterator(object):
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
    Extracting more than a single element by indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
#
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
_mask.shape = result.shape
result._mask = _mask
return result
    ### This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def next(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> fl.next()
3
>>> fl.next()
masked_array(data = --,
mask = True,
fill_value = 1e+20)
>>> fl.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
d = self.dataiter.next()
StopIteration
"""
d = self.dataiter.next()
if self.maskiter is not None and self.maskiter.next():
d = masked
return d
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None,
copy=False, subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
    shape as `data`. True indicates a masked (i.e. invalid) value.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
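    Examples
    --------
    A minimal sketch of the construction described above:
    >>> x = np.ma.MaskedArray([1, 2, 3], mask=[0, 1, 0], fill_value=-999)
    >>> print x
    [1 -- 3]
    >>> x.fill_value
    -999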
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True,
**options):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
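        For example (an illustrative sketch):
        >>> x = np.arange(3).view(np.ma.MaskedArray)
        >>> print x
        [0 1 2]
        >>> x.mask is np.ma.nomask
        True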
"""
# Process data............
_data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask..........
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Careful, cls might not always be MaskedArray...
if not isinstance(data, cls) or not subok:
_data = ndarray.view(_data, cls)
else:
_data = ndarray.view(_data, type(data))
# Backwards compatibility w/ numpy.core.ma .......
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
_sharedmask = True
# Process mask ...............................
# Number of named fields (or zero if none)
names_ = _data.dtype.names or ()
# Type of the mask
if names_:
mdtype = make_mask_descr(_data.dtype)
else:
mdtype = MaskType
# Case 1. : no mask in input ............
if mask is nomask:
# Erase the current mask ?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
                # If data is a sequence of masked arrays
mask = np.array([getmaskarray(m) for m in data],
dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
if copy:
_data._mask = _data._mask.copy()
_data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
_data._sharedmask = True
# Case 2. : With a mask in input ........
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError, msg % (nd, nm)
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if names_:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
return
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.......
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check....
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
#
def _update_from(self, obj):
"""Copies some attributes of obj to self.
"""
if obj is not None and isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""Finalizes the masked array.
"""
# Get main attributes .........
self._update_from(obj)
if isinstance(obj, ndarray):
odtype = obj.dtype
if odtype.names:
_mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype))
else:
_mask = getattr(obj, '_mask', nomask)
else:
_mask = nomask
self._mask = _mask
# Finalize the mask ...........
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value for structured arrays
if self.dtype.names:
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
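        This hook is what lets plain numpy ufuncs honour the mask; a
        rough sketch:
        >>> x = np.ma.array([1, 2], mask=[0, 1])
        >>> print np.add(x, 1)
        [2 --]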
"""
result = obj.view(type(self))
result._update_from(self)
#..........
if context is not None:
result._mask = result._mask.copy()
(func, args, _) = context
m = reduce(mask_or, [getmaskarray(arg) for arg in args])
# Get the domain mask................
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
if len(args) > 2:
d = filled(reduce(domain, args), True)
else:
d = filled(domain(*args), True)
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
result = result.copy()
np.putmask(result, d, fill_value)
# Update the mask
if m is nomask:
if d is not nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
#....
return result
def view(self, dtype=None, type=None):
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# Should we update the mask ?
if (getattr(output, '_mask', nomask) is not nomask):
if dtype is None:
dtype = output.dtype
mdtype = make_mask_descr(dtype)
output._mask = self._mask.view(mdtype, ndarray)
# Try to reset the shape of the mask (if we don't have a void)
try:
output._mask.shape = output.shape
except (AttributeError, TypeError):
pass
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
output._fill_value = None
return output
view.__doc__ = ndarray.view.__doc__
def astype(self, newtype):
"""
Returns a copy of the MaskedArray cast to given newtype.
Returns
-------
output : MaskedArray
A copy of self cast to input newtype.
The returned record shape matches self.shape.
Examples
--------
>>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1.0 -- 3.1]
[-- 5.0 --]
[7.0 -- 9.0]]
>>> print x.astype(int32)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
"""
newtype = np.dtype(newtype)
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
names = output.dtype.names
if names is None:
output._mask = self._mask.astype(bool)
else:
if self._mask is nomask:
output._mask = nomask
else:
output._mask = self._mask.astype([(n, bool) for n in names])
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
return output
def __getitem__(self, indx):
"""x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
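        For instance (a sketch of the usual behaviour):
        >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
        >>> x[0]
        1
        >>> x[1] is np.ma.masked
        True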
"""
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError, msg
_data = ndarray.view(self, ndarray)
dout = ndarray.__getitem__(_data, indx)
# We could directly use ndarray.__getitem__ on self...
# But then we would have to modify __array_finalize__ to prevent the
        # mask from being reshaped if it hasn't been set up properly yet...
# So it's easier to stick to the current version
_mask = self._mask
if not getattr(dout, 'ndim', False):
# A record ................
if isinstance(dout, np.void):
mask = _mask[indx]
# If we can make mvoid a subclass of np.void, that'd be what we'd need
# return mvoid(dout, mask=mask)
if flatten_mask(mask).any():
dout = mvoid(dout, mask=mask)
else:
return dout
# Just a scalar............
elif _mask is not nomask and _mask[indx]:
return masked
else:
# Force dout to MA ........
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value ....
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long...
return dout
def __setitem__(self, indx, value):
"""x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
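        For example, assigning the `masked` constant masks a location:
        >>> x = np.ma.array([1, 2, 3])
        >>> x[1] = np.ma.masked
        >>> print x
        [1 -- 3]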
"""
if self is masked:
raise MaskError, 'Cannot alter the masked element.'
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError, msg
_data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass'))
_mask = ndarray.__getattribute__(self, '_mask')
if isinstance(indx, basestring):
ndarray.__setitem__(_data, indx, value)
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
#........................................
_dtype = ndarray.__getattribute__(_data, 'dtype')
nbfields = len(_dtype.names or ())
#........................................
if value is masked:
# The mask wasn't set: create a full version...
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if nbfields:
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
if not self._isfield:
self._sharedmask = False
return
#........................................
# Get the _data part of the new value
dval = value
# Get the _mask part of the new value
mval = getattr(value, '_mask', nomask)
if nbfields and mval is nomask:
mval = tuple([False] * nbfields)
if _mask is nomask:
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
ndarray.__setitem__(_mask, indx, mval)
elif not self._hardmask:
# Unshare the mask if necessary to avoid propagation
if not self._isfield:
self.unshare_mask()
_mask = ndarray.__getattribute__(self, '_mask')
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
ndarray.__setitem__(_mask, indx, mval)
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
ndarray.__setitem__(_data, indx, dval)
else:
if nbfields:
err_msg = "Flexible 'hard' masks are not yet supported..."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
dindx[~mindx] = dval
elif mindx is nomask:
dindx = dval
ndarray.__setitem__(_data, indx, dindx)
_mask[indx] = mindx
return
def __getslice__(self, i, j):
"""x.__getslice__(i, j) <==> x[i:j]
Return the slice described by (i, j). The use of negative
indices is not supported.
"""
return self.__getitem__(slice(i, j))
def __setslice__(self, i, j, value):
"""x.__setslice__(i, j, value) <==> x[i:j]=value
Set the slice (i,j) of a to value. If value is masked, mask
those locations.
"""
self.__setitem__(slice(i, j), value)
def __setmask__(self, mask, copy=False):
"""Set the mask.
"""
idtype = ndarray.__getattribute__(self, 'dtype')
current_mask = ndarray.__getattribute__(self, '_mask')
if mask is masked:
mask = True
# Make sure the mask is set
if (current_mask is nomask):
            # Just don't do anything if there's nothing to do...
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
# No named fields.........
if idtype.names is None:
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
            # Softmask: overwrite the whole mask with the new value
else:
current_mask.flat = mask
# Named fields w/ ............
else:
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()]*len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
            # Softmask: overwrite the whole mask with the new value
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
#....
def _get_mask(self):
"""Return the current mask.
"""
# We could try to force a reshape, but that wouldn't work in some cases.
# return self._mask.reshape(self.shape)
return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
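        For example (an illustrative sketch with a structured dtype):
        >>> x = np.ma.array([(1, 1.), (2, 2.)],
        ...                 dtype=[('a', int), ('b', float)],
        ...                 mask=[(1, 1), (1, 0)])
        >>> x.recordmask
        array([ True, False], dtype=bool)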
"""
_mask = ndarray.__getattribute__(self, '_mask').view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis= -1)
def _set_recordmask(self):
"""Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
#............................................
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
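        Examples
        --------
        A sketch: with a hard mask, assignment cannot unmask an entry.
        >>> x = np.ma.array([1, 2], mask=[0, 1])
        >>> x = x.harden_mask()
        >>> x[1] = 10
        >>> print x
        [1 --]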
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
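        Examples
        --------
        A sketch mirroring `harden_mask`: with a soft mask, assignment
        unmasks the entry.
        >>> x = np.ma.array([1, 2], mask=[0, 1])
        >>> x = x.soften_mask()
        >>> x[1] = 10
        >>> print x
        [1 10]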
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
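        Examples
        --------
        A sketch of the copy-on-unshare behaviour:
        >>> x = np.ma.array([1, 2], mask=[0, 1])
        >>> y = x[:]
        >>> y.sharedmask
        True
        >>> y = y.unshare_mask()
        >>> y.sharedmask
        False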
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
        >>> x = np.ma.array([[1, 2], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]], dtype=bool)
>>> x.shrink_mask()
>>> x.mask
False
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
#............................................
baseclass = property(fget=lambda self:self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
#
def _set_flat (self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
#
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return self._fill_value[()]
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
        (be it the function argument or the attribute of ``self``).
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([1, 2, -999, 4, -999])
>>> type(x.filled())
<type 'numpy.ndarray'>
Subclassing is preserved. This means that if the data part of the masked
array is a matrix, `filled` returns a matrix:
>>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.filled()
matrix([[ 1, 999999],
[999999, 4]])
"""
m = self._mask
if m is nomask:
return self._data
#
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
#
if self is masked_singleton:
return np.asanyarray(fill_value)
#
if m.dtype.names:
result = self._data.copy()
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy()
try:
np.putmask(result, m, fill_value)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
#ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
        condition : array_like
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of a along the axis, then output is truncated
to length of condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
        Please note the difference with :meth:`compressed`!
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray (forget the missing values...)
condition = np.array(condition, copy=False, subok=False)
#
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
#............................................
def __str__(self):
"""String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
r = np.array(self._data.tolist(), dtype=object)
np.putmask(r, m, f)
return str(tuple(r))
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
res = self._data.astype("|O8")
res[m] = f
else:
rdtype = _recursive_make_descr(self.dtype, "|O8")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
def __repr__(self):
"""Literal string representation.
"""
n = len(self.shape)
name = repr(self._data).split('(')[0]
parameters = dict(name=name, nlen=" " * len(name),
data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return _print_templates['short_flx'] % parameters
return _print_templates['long_flx'] % parameters
elif n <= 1:
return _print_templates['short_std'] % parameters
return _print_templates['long_std'] % parameters
def __eq__(self, other):
"Check whether other equals self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__eq__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# Dang, we have a bool instead of an array: return the bool
return check
else:
odata = filled(other, 0)
check = ndarray.__eq__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
#
def __ne__(self, other):
"Check whether other doesn't equal self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__ne__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# In case check is a boolean (or a numpy.bool)
return check
else:
odata = filled(other, 0)
check = ndarray.__ne__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
#
def __add__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __radd__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
    def __sub__(self, other):
        "Subtract other from self, and return a new masked array."
        return subtract(self, other)
    #
    def __rsub__(self, other):
        "Subtract self from other, and return a new masked array."
        return subtract(other, self)
#
def __mul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __rmul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
    def __div__(self, other):
        "Divide self by other, and return a new masked array."
        return divide(self, other)
    #
    def __truediv__(self, other):
        "True-divide self by other, and return a new masked array."
        return true_divide(self, other)
    #
    def __rtruediv__(self, other):
        "True-divide other by self, and return a new masked array."
        return true_divide(other, self)
    #
    def __floordiv__(self, other):
        "Floor-divide self by other, and return a new masked array."
        return floor_divide(self, other)
    #
    def __rfloordiv__(self, other):
        "Floor-divide other by self, and return a new masked array."
        return floor_divide(other, self)
#
def __pow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(self, other)
#
def __rpow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(other, self)
#............................................
def __iadd__(self, other):
"Add other to self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
return self
#....
def __isub__(self, other):
"Subtract other from self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
return self
#....
def __imul__(self, other):
"Multiply self by other in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
return self
#....
def __idiv__(self, other):
"Divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __ifloordiv__(self, other):
"Floor divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __itruediv__(self, other):
"True divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
return self
#...
def __ipow__(self, other):
"Raise self to the power other, in place."
other_data = getdata(other)
other_mask = getmask(other)
err_status = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
finally:
np.seterr(**err_status)
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.putmask(self._data, invalid, self.fill_value)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
#............................................
def __float__(self):
"Convert to float."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "\
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"Convert to int."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "\
"to Python scalars")
elif self._mask:
raise MaskError, 'Cannot convert masked element to a Python int.'
return int(self.item())
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
#............................................
def count(self, axis=None):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If `axis` is
`None`, all non-masked elements are counted.
Returns
-------
result : int or ndarray
If `axis` is `None`, an integer count is returned. When `axis` is
not `None`, an array with shape determined by the lengths of the
remaining axes, is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
m = self._mask
s = self.shape
ls = len(s)
if m is nomask:
if ls == 0:
return 1
if ls == 1:
return s[0]
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.ones(t) * n
n1 = np.size(m, axis)
n2 = m.astype(int).sum(axis)
if axis is None:
return (n1 - n2)
else:
return narray(n1 - n2)
#............................................
flatten = _arraymethod('flatten')
#
def ravel(self):
"""
Returns a 1D version of self, as a view.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.ravel()
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask).reshape(r.shape)
else:
r._mask = nomask
return r
#
repeat = _arraymethod('repeat')
#
def reshape (self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
        The reshaping operation cannot guarantee that a copy will not be made;
        to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print x
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print x
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
#
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
# try:
# ndarray.resize(self, newshape, refcheck=refcheck)
# if self.mask is not nomask:
# self._mask.resize(newshape, refcheck=refcheck)
# except ValueError:
# raise ValueError("Cannot resize an array that has been referenced "
# "or is referencing another array in this way.\n"
# "Use the numpy.ma.resize function.")
# return None
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
#
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
If `values` has some masked values, the initial mask is updated
in consequence, else the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
>>> print x
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
>>> print x
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
"""
m = self._mask
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
#....
self._data.put(indices, values, mode=mode)
#....
if m is nomask:
m = getmask(values)
else:
m = m.copy()
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
#............................................
def ids (self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
#............................................
def all(self, axis=None, out=None):
"""
Check if all of the elements of `a` are true.
Performs a :func:`logical_and` over the given axis and returns the result.
Masked values are considered as True during computation.
For convenience, the output array is masked where ALL the values along the
        current axis are masked: if the output would have been a scalar and
        all the values are masked, then the output is `masked`.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(True).all(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None):
"""
Check if any of the elements of `a` are true.
Performs a logical_or over the given axis and returns the result.
Masked values are considered as False during computation.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array and return a scalar.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
any : equivalent function
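        Examples
        --------
        A small illustration (masked entries count as False):
        >>> np.ma.array([0, 0, 1], mask=[0, 0, 1]).any()
        False
        >>> np.ma.array([0, 1, 1], mask=[0, 0, 1]).any()
        True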
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(False).any(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
        A common use for ``nonzero`` is to find the indices of an array where
        a condition is True. Given an array `a`, the condition ``a > 3`` is a
        boolean array and, since False is interpreted as 0, ``ma.nonzero(a > 3)``
        yields the indices of `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
(this docstring should be overwritten)
"""
#!!!: implement out + test!
m = self._mask
if m is nomask:
result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
axis2=axis2, out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and
the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
sum_along_axis : MaskedArray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.sum()
25
>>> print x.sum(axis=1)
[4 5 16]
>>> print x.sum(axis=0)
[8 5 12]
>>> print type(x.sum(axis=0, dtype=np.int64)[0])
<type 'numpy.int64'>
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along the given axis.
The cumulative sum is calculated over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default (`axis` = None) is to
compute over the flattened array. `axis` may be negative, in which case
it counts from the last to the first axis.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumsum : ndarray.
A new array holding the result is returned unless ``out`` is
specified, in which case a reference to ``out`` is returned.
Notes
-----
        The mask is lost if `out` is not a valid :class:`MaskedArray`!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print marr.cumsum()
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Parameters
----------
axis : {None, int}, optional
Axis over which the product is taken. If None is used, then the
product is over all the array elements.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, array}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
product_along_axis : {array, scalar}, see dtype parameter above.
Returns an array whose shape is the same as a with the specified
axis removed. Returns a 0d array when a is 1d or axis=None.
Returns a reference to the specified output array if specified.
See Also
--------
prod : equivalent function
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
Examples
--------
>>> np.prod([1.,2.])
2.0
>>> np.prod([1.,2.], dtype=np.int32)
2
>>> np.prod([[1.,2.],[3.,4.]])
24.0
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the elements along the given axis.
The cumulative product is taken over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the product is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of ``a`` is an integer type of precision less than the
default platform integer, then the default platform integer precision
is used. Otherwise, the dtype is the same as that of ``a``.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
Notes
-----
        The mask is lost if `out` is not a valid MaskedArray!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
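        Examples
        --------
        A short sketch of the behavior described above:
        >>> marr = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
        >>> print marr.cumprod()
        [1 -- 3 12]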
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the array elements.
Masked entries are ignored.
The average is taken over the flattened array by default, otherwise over
the specified axis. Refer to `numpy.mean` for the full documentation.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default
            is float64; for floating point inputs, it is the same as the input
            dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
mean : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
numpy.ma.mean : Equivalent function.
numpy.mean : Equivalent function on non-masked arrays.
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis, dtype=dtype)
else:
dsum = self.sum(axis=axis, dtype=dtype)
cnt = self.count(axis=axis)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, '_mask', nomask)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data = [-1. 0. 1.],
mask = False,
fill_value = 1e+20)
"""
m = self.mean(axis, dtype)
if not axis:
return (self - m)
else:
return (self - expand_dims(m, axis))
def var(self, axis=None, dtype=None, out=None, ddof=0):
""
# Easy case: nomask, business as usual
if self._mask is nomask:
return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
# Some data are masked, yay!
cnt = self.count(axis=axis) - ddof
danom = self.anom(axis=axis, dtype=dtype)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, '_mask', False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
        # In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0):
""
dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
if dvar is not masked:
dvar = sqrt(dvar)
if out is not None:
out **= 0.5
return out
return dvar
std.__doc__ = np.std.__doc__
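    # Illustrative note (a sketch, not part of the original source): masked
    # entries are excluded from the count, so e.g.
    #     np.ma.array([1., 2., 3., 10.], mask=[0, 0, 0, 1]).std()
    # matches np.std([1., 2., 3.]) (about 0.8165) rather than the standard
    # deviation of all four values.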
#............................................
def round(self, decimals=0, out=None):
"""
        Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
result._mask = self._mask
result._update_from(self)
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
round.__doc__ = ndarray.round.__doc__
#............................................
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. The default is -1 (last axis).
If None, the flattened array is used.
fill_value : var, optional
Value used to fill the array before sorting.
The default is the `fill_value` attribute of the input array.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
if fill_value is None:
fill_value = default_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
{ndarray, scalar}
            If the input is multi-dimensional, returns a new ndarray of
            indices to the minimum values along the given axis. Otherwise,
            returns the scalar index of the minimum value along the given axis.
Examples
--------
        >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print x
[[-- --]
[2 3]]
>>> print x.argmin(axis=0, fill_value=-1)
[0 0]
>>> print x.argmin(axis=0, fill_value=9)
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis= -1, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be forced in the upper indices
(at the end of the array) (True) or lower indices (at the beginning).
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> print a
[1 3 5 -- --]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> print a
[-- -- 1 3 5]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> print a
[1 -- -- 3 5]
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
else:
if self is masked:
return self
if fill_value is None:
if endwith:
filler = minimum_fill_value(self)
else:
filler = maximum_fill_value(self)
else:
filler = fill_value
idx = np.indices(self.shape)
idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind,
order=order)
idx_l = idx.tolist()
tmp_mask = self._mask[idx_l].flat
tmp_data = self._data[idx_l].flat
self._data.flat = tmp_data
self._mask.flat = tmp_mask
return
#............................................
def min(self, axis=None, out=None, fill_value=None):
"""
Return the minimum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
minimum_fill_value
Returns the minimum filling value for a given datatype.
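        Examples
        --------
        A brief illustration (masked entries are ignored):
        >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
        >>> x.min()
        1
        >>> print x.min(axis=0)
        [1 4]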
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.putmask(result, newmask, result.fill_value)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.putmask(out, newmask, np.nan)
return out
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print x
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print x.mini(axis=1)
[0 2 4]
"""
if axis is None:
return minimum(self)
else:
return minimum.reduce(self, axis)
#........................
def max(self, axis=None, out=None, fill_value=None):
"""
Return the maximum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
maximum_fill_value
Returns the maximum filling value for a given datatype.
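        Examples
        --------
        A brief illustration (masked entries are ignored):
        >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
        >>> x.max()
        4
        >>> print x.max(axis=1)
        [1 4]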
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.putmask(result, newmask, result.fill_value)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.putmask(out, newmask, np.nan)
return out
def ptp(self, axis=None, out=None, fill_value=None):
"""
        Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
Returns
-------
ptp : ndarray.
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
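        Examples
        --------
        A brief illustration, reusing the array from the `min`/`max` examples:
        >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
        >>> x.ptp()
        3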
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value)
result -= self.min(axis=axis, fill_value=fill_value)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
out -= self.min(axis=axis, fill_value=fill_value)
return out
def take(self, indices, axis=None, out=None, mode='raise'):
"""
"""
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getattr(indices, '_mask', nomask)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data
if out is None:
out = _data.take(indices, axis=axis, mode=mode).view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
return out
# Array methods ---------------------------------------
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self:self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
squeeze = _arraymethod('squeeze')
#--------------------------------------------
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask ? Just return .data.tolist ?
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array .............
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays ...............
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays...
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
# if fill_value is not None:
# return self.filled(fill_value).tolist()
# result = self.filled().tolist()
# # Set temps to save time when dealing w/ mrecarrays...
# _mask = self._mask
# if _mask is nomask:
# return result
# nbdims = self.ndim
# dtypesize = len(self.dtype)
# if nbdims == 0:
# return tuple([None] * dtypesize)
# elif nbdims == 1:
# maskedidx = _mask.nonzero()[0].tolist()
# if dtypesize:
# nodata = tuple([None] * dtypesize)
# else:
# nodata = None
# [operator.setitem(result, i, nodata) for i in maskedidx]
# else:
# for idx in zip(*[i.tolist() for i in _mask.nonzero()]):
# tmp = result
# for i in idx[:-1]:
# tmp = tmp[i]
# tmp[idx[-1]] = None
# return result
#........................
def tostring(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
Parameters
----------
fill_value : scalar, optional
            Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
ndarray.tostring
tolist, tofile
Notes
-----
As for `ndarray.tostring`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tostring()
'\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
"""
return self.filled(fill_value).tostring(order=order)
#........................
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("Not implemented yet, sorry...")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.toflex()
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
"""
# Get the basic dtype ....
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
        if _mask is nomask:
            _mask = make_mask_none(self.shape, ddtype)
        # And get its dtype
        mdtype = _mask.dtype
        #
        record = np.ndarray(shape=self.shape,
                            dtype=[('_data', ddtype), ('_mask', mdtype)])
        record['_data'] = self._data
        record['_mask'] = _mask
return record
torecords = toflex
#--------------------------------------------
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tostring(cf),
#self._data.tolist(),
getmaskarray(self).tostring(cf),
#getmaskarray(self).tolist(),
self._fill_value,
)
return state
#
def __setstate__(self, state):
"""Restore the internal state of the masked array, for
        pickling purposes. ``state`` is typically the output of
        ``__getstate__``, and is a 7-tuple:
        - a version number
        - a tuple giving the shape of the data
        - a typecode for the data
        - a flag indicating whether the data is Fortran-contiguous
        - a binary string for the data
        - a binary string for the mask
        - the fill value.
"""
(_, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
#
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
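    # Illustrative round-trip (a sketch; assumes the Python 2 cPickle module):
    #     import cPickle
    #     a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    #     b = cPickle.loads(cPickle.dumps(a))
    #     assert (b.mask == a.mask).all() and (b.data == a.data).all()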
#
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.iteritems():
copied.__dict__[k] = deepcopy(v, memo)
return copied
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
"""
    Fake a 'void' object to use for masked arrays with structured dtypes.
"""
#
def __new__(self, data, mask=nomask, dtype=None, fill_value=None):
dtype = dtype or data.dtype
_data = ndarray((), dtype=dtype)
_data[()] = data
_data = _data.view(self)
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
def _get_data(self):
# Make sure that the _data part is a np.void
return self.view(ndarray)[()]
_data = property(fget=_get_data)
def __getitem__(self, indx):
"Get the index..."
m = self._mask
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
self._mask[indx] |= getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if (m is nomask):
return self._data.__str__()
m = tuple(m)
if (not any(m)):
return self._data.__str__()
r = self._data.tolist()
p = masked_print_option
if not p.enabled():
p = 'N/A'
else:
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)]
return "(%s)" % ", ".join(r)
def __repr__(self):
m = self._mask
if (m is nomask):
return self._data.__repr__()
m = tuple(m)
if not any(m):
return self._data.__repr__()
p = masked_print_option
if not p.enabled():
return self.filled(self.fill_value).__repr__()
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)]
return "(%s)" % ", ".join(r)
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
for d in _data:
yield d
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute is used instead.
Returns
-------
filled_void:
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
#####--------------------------------------------------------------------------
#---- --- Shortcuts ---
#####---------------------------------------------------------------------------
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(data =
[[1.0 -- --]
[-- 1.0 --]
[-- -- 1.0]],
mask =
[[False True True]
[ True False True]
[ True True False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray #backward compatibility
# We define the masked singleton as a float for higher precedence...
# Note that it can be tricky sometimes w/ type comparison
class MaskedConstant(MaskedArray):
#
_data = data = np.array(0.)
_mask = mask = np.array(True)
_baseclass = ndarray
#
def __new__(self):
return self._data.view(self)
#
def __array_finalize__(self, obj):
return
#
def __array_wrap__(self, obj):
return self
#
def __str__(self):
return str(masked_print_option._display)
#
def __repr__(self):
return 'masked'
#
def flatten(self):
return masked_array([self._data], dtype=float, mask=[True])
masked = masked_singleton = MaskedConstant()
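# Illustrative behavior of the singleton (sketch):
#     >>> np.ma.masked
#     masked
#     >>> print np.ma.masked
#     --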
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=False,
mask=nomask, fill_value=None,
keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
):
"""array(data, dtype=None, copy=False, order=False, mask=nomask,
fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
subok=True, ndmin=0)
    Acts as a shortcut to MaskedArray, with options in a different order
    for convenience and backwards compatibility.
"""
    #!!!: we should try to put 'order' somewhere
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
keep_mask=keep_mask, hard_mask=hard_mask,
fill_value=fill_value, ndmin=ndmin, shrink=shrink)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data = [0 1 0 2 3],
mask = False,
fill_value=999999)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
#####---------------------------------------------------------------------------
#---- --- Extrema functions ---
#####---------------------------------------------------------------------------
class _extrema_operation(object):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
return self.reduce(a)
return where(self.compare(a, b), a, b)
#.........
def reduce(self, target, axis=None):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is not None:
kargs = { 'axis' : axis }
else:
kargs = {}
target = target.ravel()
if not (m is nomask):
m = m.ravel()
if m is nomask:
t = self.ufunc.reduce(target, **kargs)
else:
target = target.filled(self.fill_value_func(target)).view(type(target))
t = self.ufunc.reduce(target, **kargs)
m = umath.logical_and.reduce(m, **kargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
#.........
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.ufunc.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
#............................
class _minimum_operation(_extrema_operation):
"Object to calculate minima"
def __init__ (self):
"""minimum(a, b) or minimum(a)
In one argument case, returns the scalar minimum.
"""
self.ufunc = umath.minimum
self.afunc = amin
self.compare = less
self.fill_value_func = minimum_fill_value
#............................
class _maximum_operation(_extrema_operation):
"Object to calculate maxima"
def __init__ (self):
"""maximum(a, b) or maximum(a)
In one argument case returns the scalar maximum.
"""
self.ufunc = umath.maximum
self.afunc = amax
self.compare = greater
self.fill_value_func = maximum_fill_value
#..........................................................
def min(obj, axis=None, out=None, fill_value=None):
try:
return obj.min(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
        # If obj doesn't have a min method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None):
try:
return obj.max(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
# If obj doesn't have a max method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
"""a.ptp(axis=None) = a.max(axis)-a.min(axis)"""
try:
return obj.ptp(axis, out=out, fill_value=fill_value)
except (AttributeError, TypeError):
        # If obj doesn't have a ptp method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
#####---------------------------------------------------------------------------
#---- --- Definition of functions from the corresponding methods ---
#####---------------------------------------------------------------------------
class _frommethod:
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname):
self.__name__ = methodname
self.__doc__ = self.getdoc()
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or\
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None))
return doc
#
def __call__(self, a, *args, **params):
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress')
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
def take(a, indices, axis=None, out=None, mode='raise'):
"""
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
#..............................................................................
def power(a, b, third=None):
"""
Returns element-wise base array raised to power from second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
The *out* argument to `numpy.power` is not supported, `third` has to be
None.
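    Examples
    --------
    A short sketch; invalid results (e.g. a negative base raised to a
    fractional power) come back masked:
    >>> print np.ma.power(np.ma.array([1., -1., 4.], mask=[0, 0, 1]), 2)
    [1.0 1.0 --]
    >>> np.ma.power(-1., 0.5) is np.ma.masked
    True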
"""
if third is not None:
        raise MaskError("3-argument power not supported.")
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
err_status = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
finally:
np.seterr(**err_status)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not (result.ndim):
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
# if fb.dtype.char in typecodes["Integer"]:
# return masked_array(umath.power(fa, fb), m)
# m = mask_or(m, (fa < 0) & (fb != fb.astype(int)))
# if m is nomask:
# return masked_array(umath.power(fa, fb))
# else:
# fa = fa.copy()
# if m.all():
# fa.flat = 1
# else:
# np.putmask(fa,m,1)
# return masked_array(umath.power(fa, fb), m)
#..............................................................................
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
if axis is None:
return d.argsort(kind=kind, order=order)
return d.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def argmin(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
return d.argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__
def argmax(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
try:
fill_value = -fill_value
except:
pass
d = filled(a, fill_value)
return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__
def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = narray(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if fill_value is None:
if endwith:
filler = minimum_fill_value(a)
else:
filler = maximum_fill_value(a)
else:
filler = fill_value
# return
indx = np.indices(a.shape).tolist()
indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`MaskedArray`, see `MaskedArray.compressed` for details.
See Also
--------
MaskedArray.compressed
Equivalent method.
"""
if getmask(x) is nomask:
return np.asanyarray(x)
else:
return x.compressed()
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
masked_array(data = [2 3 4],
mask = False,
fill_value = 999999)
>>> ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask...
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
# If we decide to keep a '_shrinkmask' option, we want to check that ...
# ... all of them are True, and then check for dm.any()
# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays])
# if shrink and not dm.any():
if not dm.dtype.fields and not dm.any():
data._mask = nomask
else:
data._mask = dm.reshape(d.shape)
return data
def count(a, axis=None):
if isinstance(a, MaskedArray):
return a.count(axis)
return masked_array(a, copy=False).count(axis)
count.__doc__ = MaskedArray.count.__doc__
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
def expand_dims(x, axis):
"""
Expand the shape of an array.
Expands the shape of the array by including a new axis before the one
specified by the `axis` parameter. This function behaves the same as
`numpy.expand_dims` but preserves masked elements.
See Also
--------
numpy.expand_dims : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array([1, 2, 4])
>>> x[1] = ma.masked
>>> x
masked_array(data = [1 -- 4],
mask = [False True False],
fill_value = 999999)
>>> np.expand_dims(x, axis=0)
array([[1, 2, 4]])
>>> ma.expand_dims(x, axis=0)
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
The same result can be achieved using slicing syntax with `np.newaxis`.
>>> x[np.newaxis, :]
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
"""
result = n_expand_dims(x, axis)
if isinstance(x, MaskedArray):
new_shape = result.shape
result = x.view()
result.shape = new_shape
if result._mask is not nomask:
result._mask.shape = new_shape
return result
#......................................
def left_shift (a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
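    Examples
    --------
    A small illustration (masked entries stay masked):
    >>> print np.ma.left_shift(np.ma.array([1, 2, 3], mask=[0, 1, 0]), 2)
    [4 -- 12]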
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift (a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
#......................................
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): #, mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
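    Examples
    --------
    A minimal sketch of the in-place modification:
    >>> a = np.ma.array([1, 2, 3, 4])
    >>> np.ma.putmask(a, [1, 0, 1, 0], [10, 20, 30, 40])
    >>> print a
    [10 2 30 4]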
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.putmask(a._mask, mask, valmask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.putmask(m, mask, valmask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.putmask(a._mask, mask, valmask)
np.putmask(a._data, mask, valdata)
return
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
    >>> x
masked_array(data =
[[0 1]
[2 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
>>> ma.transpose(x)
masked_array(data =
[[0 2]
[1 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
"""
#We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
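    Examples
    --------
    A brief illustration (the mask is reshaped along with the data):
    >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
    >>> print np.ma.reshape(x, (2, 2))
    [[1 --]
     [3 4]]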
"""
    # We can't use 'frommethod': it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value = 999999)
>>> np.resize(a, (3, 3))
array([[1, 2, 3],
[4, 1, 2],
[3, 4, 1]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 -- 3]
[4 1 --]
[3 4 1]],
mask =
[[False True False]
[False False True]
[False False False]],
fill_value = 999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 2 3]
[4 1 2]
[3 4 1]],
mask =
False,
fill_value = 999999)
"""
    # We can't use _frommethods here, as np.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
#................................................
def rank(obj):
"maskedarray version of the numpy function."
return np.rank(getdata(obj))
rank.__doc__ = np.rank.__doc__
#
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
#
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
#................................................
#####--------------------------------------------------------------------------
#---- --- Extra functions ---
#####--------------------------------------------------------------------------
def where (condition, x=None, y=None):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
        as condition, or be broadcastable to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print x
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print np.ma.where(x > 5, x, -3.1416)
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
if x is None and y is None:
return filled(condition, 0).nonzero()
elif x is None or y is None:
        raise ValueError, "Either both or neither of x and y should be given."
# Get the condition ...............
fc = filled(condition, 0).astype(MaskType)
notfc = np.logical_not(fc)
# Get the data ......................................
xv = getdata(x)
yv = getdata(y)
if x is masked:
ndtype = yv.dtype
elif y is masked:
ndtype = xv.dtype
else:
ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
# Construct an empty array and fill it
d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
_data = d._data
np.putmask(_data, fc, xv.astype(ndtype))
np.putmask(_data, notfc, yv.astype(ndtype))
# Create an empty mask and fill it
_mask = d._mask = np.zeros(fc.shape, dtype=MaskType)
np.putmask(_mask, fc, getmask(x))
np.putmask(_mask, notfc, getmask(y))
_mask |= getmaskarray(condition)
if not _mask.any():
d._mask = nomask
return d
def choose (indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a set of choices.
Given an array of integers and a set of n choice arrays, this method
will create a new array that merges each of the choice arrays. Where a
value in `a` is i, the new array will have the value that choices[i]
contains in the same place.
Parameters
----------
a : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
    numpy.choose : Equivalent function in the top-level NumPy module.
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data = [3 2 1],
mask = False,
fill_value=999999)
"""
def fmask (x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask (x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices......
c = filled(indices, 0)
# Get the masks........
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=0, shrink=True)
# Get the choices......
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
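# Illustrative usage sketch (not part of the original module): with no ``out``
# argument the mask survives the rounding.
def _round_example():
    import numpy.ma as ma
    a = ma.array([1.234, 5.678], mask=[0, 1])
    return ma.round_(a, 1)  # [1.2, --]; the second entry stays masked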
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
    Like the generic NumPy equivalent, the product sum is over the last dimension
of a and b.
Notes
-----
The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if len(fa.shape) == 0:
fa.shape = (1,)
if len(fb.shape) == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
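# Illustrative usage sketch (not part of the original module): masked entries
# are filled with 0 before the product sum.
def _inner_example():
    import numpy as np
    import numpy.ma as ma
    a = ma.array([1., 2., 3.], mask=[0, 1, 0])
    b = np.array([4., 5., 6.])
    return ma.inner(a, b)  # 1*4 + 0*5 + 3*6 == 22.0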
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
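# Illustrative usage sketch (not part of the original module): the result mask
# of ``outer`` is the union of the row/column masks of the inputs.
def _outer_example():
    import numpy.ma as ma
    a = ma.array([1, 2], mask=[0, 1])
    b = ma.array([3, 4])
    return ma.outer(a, b)  # [[3, 4], [--, --]]; the whole second row is masked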
def allequal (a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN,
then False is returned.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value=1e+20)
    >>> b = np.array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> ma.allequal(a, b, fill_value=False)
False
>>> ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value = 1e+20)
>>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
False
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
#..............................................................................
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
#####--------------------------------------------------------------------------
#---- --- Pickling ---
#####--------------------------------------------------------------------------
def dump(a, F):
"""
Pickle a masked array to a file.
This is a wrapper around ``cPickle.dump``.
Parameters
----------
a : MaskedArray
The array to be pickled.
F : str or file-like object
The file to pickle `a` to. If a string, the full path to the file.
"""
if not hasattr(F, 'readline'):
F = open(F, 'w')
return cPickle.dump(a, F)
def dumps(a):
"""
Return a string corresponding to the pickling of a masked array.
This is a wrapper around ``cPickle.dumps``.
Parameters
----------
a : MaskedArray
The array for which the string representation of the pickle is
returned.
"""
return cPickle.dumps(a)
def load(F):
"""
Wrapper around ``cPickle.load`` which accepts either a file-like object
or a filename.
Parameters
----------
F : str or file
The file or file name to load.
See Also
--------
dump : Pickle an array
Notes
-----
This is different from `numpy.load`, which does not use cPickle but loads
the NumPy binary .npy format.
"""
if not hasattr(F, 'readline'):
F = open(F, 'r')
return cPickle.load(F)
def loads(strg):
"""
Load a pickle from the current string.
The result of ``cPickle.loads(strg)`` is returned.
Parameters
----------
strg : str
The string to load.
See Also
--------
dumps : Return a string corresponding to the pickling of a masked array.
"""
return cPickle.loads(strg)
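# Illustrative usage sketch (not part of the original module): ``dumps`` and
# ``loads`` round-trip a masked array, preserving data and mask alike.
def _pickle_roundtrip_example():
    import numpy.ma as ma
    a = ma.array([1, 2, 3], mask=[0, 1, 0])
    b = ma.loads(ma.dumps(a))
    return ma.allequal(a, b)  # True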
################################################################################
def fromfile(file, dtype=float, count= -1, sep=''):
raise NotImplementedError("Not yet implemented. Sorry")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i4'), ('_mask', '|b1')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(data =
[[0 -- 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False True False]
[ True False True]
[False True False]],
fill_value = 999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.0), (0, False, 0.0)],
[(0, False, 0.0), (0, False, 0.0)]],
dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(data =
[[0 0]
[0 0]],
mask =
[[False False]
[False False]],
fill_value = 999999)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
_methodname : string
Name of the method to transform.
"""
__doc__ = None
#
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
#
def __call__(self, a, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(a, *args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
            result._hardmask = bool(_extras.get("hardmask", False))
return result
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
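# Illustrative usage sketch (not part of the original module): the _convert2ma
# wrappers accept the extra keywords declared in ``params``, e.g. ``fill_value``.
def _convert2ma_example():
    import numpy.ma as ma
    x = ma.ones((2, 2), fill_value=-1)
    return x.fill_value  # -1.0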
###############################################################################
| mit |
hkawasaki/kawasaki-aio8-2 | cms/djangoapps/contentstore/management/commands/check_course.py | 22 | 2740 | from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location
class Command(BaseCommand):
help = '''Enumerates through the course and find common errors'''
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("check_course requires one argument: <location>")
loc_str = args[0]
loc = CourseDescriptor.id_to_location(loc_str)
store = modulestore()
course = store.get_item(loc, depth=3)
err_cnt = 0
def _xlint_metadata(module):
err_cnt = check_module_metadata_editability(module)
for child in module.get_children():
err_cnt = err_cnt + _xlint_metadata(child)
return err_cnt
err_cnt = err_cnt + _xlint_metadata(course)
        # we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
def _check_xml_attributes_field(module):
err_cnt = 0
if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location.url())
err_cnt = err_cnt + 1
for child in module.get_children():
err_cnt = err_cnt + _check_xml_attributes_field(child)
return err_cnt
err_cnt = err_cnt + _check_xml_attributes_field(course)
# check for dangling discussion items, this can cause errors in the forums
def _get_discussion_items(module):
discussion_items = []
if module.location.category == 'discussion':
discussion_items = discussion_items + [module.location.url()]
for child in module.get_children():
discussion_items = discussion_items + _get_discussion_items(child)
return discussion_items
discussion_items = _get_discussion_items(course)
# now query all discussion items via get_items() and compare with the tree-traversal
queried_discussion_items = store.get_items(
Location(
'i4x',
course.location.org,
course.location.course,
'discussion',
None,
None
)
)
for item in queried_discussion_items:
if item.location.url() not in discussion_items:
print 'Found dangling discussion module = {0}'.format(item.location.url())
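# Illustrative invocation sketch (added for documentation; not part of the
# original command). The course id "Org/Course/Run" is a placeholder, and a
# configured Django settings module is assumed.
def _check_course_demo(course_id='Org/Course/Run'):
    from django.core.management import call_command
    call_command('check_course', course_id)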
| agpl-3.0 |
bbcf/bbcflib | bein/tests/test_bein.py | 1 | 13588 | import socket
import re
import sys
import random
from unittest2 import TestCase, TestSuite, main, TestLoader, skipIf
from bein import *
from bein.util import touch
M = MiniLIMS("testing_lims")
def hostname_contains(pattern):
hostname = socket.gethostbyaddr(socket.gethostname())[0]
if re.search(pattern, hostname) == None:
return False
else:
return True
try:
if hostname_contains('vital-it.ch'):
not_vital_it = False
else:
not_vital_it = True
except:
not_vital_it = True
@program
def count_lines(filename):
"""Count the number of lines in *filename* (equivalent to ``wc -l``)."""
def parse_output(p):
m = re.search(r'^\s*(\d+)\s+' + filename + r'\s*$',
''.join(p.stdout))
if m == None:
return None
else:
return int(m.groups()[-1]) # in case of a weird line in LSF
return {"arguments": ["wc","-l",filename],
"return_value": parse_output}
class TestProgramBinding(TestCase):
def test_binding_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
self.assertEqual(count_lines(ex, 'boris'), 3)
def test_local_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines._local(ex, 'boris')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
@skipIf(not_vital_it, "Not on VITAL-IT.")
def test_lsf_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines._lsf(ex, 'boris')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
def test_nonblocking_with_via_local(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines.nonblocking(ex, 'boris', via='local')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
@skipIf(not_vital_it, "Not on VITAL-IT")
def test_nonblocking_with_via_lsf(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines.nonblocking(ex, 'boris', via='lsf')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
def test_syntaxerror_outside_execution(self):
with execution(M) as ex:
pass
M.delete_execution(ex.id)
with self.assertRaises(SyntaxError):
touch(ex)
def test_syntaxerror_outside_execution_nonblocking(self):
with execution(M) as ex:
pass
M.delete_execution(ex.id)
with self.assertRaises(SyntaxError):
touch.nonblocking(ex)
class TestUniqueFilenameIn(TestCase):
def test_state_determines_filename(self):
with execution(None) as ex:
st = random.getstate()
f = unique_filename_in()
random.setstate(st)
g = unique_filename_in()
self.assertEqual(f, g)
def test_unique_filename_exact_match(self):
with execution(None) as ex:
st = random.getstate()
f = touch(ex)
random.setstate(st)
g = touch(ex)
self.assertNotEqual(f, g)
def test_unique_filename_beginnings_match(self):
with execution(None) as ex:
st = random.getstate()
f = unique_filename_in()
touch(ex, f + 'abcdefg')
random.setstate(st)
g = touch(ex)
self.assertNotEqual(f, g)
class TestMiniLIMS(TestCase):
def test_resolve_alias_exception_on_no_file(self):
with execution(None) as ex:
M = MiniLIMS("boris")
self.assertRaises(ValueError, M.resolve_alias, 55)
def test_resolve_alias_returns_int_if_exists(self):
with execution(None) as ex:
f = touch(ex)
M = MiniLIMS("boris")
a = M.import_file(f)
self.assertEqual(M.resolve_alias(a), a)
def test_resolve_alias_with_alias(self):
with execution(None) as ex:
f = touch(ex)
M = MiniLIMS("boris")
a = M.import_file(f)
M.add_alias(a, 'hilda')
self.assertEqual(M.resolve_alias('hilda'), a)
def test_path_to_file_on_execution(self):
with execution(None) as ignoreme:
f = touch(ignoreme)
M = MiniLIMS("boris")
fid = M.import_file(f)
mpath = M.path_to_file(fid)
with execution(M) as ex:
fpath = ex.path_to_file(fid)
self.assertEqual(mpath, fpath)
def test_search_files(self):
f_desc = unique_filename_in()
t1 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
f_id = M.import_file("../LICENSE", description=f_desc)
t2 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
f_found = M.search_files(with_text="LICENSE", with_description=f_desc, older_than=t2, source="import", newer_than=t1)
self.assertIn(f_id, f_found)
M.delete_file(f_id)
f_desc = {"name":"test_search_files_by_dict", "m":5, "n":15}
f_id = M.import_file("../LICENSE", description=f_desc)
f_found = M.search_files(with_description=f_desc)
self.assertIn(f_id, f_found)
M.delete_file(f_id)
def test_search_executions(self):
with execution(M, description="desc_test") as ex:
pass
ex_found = M.search_executions(with_description="desc_test")
self.assertIn(ex.id,ex_found)
M.delete_execution(ex.id)
ex_desc = {"name":"test_search_ex_by_dict", "m":5, "n":15}
with execution(M, description=ex_desc) as ex:
pass
ex_found = M.search_executions(with_description=ex_desc)
self.assertIn(ex.id, ex_found)
try:
with execution(M, description="desc_test_fail") as ex_nofail:
3/0
except: pass
ex_found_nofail = M.search_executions(with_description="desc_test", fails=False)
for e in ex_found_nofail:
error = M.fetch_execution(e)["exception_string"]
self.assertIsNone(error)
ex_found_fail = M.search_executions(with_description="desc_test", fails=True)
for e in ex_found_fail:
error = M.fetch_execution(e)["exception_string"]
self.assertIsNotNone(error)
M.delete_execution(ex.id)
M.delete_execution(ex_nofail.id)
def test_browse_files(self):
f_desc = "browse_file_test"
f_id = M.import_file("../LICENSE", description=f_desc)
f_found = M.browse_files(with_description=f_desc)
#self.assertIn(f_id,f_found)
M.delete_file(f_id)
def test_browse_executions(self):
ex_desc = "browse_ex_test"
with execution(M, description=ex_desc) as ex:
touch(ex,"boris")
ex_found = M.browse_executions(with_description=ex_desc)
#self.assertIs(ex.id,ex_found)
M.delete_execution(ex.id)
class TestExportFile(TestCase):
def test_export_file(self):
filea = M.import_file("../LICENSE") #file ID
fileb = M.import_file("../doc/bein.rst")
testdir = "testing.files"
if not os.path.isdir(testdir):
os.mkdir(testdir)
M.associate_file(fileb,filea,template="%s.linked")
M.export_file(filea, dst=os.path.join(testdir,"exportedfile"), with_associated=True) #test with file name given
self.assertTrue(os.path.isfile(os.path.join(testdir,"exportedfile"+".linked")))
os.remove(os.path.join(testdir,"exportedfile"))
os.remove(os.path.join(testdir,"exportedfile"+".linked"))
M.export_file(filea, dst=testdir, with_associated=True) #test with directory given
filename = M.fetch_file(filea)['repository_name']
self.assertTrue(os.path.isfile(os.path.join(testdir, filename +".linked")))
os.remove(os.path.join(testdir, filename))
os.remove(os.path.join(testdir, filename +".linked"))
@program
def echo(s):
return {'arguments': ['echo',str(s)],
'return_value': None}
class TestStdoutStderrRedirect(TestCase):
def test_stdout_redirected(self):
try:
with execution(M) as ex:
f = unique_filename_in()
echo(ex, "boris!", stdout=f)
with open(f) as q:
l = q.readline()
self.assertEqual(l, 'boris!\n')
finally:
M.delete_execution(ex.id)
def test_stdout_local_redirected(self):
try:
with execution(M) as ex:
f = unique_filename_in()
m = echo.nonblocking(ex, "boris!", stdout=f)
m.wait()
with open(f) as q:
l = q.readline()
self.assertEqual(l, 'boris!\n')
finally:
M.delete_execution(ex.id)
class TestNoSuchProgramError(TestCase):
@program
def nonexistent():
return {"arguments": ["meepbarf","hilda"],
"return_value": None}
def test_nonexistent(self):
with execution(None) as ex:
self.assertRaises(ValueError, self.nonexistent, ex)
def test_nonexistent_local(self):
with execution(None) as ex:
f = self.nonexistent.nonblocking(ex, via="local")
self.assertRaises(ValueError, f.wait)
class TestImmutabilityDropped(TestCase):
def test_immutability_dropped(self):
executions = []
with execution(M) as ex:
touch(ex, "boris")
ex.add("boris")
exid1 = ex.id
borisid = M.search_files(source=('execution',ex.id))[0]
self.assertFalse(M.fetch_file(borisid)['immutable'])
with execution(M) as ex:
ex.use(borisid)
exid2 = ex.id
self.assertTrue(M.fetch_file(borisid)['immutable'])
M.delete_execution(exid2)
self.assertFalse(M.fetch_file(borisid)['immutable'])
M.delete_execution(exid1)
self.assertEqual(M.search_files(source=('execution',exid1)), [])
class TestAssociatePreservesFilenames(TestCase):
def test_associate_with_names(self):
try:
with execution(M) as ex:
touch(ex, "boris")
touch(ex, "hilda")
ex.add("boris")
ex.add("hilda", associate_to_filename="boris", template="%s.meep")
boris_id = M.search_files(source=('execution',ex.id), with_text="boris")[0]
hilda_id = M.search_files(source=('execution',ex.id), with_text="hilda")[0]
boris_name = M.fetch_file(boris_id)['repository_name']
hilda_name = M.fetch_file(hilda_id)['repository_name']
self.assertEqual("%s.meep" % boris_name, hilda_name)
finally:
try:
M.delete_execution(ex.id)
except:
pass
def test_associate_with_id(self):
try:
fid = M.import_file('test.py')
with execution(M) as ex:
touch(ex, "hilda")
ex.add("hilda", associate_to_id=fid, template="%s.meep")
hilda_id = M.search_files(source=('execution',ex.id))[0]
hilda_name = M.fetch_file(hilda_id)['repository_name']
fid_name = M.fetch_file(fid)['repository_name']
self.assertEqual("%s.meep" % fid_name, hilda_name)
finally:
try:
M.delete_execution(ex.id)
M.delete_file(fid)
except:
pass
def test_hierarchical_association(self):
try:
with execution(M) as ex:
touch(ex, "a")
touch(ex, "b")
touch(ex, "c")
ex.add("a")
ex.add("b", associate_to_filename="a", template="%s.step")
ex.add("c", associate_to_filename="b", template="%s.step")
a_id = M.search_files(source=('execution',ex.id), with_text='a')[0]
b_id = M.search_files(source=('execution',ex.id), with_text='b')[0]
c_id = M.search_files(source=('execution',ex.id), with_text='c')[0]
a_name = M.fetch_file(a_id)['repository_name']
b_name = M.fetch_file(b_id)['repository_name']
c_name = M.fetch_file(c_id)['repository_name']
self.assertEqual("%s.step" % a_name, b_name)
self.assertEqual("%s.step.step" % a_name, c_name)
finally:
try:
M.delete_execution(ex.id)
except:
pass
def test_given(tests):
    module = sys.modules[__name__]
    if tests is None:
        defaultTest = None
    else:
        loader = TestLoader()
        defaultTest = TestSuite()
        tests = loader.loadTestsFromNames(tests, module)
        defaultTest.addTests(tests)
    main(defaultTest=defaultTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
test_given(sys.argv[1:])
else:
test_given(None)
| gpl-3.0 |
dawnpower/nova | nova/openstack/common/imageutils.py | 64 | 6349 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
"""
import re
from oslo_utils import strutils
from nova.openstack.common._i18n import _
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"(\d*\.?\d+)(\w+)?(\s*\(\s*(\d+)\s+bytes\s*\))?",
re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output or '')
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encrypted = details.get('encrypted')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
if self.encrypted:
lines.append("encrypted: %s" % self.encrypted)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - i.e. for usage in kwargs and such...
field = field.lower().strip()
for c in (" ", "-"):
field = field.replace(c, '_')
return field
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if not real_size:
raise ValueError(_('Invalid input value "%s".') % details)
magnitude = real_size.group(1)
unit_of_measure = real_size.group(2)
bytes_info = real_size.group(3)
if bytes_info:
return int(real_size.group(4))
elif not unit_of_measure:
return int(magnitude)
return strutils.string_to_bytes('%s%sB' % (magnitude, unit_of_measure),
return_int=True)
def _extract_details(self, root_cmd, root_details, lines_after):
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
if root_details == 'None':
real_details = 0
else:
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after.pop(0).startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
while lines_after:
line = lines_after[0]
line_pieces = line.split()
if len(line_pieces) != 6:
break
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
lines_after.pop(0)
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
return real_details
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
# Find all points start with some chars and then a ':' then a newline
# and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
contents = {}
lines = [x for x in cmd_output.splitlines() if x.strip()]
while lines:
line = lines.pop(0)
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
continue
root_details = top_level.group(2).strip()
details = self._extract_details(root, root_details, lines)
contents[root] = details
return contents
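# Illustrative usage sketch (not part of the original module): parse a sample
# ``qemu-img info`` output. The sample text is hypothetical but follows the
# layout the parser expects.
def _qemu_img_info_example():
    sample = ("image: disk.img\n"
              "file format: qcow2\n"
              "virtual size: 4.0G (4294967296 bytes)\n"
              "disk size: 140K\n"
              "cluster_size: 65536\n")
    info = QemuImgInfo(sample)
    return info.file_format, info.virtual_size  # ('qcow2', 4294967296)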
| apache-2.0 |
Turkingwang/dpkt | dpkt/snoop.py | 22 | 2963 | # $Id$
"""Snoop file format."""
import sys, time
import dpkt
# RFC 1761
SNOOP_MAGIC = 0x736E6F6F70000000L
SNOOP_VERSION = 2
SDL_8023 = 0
SDL_8024 = 1
SDL_8025 = 2
SDL_8026 = 3
SDL_ETHER = 4
SDL_HDLC = 5
SDL_CHSYNC = 6
SDL_IBMCC = 7
SDL_FDDI = 8
SDL_OTHER = 9
dltoff = { SDL_ETHER:14 }
class PktHdr(dpkt.Packet):
"""snoop packet header."""
__byte_order__ = '!'
__hdr__ = (
('orig_len', 'I', 0),
('incl_len', 'I', 0),
('rec_len', 'I', 0),
('cum_drops', 'I', 0),
('ts_sec', 'I', 0),
('ts_usec', 'I', 0),
)
class FileHdr(dpkt.Packet):
"""snoop file header."""
__byte_order__ = '!'
__hdr__ = (
('magic', 'Q', SNOOP_MAGIC),
('v', 'I', SNOOP_VERSION),
('linktype', 'I', SDL_ETHER),
)
class Writer(object):
"""Simple snoop dumpfile writer."""
def __init__(self, fileobj, linktype=SDL_ETHER):
self.__f = fileobj
fh = FileHdr(linktype=linktype)
self.__f.write(str(fh))
def writepkt(self, pkt, ts=None):
if ts is None:
ts = time.time()
s = str(pkt)
n = len(s)
pad_len = 4 - n % 4 if n % 4 else 0
ph = PktHdr(orig_len=n,incl_len=n,
rec_len=PktHdr.__hdr_len__+n+pad_len,
ts_sec=int(ts),
                    ts_usec=int((float(ts) - int(ts)) * 1000000.0))
self.__f.write(str(ph))
self.__f.write(s + '\0' * pad_len)
def close(self):
self.__f.close()
class Reader(object):
"""Simple pypcap-compatible snoop file reader."""
def __init__(self, fileobj):
self.name = fileobj.name
self.fd = fileobj.fileno()
self.__f = fileobj
buf = self.__f.read(FileHdr.__hdr_len__)
self.__fh = FileHdr(buf)
self.__ph = PktHdr
if self.__fh.magic != SNOOP_MAGIC:
raise ValueError, 'invalid snoop header'
self.dloff = dltoff[self.__fh.linktype]
self.filter = ''
def fileno(self):
return self.fd
def datalink(self):
return self.__fh.linktype
def setfilter(self, value, optimize=1):
return NotImplementedError
def readpkts(self):
return list(self)
def dispatch(self, cnt, callback, *args):
if cnt > 0:
for i in range(cnt):
ts, pkt = self.next()
callback(ts, pkt, *args)
else:
for ts, pkt in self:
callback(ts, pkt, *args)
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
self.__f.seek(FileHdr.__hdr_len__)
while 1:
buf = self.__f.read(PktHdr.__hdr_len__)
if not buf: break
hdr = self.__ph(buf)
buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__)
yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
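# Illustrative round-trip sketch (not part of the original module): write one
# dummy 60-byte frame to a snoop file and read it back.
def _snoop_roundtrip_example(path='demo.snoop'):
    f = open(path, 'wb')
    w = Writer(f)
    w.writepkt('\x00' * 60, ts=0.0)
    w.close()
    return [(ts, len(pkt)) for ts, pkt in Reader(open(path, 'rb'))]  # [(0.0, 60)]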
| bsd-3-clause |
amanikamail/flexx | docs/scripts/genuiclasses.py | 20 | 3063 | """ Generate docs for ui classes.
"""
import os
from types import ModuleType
from flexx import ui, app
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DOC_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
OUTPUT_DIR = os.path.join(DOC_DIR, 'ui')
created_files = []
def main():
pages = {}
class_names = []
# Get all pages and class names
for mod in ui.__dict__.values():
if isinstance(mod, ModuleType):
classes = []
for w in mod.__dict__.values():
if isinstance(w, type) and issubclass(w, ui.Widget):
if w.__module__ == mod.__name__:
classes.append(w)
if classes:
classes.sort(key=lambda x: len(x.mro()))
class_names.extend([w.__name__ for w in classes])
pages[mod.__name__] = classes
# Create page for each module
for module_name, classes in sorted(pages.items()):
page_name = module_name.split('.')[-1].strip('_').capitalize()
docs = '%s\n%s\n\n' % (page_name, '-' * len(page_name))
docs += '.. automodule:: %s\n\n' % module_name
docs += '----\n\n'
for cls in classes:
name = cls.__name__
            # Insert info on base classes
if 'Inherits from' not in cls.__doc__:
bases = [':class:`%s <flexx.ui.%s>`' % (bcls.__name__, bcls.__name__)
for bcls in cls.__bases__]
line = 'Inherits from: ' + ', '.join(bases)
cls.__doc__ = line + '\n\n' + (cls.__doc__ or '')
# Create doc for class
docs += '.. autoclass:: flexx.ui.%s\n' % name
docs += ' :members:\n\n'
# Write doc page
filename = os.path.join(OUTPUT_DIR, page_name.lower() + '.rst')
created_files.append(filename)
open(filename, 'wt').write(docs)
# Create overview doc page
docs = 'Ui API'
docs += '\n' + '=' * len(docs) + '\n\n'
docs += 'This is a list of all widget classes provided by ``flexx.ui``. '
docs += ':class:`Widget <flexx.ui.Widget>` is the base class of all widgets. '
docs += 'There is one document per widget type. Each document contains '
docs += 'examples with the widget(s) defined within.\n\n'
for name in sorted(class_names):
docs += '* :class:`%s <flexx.ui.%s>`\n' % (name, name)
docs += '\n.. toctree::\n :maxdepth: 1\n :hidden:\n\n'
for module_name in sorted(pages.keys()):
docs += ' %s\n' % module_name.split('.')[-1].strip('_').lower()
# Write overview doc page
filename = os.path.join(OUTPUT_DIR, 'api.rst')
created_files.append(filename)
open(filename, 'wt').write(docs)
print(' generated widget docs with %i pages and %i widgets' % (len(pages), len(class_names)))
def clean():
while created_files:
filename = created_files.pop()
if os.path.isfile(filename):
os.remove(filename)
| bsd-2-clause |
eeshangarg/oh-mainline | vendor/packages/django-extensions/django_extensions/management/commands/sqldiff.py | 17 | 30783 | """
sqldiff.py - Prints the (approximated) difference between models and database
TODO:
- better support for relations
- better support for constraints (mainly postgresql?)
- support for table spaces with postgresql
- when a table is not managed (meta.managed==False) then only do a one-way
sqldiff ? show differences from db->table but not the other way around since
it's not managed.
KNOWN ISSUES:
 - MySQL has by far the most problems with introspection. Please be
   careful when using MySQL with sqldiff.
 - Booleans are reported back as Integers, so there's no way to know if
   there was a real change.
 - Varchar sizes are reported back without unicode support, so their
   reported size may differ from the real length of the varchar.
- Some of the 'fixes' to counter these problems might create false
positives or false negatives.
"""
from django.core.management.base import BaseCommand
from django.core.management import sql as _sql
from django.core.management import CommandError
from django.core.management.color import no_style
from django.db import transaction, connection
from django.db.models.fields import IntegerField
from optparse import make_option
ORDERING_FIELD = IntegerField('_order', null=True)
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
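# Illustrative usage sketch (not part of the original module): ``flatten``
# recursively collapses nested lists/tuples, as done for Meta.unique_together.
def _flatten_example():
    return flatten([1, [2, (3, 4)], 5])  # [1, 2, 3, 4, 5]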
def all_local_fields(meta):
all_fields = meta.local_fields[:]
for parent in meta.parents:
all_fields.extend(all_local_fields(parent._meta))
return all_fields
class SQLDiff(object):
DATA_TYPES_REVERSE_OVERRIDE = {}
DIFF_TYPES = [
'error',
'comment',
'table-missing-in-db',
'field-missing-in-db',
'field-missing-in-model',
'index-missing-in-db',
'index-missing-in-model',
'unique-missing-in-db',
'unique-missing-in-model',
'field-type-differ',
'field-parameter-differ',
]
DIFF_TEXTS = {
'error': 'error: %(0)s',
'comment': 'comment: %(0)s',
'table-missing-in-db': "table '%(0)s' missing in database",
'field-missing-in-db': "field '%(1)s' defined in model but missing in database",
'field-missing-in-model': "field '%(1)s' defined in database but missing in model",
'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database",
'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model",
'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database",
'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model",
'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'",
'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'",
}
SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1])))
SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])))
# FIXME: need to lookup index name instead of just appending _idx to table + fieldname
SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))))
SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1])))
# FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname
SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2])))))
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0]))
SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])
def __init__(self, app_models, options):
self.app_models = app_models
self.options = options
self.dense = options.get('dense_output', False)
try:
self.introspection = connection.introspection
except AttributeError:
from django.db import get_introspection_module
self.introspection = get_introspection_module()
self.cursor = connection.cursor()
self.django_tables = self.get_django_tables(options.get('only_existing', True))
self.db_tables = self.introspection.get_table_list(self.cursor)
self.differences = []
self.unknown_db_fields = {}
self.DIFF_SQL = {
'error': self.SQL_ERROR,
'comment': self.SQL_COMMENT,
'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB,
'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB,
'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB,
'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL,
'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB,
'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL,
'field-type-differ': self.SQL_FIELD_TYPE_DIFFER,
'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER,
}
def add_app_model_marker(self, app_label, model_name):
self.differences.append((app_label, model_name, []))
def add_difference(self, diff_type, *args):
assert diff_type in self.DIFF_TYPES, 'Unknown difference type'
self.differences[-1][-1].append((diff_type, args))
def get_django_tables(self, only_existing):
try:
django_tables = self.introspection.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
try:
django_tables = _sql.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before svn r7568
django_tables = _sql.django_table_list(only_existing=only_existing)
return django_tables
def sql_to_dict(self, query, param):
""" sql_to_dict(query, param) -> list of dicts
code from snippet at http://www.djangosnippets.org/snippets/1383/
"""
cursor = connection.cursor()
cursor.execute(query, param)
fieldnames = [name[0] for name in cursor.description]
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
def get_field_model_type(self, field):
return field.db_type(connection=connection)
def get_field_db_type(self, description, field=None, table_name=None):
from django.db import models
# DB-API cursor.description
#(name, type_code, display_size, internal_size, precision, scale, null_ok) = description
type_code = description[1]
if type_code in self.DATA_TYPES_REVERSE_OVERRIDE:
reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code]
else:
try:
try:
reverse_type = self.introspection.data_types_reverse[type_code]
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code)
except KeyError:
# type_code not found in data_types_reverse map
key = (self.differences[-1][:2], description[:2])
if key not in self.unknown_db_fields:
self.unknown_db_fields[key] = 1
self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code))
return None
kwargs = {}
if isinstance(reverse_type, tuple):
kwargs.update(reverse_type[1])
reverse_type = reverse_type[0]
if reverse_type == "CharField" and description[3]:
kwargs['max_length'] = description[3]
if reverse_type == "DecimalField":
kwargs['max_digits'] = description[4]
kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5]
if description[6]:
kwargs['blank'] = True
if not reverse_type in ('TextField', 'CharField'):
kwargs['null'] = True
if '.' in reverse_type:
from django.utils import importlib
# TODO: when was importlib added to django.utils ? and do we
# need to add backwards compatibility code ?
module_path, package_name = reverse_type.rsplit('.', 1)
module = importlib.import_module(module_path)
field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection)
else:
field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection)
return field_db_type
def strip_parameters(self, field_type):
if field_type and field_type != 'double precision':
return field_type.split(" ")[0].split("(")[0]
return field_type
def find_unique_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
def find_unique_missing_in_model(self, meta, table_indexes, table_name):
# TODO: Postgresql does not list unique_togethers in table_indexes
# MySQL does
fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
for att_name, att_opts in table_indexes.iteritems():
if att_opts['unique'] and att_name in fields and not fields[att_name]:
if att_name in flatten(meta.unique_together):
continue
self.add_difference('unique-missing-in-model', table_name, att_name)
def find_index_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.db_index:
attname = field.db_column or field.attname
if not attname in table_indexes:
self.add_difference('index-missing-in-db', table_name, attname)
def find_index_missing_in_model(self, meta, table_indexes, table_name):
fields = dict([(field.name, field) for field in all_local_fields(meta)])
for att_name, att_opts in table_indexes.iteritems():
if att_name in fields:
field = fields[att_name]
if field.db_index:
continue
if att_opts['primary_key'] and field.primary_key:
continue
if att_opts['unique'] and field.unique:
continue
if att_opts['unique'] and att_name in flatten(meta.unique_together):
continue
self.add_difference('index-missing-in-model', table_name, att_name)
def find_field_missing_in_model(self, fieldmap, table_description, table_name):
for row in table_description:
if row[0] not in fieldmap:
self.add_difference('field-missing-in-model', table_name, row[0])
def find_field_missing_in_db(self, fieldmap, table_description, table_name):
db_fields = [row[0] for row in table_description]
for field_name, field in fieldmap.iteritems():
if field_name not in db_fields:
self.add_difference('field-missing-in-db', table_name, field_name,
field.db_type(connection=connection))
def find_field_type_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.strip_parameters(self.get_field_model_type(field))
db_type = self.strip_parameters(self.get_field_db_type(description, field))
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not model_type == db_type:
self.add_difference('field-type-differ', table_name, field.name, model_type, db_type)
def find_field_parameter_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
continue
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not model_type == db_type:
self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
@transaction.commit_manually
def find_differences(self):
cur_app_label = None
for app_model in self.app_models:
meta = app_model._meta
table_name = meta.db_table
app_label = meta.app_label
if cur_app_label != app_label:
# Marker indicating start of difference scan for this table_name
self.add_app_model_marker(app_label, app_model.__name__)
#if not table_name in self.django_tables:
if not table_name in self.db_tables:
# Table is missing from database
self.add_difference('table-missing-in-db', table_name)
continue
table_indexes = self.introspection.get_indexes(self.cursor, table_name)
fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)])
# add ordering field if model uses order_with_respect_to
if meta.order_with_respect_to:
fieldmap['_order'] = ORDERING_FIELD
try:
table_description = self.introspection.get_table_description(self.cursor, table_name)
except Exception, e:
self.add_difference('error', 'unable to introspect table: %s' % str(e).strip())
transaction.rollback() # reset transaction
continue
else:
transaction.commit()
# Fields which are defined in database but not in model
# 1) find: 'unique-missing-in-model'
self.find_unique_missing_in_model(meta, table_indexes, table_name)
# 2) find: 'index-missing-in-model'
self.find_index_missing_in_model(meta, table_indexes, table_name)
# 3) find: 'field-missing-in-model'
self.find_field_missing_in_model(fieldmap, table_description, table_name)
# Fields which are defined in models but not in database
# 4) find: 'field-missing-in-db'
self.find_field_missing_in_db(fieldmap, table_description, table_name)
# 5) find: 'unique-missing-in-db'
self.find_unique_missing_in_db(meta, table_indexes, table_name)
# 6) find: 'index-missing-in-db'
self.find_index_missing_in_db(meta, table_indexes, table_name)
# Fields which have a different type or parameters
# 7) find: 'type-differs'
self.find_field_type_differ(meta, table_description, table_name)
# 8) find: 'type-parameter-differs'
self.find_field_parameter_differ(meta, table_description, table_name)
def print_diff(self, style=no_style()):
""" print differences to stdout """
if self.options.get('sql', True):
self.print_diff_sql(style)
else:
self.print_diff_text(style)
def print_diff_text(self, style):
cur_app_label = None
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print style.NOTICE("+ Application:"), style.SQL_TABLE(app_label)
cur_app_label = app_label
if not self.dense:
print style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name)
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args))
text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'")))
if not self.dense:
print style.NOTICE("|--+"), text
else:
print style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text
def print_diff_sql(self, style):
cur_app_label = None
qn = connection.ops.quote_name
has_differences = max([len(diffs) for app_label, model_name, diffs in self.differences])
if not has_differences:
if not self.dense:
print style.SQL_KEYWORD("-- No differences")
else:
print style.SQL_KEYWORD("BEGIN;")
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label))
cur_app_label = app_label
if not self.dense:
print style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name))
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_SQL[diff_type](style, qn, diff_args)
if self.dense:
text = text.replace("\n\t", " ")
print text
print style.SQL_KEYWORD("COMMIT;")
class GenericSQLDiff(SQLDiff):
pass
class MySQLDiff(SQLDiff):
# All the MySQL hacks together create something of a problem:
# fixing one bug in MySQL creates another issue. So just keep in mind
# that this is very unreliable for MySQL at the moment.
def get_field_db_type(self, description, field=None, table_name=None):
from MySQLdb.constants import FIELD_TYPE
# Weird bug? The MySQL db-api returns three times the correct value for field
# lengths; if I remember correctly it had something to do with unicode strings.
# TODO: fix this in a more meaningful and better understood manner
description = list(description)
if description[1] not in [FIELD_TYPE.TINY, FIELD_TYPE.SHORT]: # exclude tinyints from conversion.
description[3] = description[3] / 3
description[4] = description[4] / 3
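# Illustrative correction (hypothetical numbers): for a VARCHAR(32) column the
# cursor description's length fields at indexes 3 and 4 come back as 96, and
# the divisions above bring them back down to 32 before the base class
# derives the db type.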
db_type = super(MySQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
if field.primary_key and (db_type == 'integer' or db_type == 'bigint'):
db_type += ' AUTO_INCREMENT'
# Like SQLite, MySQL is fuzzy about the distinction between char and varchar.
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
# MySQL likes to call bools 'tinyint(1)' and introspection makes that an integer;
# just convert it back to its proper type, a bool is a bool and nothing else.
if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1:
db_type = 'bool'
if db_type == 'integer' and description[1] == FIELD_TYPE.SHORT:
db_type = 'smallint UNSIGNED' # FIXME: what if it's not UNSIGNED?
return db_type
class SqliteSQLDiff(SQLDiff):
# Unique does not seem to be implied for primary keys on SQLite.
# If this behaviour turns out to be more common among databases, it might be
# useful to add it to the superclass's find_unique_missing_in_db method.
def find_unique_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
if attname in table_indexes and table_indexes[attname]['primary_key']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
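# table_indexes comes from Django's introspection and maps a column name to
# its index flags, e.g. {'email': {'unique': True, 'primary_key': False}}
# (hypothetical column), so the loop above accepts either an explicit unique
# index or the primary key itself as satisfying field.unique.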
# Finding indexes via the get_indexes dictionary doesn't seem to work
# for SQLite.
def find_index_missing_in_db(self, meta, table_indexes, table_name):
pass
def find_index_missing_in_model(self, meta, table_indexes, table_name):
pass
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(SqliteSQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
return db_type
class PostgresqlSQLDiff(SQLDiff):
DATA_TYPES_REVERSE_OVERRIDE = {
1042: 'CharField',
# postgis types (TODO: support is very incomplete)
17506: 'django.contrib.gis.db.models.fields.PointField',
55902: 'django.contrib.gis.db.models.fields.MultiPolygonField',
}
# Hopefully in the future we can add constraint checking and other more
# advanced checks based on this database.
SQL_LOAD_CONSTRAINTS = """
SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid)
FROM pg_constraint
INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey)
INNER JOIN pg_class ON conrelid=pg_class.oid
INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace
ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname;
"""
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
def __init__(self, app_models, options):
SQLDiff.__init__(self, app_models, options)
self.check_constraints = {}
self.load_constraints()
def load_constraints(self):
for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []):
key = (dct['nspname'], dct['relname'], dct['attname'])
if 'CHECK' in dct['pg_get_constraintdef']:
self.check_constraints[key] = dct
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
if field.primary_key and db_type == 'integer':
db_type = 'serial'
if table_name:
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
check_constraint = self.check_constraints.get((tablespace, table_name, field.attname), {}).get('pg_get_constraintdef', None)
if check_constraint:
check_constraint = check_constraint.replace("((", "(")
check_constraint = check_constraint.replace("))", ")")
check_constraint = '("'.join([')' in e and '" '.join(e.split(" ", 1)) or e for e in check_constraint.split("(")])
# TODO: there might be more than one constraint in the definition?
db_type += ' ' + check_constraint
return db_type
"""
def find_field_type_differ(self, meta, table_description, table_name):
def callback(field, description, model_type, db_type):
if field.primary_key and db_type=='integer':
db_type = 'serial'
return model_type, db_type
super(PostgresqlSQLDiff, self).find_field_type_differs(meta, table_description, table_name, callback)
"""
DATABASE_SQLDIFF_CLASSES = {
'postgresql_psycopg2' : PostgresqlSQLDiff,
'postgresql': PostgresqlSQLDiff,
'mysql': MySQLDiff,
'sqlite3': SqliteSQLDiff,
'oracle': GenericSQLDiff
}
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--all-applications', '-a', action='store_true', dest='all_applications',
help="Automaticly include all application from INSTALLED_APPS."),
make_option('--not-only-existing', '-e', action='store_false', dest='only_existing',
help="Check all tables that exist in the database, not only tables that should exist based on models."),
make_option('--dense-output', '-d', action='store_true', dest='dense_output',
help="Shows the output in dense format, normally output is spreaded over multiple lines."),
make_option('--output_text', '-t', action='store_false', dest='sql', default=True,
help="Outputs the differences as descriptive text instead of SQL"),
)
help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).
It indicates how columns in the database are different from the sql that would
be generated by Django. This command is not a database migration tool. (Though
it can certainly help) It's purpose is to show the current differences as a way
to check/debug ur models compared to the real database tables and columns."""
output_transaction = False
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django import VERSION
if VERSION[:2] < (1, 0):
raise CommandError("SQLDiff only support Django 1.0 or higher!")
from django.db import models
from django.conf import settings
if settings.DATABASE_ENGINE == 'dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set DATABASE_ENGINE.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't specified the DATABASE_ENGINE setting.\n" +
"Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.")
if options.get('all_applications', False):
app_models = models.get_models()
else:
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (models.ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
app_models = []
for app in app_list:
app_models.extend(models.get_models(app))
## remove all models that are not managed by Django
#app_models = [model for model in app_models if getattr(model._meta, 'managed', True)]
if not app_models:
raise CommandError('Unable to execute sqldiff: no models found.')
engine = settings.DATABASE_ENGINE
if not engine:
engine = connection.__module__.split('.')[-2]
cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
sqldiff_instance = cls(app_models, options)
sqldiff_instance.find_differences()
sqldiff_instance.print_diff(self.style)
return
| agpl-3.0 |
jjs0sbw/CSPLN | apps/scaffolding/linux/web2py/gluon/contrib/fpdf/php.py | 13 | 1256 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# fpdf php helpers:
def substr(s, start, length=-1):
if length < 0:
length=len(s)-start
return s[start:start+length]
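# Doctest-style sketch of the PHP-like semantics (values illustrative):
#   >>> substr("abcdef", 1, 3)
#   'bcd'
#   >>> substr("abcdef", 2)
#   'cdef'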
def sprintf(fmt, *args): return fmt % args
def print_r(array):
if not isinstance(array, dict):
array = dict([(k, k) for k in array])
for k, v in array.items():
print "[%s] => %s" % (k, v),
def UTF8ToUTF16BE(instr, setbom=True):
"Converts UTF-8 strings to UTF16-BE."
outstr = ""
if (setbom):
outstr += "\xFE\xFF";
if not isinstance(instr, unicode):
instr = instr.decode('UTF-8')
outstr += instr.encode('UTF-16BE')
return outstr
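# Sketch (illustrative): with the BOM enabled the result is prefixed with the
# UTF-16BE byte-order mark:
#   >>> UTF8ToUTF16BE("A")
#   '\xfe\xff\x00A'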
def UTF8StringToArray(instr):
"Converts UTF-8 strings to codepoints array"
return [ord(c) for c in instr]
# ttfints php helpers:
def die(msg):
raise RuntimeError(msg)
def str_repeat(s, count):
return s * count
def str_pad(s, pad_length=0, pad_char= " ", pad_type= +1 ):
if pad_type<0: # pad left
return s.rjust(pad_length, pad_char)
elif pad_type>0: # pad right
return s.ljust(pad_length, pad_char)
else: # pad both
return s.center(pad_length, pad_char)
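# Sketch of the three pad modes (values illustrative):
#   >>> str_pad("ab", 5, "-", +1)   # pad right
#   'ab---'
#   >>> str_pad("ab", 5, "-", -1)   # pad left
#   '---ab'
#   >>> str_pad("ab", 6, "-", 0)    # pad both
#   '--ab--'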
strlen = count = lambda s: len(s) | gpl-3.0 |
rismalrv/edx-platform | common/djangoapps/course_modes/views.py | 62 | 10994 | """
Views for the course_mode module
"""
import decimal
from ipware.ip import get_ip
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.views.generic.base import View
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from edxmako.shortcuts import render_to_response
from course_modes.models import CourseMode
from courseware.access import has_access
from student.models import CourseEnrollment
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from util.db import commit_on_success_with_read_committed
from xmodule.modulestore.django import modulestore
from embargo import api as embargo_api
class ChooseModeView(View):
"""View used when the user is asked to pick a mode.
When a get request is used, shows the selection page.
When a post request is used, assumes that it is a form submission
from the selection page, parses the response, and then sends user
to the next step in the flow.
"""
@method_decorator(login_required)
def get(self, request, course_id, error=None):
"""Displays the course mode choice page.
Args:
request (`Request`): The Django Request object.
course_id (unicode): The slash-separated course key.
Keyword Args:
error (unicode): If provided, display this error message
on the page.
Returns:
Response
"""
course_key = CourseKey.from_string(course_id)
# Check whether the user has access to this course
# based on country access rules.
embargo_redirect = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if embargo_redirect:
return redirect(embargo_redirect)
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(request.user, course_key)
modes = CourseMode.modes_for_course_dict(course_key)
# We assume that, if 'professional' is one of the modes, it is the *only* mode.
# If we offer more modes alongside 'professional' in the future, this will need to route
# to the usual "choose your track" page same is true for no-id-professional mode.
has_enrolled_professional = (CourseMode.is_professional_slug(enrollment_mode) and is_active)
if CourseMode.has_professional_mode(modes) and not has_enrolled_professional:
return redirect(
reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(course_key)}
)
)
# If there isn't a verified mode available, then there's nothing
# to do on this page. The user has almost certainly been auto-registered
# in the "honor" track by this point, so we send the user
# to the dashboard.
if not CourseMode.has_verified_mode(modes):
return redirect(reverse('dashboard'))
# If a user has already paid, redirect them to the dashboard.
if is_active and (enrollment_mode in CourseMode.VERIFIED_MODES + [CourseMode.NO_ID_PROFESSIONAL_MODE]):
return redirect(reverse('dashboard'))
donation_for_course = request.session.get("donation_for_course", {})
chosen_price = donation_for_course.get(unicode(course_key), None)
course = modulestore().get_course(course_key)
# When a credit mode is available, students will be given the option
# to upgrade from a verified mode to a credit mode at the end of the course.
# This allows students who have completed photo verification to be eligible
# for university credit.
# Since credit isn't one of the selectable options on the track selection page,
# we need to check *all* available course modes in order to determine whether
# a credit mode is available. If so, then we show slightly different messaging
# for the verified track.
has_credit_upsell = any(
CourseMode.is_credit_mode(mode) for mode
in CourseMode.modes_for_course(course_key, only_selectable=False)
)
context = {
"course_modes_choose_url": reverse("course_modes_choose", kwargs={'course_id': course_key.to_deprecated_string()}),
"modes": modes,
"has_credit_upsell": has_credit_upsell,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"chosen_price": chosen_price,
"error": error,
"responsive": True,
"nav_hidden": True,
}
if "verified" in modes:
context["suggested_prices"] = [
decimal.Decimal(x.strip())
for x in modes["verified"].suggested_prices.split(",")
if x.strip()
]
context["currency"] = modes["verified"].currency.upper()
context["min_price"] = modes["verified"].min_price
context["verified_name"] = modes["verified"].name
context["verified_description"] = modes["verified"].description
return render_to_response("course_modes/choose.html", context)
@method_decorator(login_required)
@method_decorator(commit_on_success_with_read_committed)
def post(self, request, course_id):
"""Takes the form submission from the page and parses it.
Args:
request (`Request`): The Django Request object.
course_id (unicode): The slash-separated course key.
Returns:
Status code 400 when the requested mode is unsupported. When the honor mode
is selected, redirects to the dashboard. When the verified mode is selected,
returns error messages if the indicated contribution amount is invalid or
below the minimum, otherwise redirects to the verification flow.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
user = request.user
# This is a bit redundant with logic in student.views.change_enrollment,
# but I don't really have the time to refactor it more nicely and test.
course = modulestore().get_course(course_key)
if not has_access(user, 'enroll', course):
error_msg = _("Enrollment is closed")
return self.get(request, course_id, error=error_msg)
requested_mode = self._get_requested_mode(request.POST)
allowed_modes = CourseMode.modes_for_course_dict(course_key)
if requested_mode not in allowed_modes:
return HttpResponseBadRequest(_("Enrollment mode not supported"))
if requested_mode == 'honor':
# The user will have already been enrolled in the honor mode at this
# point, so we just redirect them to the dashboard, thereby avoiding
# hitting the database a second time attempting to enroll them.
return redirect(reverse('dashboard'))
mode_info = allowed_modes[requested_mode]
if requested_mode == 'verified':
amount = request.POST.get("contribution") or \
request.POST.get("contribution-other-amt") or 0
try:
# Validate the amount passed in and force it into two digits
amount_value = decimal.Decimal(amount).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
error_msg = _("Invalid amount selected.")
return self.get(request, course_id, error=error_msg)
# Check for minimum pricing
if amount_value < mode_info.min_price:
error_msg = _("No selected price or selected price is too low.")
return self.get(request, course_id, error=error_msg)
donation_for_course = request.session.get("donation_for_course", {})
donation_for_course[unicode(course_key)] = amount_value
request.session["donation_for_course"] = donation_for_course
return redirect(
reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(course_key)}
)
)
def _get_requested_mode(self, request_dict):
"""Get the user's requested mode
Args:
request_dict (`QueryDict`): A dictionary-like object containing all given HTTP POST parameters.
Returns:
The course mode slug corresponding to the choice in the POST parameters,
None if the choice in the POST parameters is missing or is an unsupported mode.
"""
if 'verified_mode' in request_dict:
return 'verified'
if 'honor_mode' in request_dict:
return 'honor'
else:
return None
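# Rough sketch of the mapping performed above (hypothetical QueryDicts):
#   {'verified_mode': '...'} -> 'verified'
#   {'honor_mode': '...'}    -> 'honor'
#   {}                       -> None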
def create_mode(request, course_id):
"""Add a mode to the course corresponding to the given course ID.
Only available when settings.FEATURES['MODE_CREATION_FOR_TESTING'] is True.
Attempts to use the following querystring parameters from the request:
`mode_slug` (str): The mode to add, either 'honor', 'verified', or 'professional'
`mode_display_name` (str): Describes the new course mode
`min_price` (int): The minimum price a user must pay to enroll in the new course mode
`suggested_prices` (str): Comma-separated prices to suggest to the user.
`currency` (str): The currency in which to list prices.
By default, this endpoint will create an 'honor' mode for the given course with display name
'Honor Code', a minimum price of 0, no suggested prices, and using USD as the currency.
Args:
request (`Request`): The Django Request object.
course_id (unicode): A course ID.
Returns:
Response
"""
PARAMETERS = {
'mode_slug': u'honor',
'mode_display_name': u'Honor Code Certificate',
'min_price': 0,
'suggested_prices': u'',
'currency': u'usd',
}
# Try pulling querystring parameters out of the request
for parameter, default in PARAMETERS.iteritems():
PARAMETERS[parameter] = request.GET.get(parameter, default)
# Attempt to create the new mode for the given course
course_key = CourseKey.from_string(course_id)
CourseMode.objects.get_or_create(course_id=course_key, **PARAMETERS)
# Return a success message and a 200 response
return HttpResponse("Mode '{mode_slug}' created for '{course}'.".format(
mode_slug=PARAMETERS['mode_slug'],
course=course_id
))
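# A hypothetical request exercising this endpoint (parameter names taken from
# PARAMETERS above; the URL prefix depends on the project's urlconf):
#   GET .../create_mode/<course_id>?mode_slug=verified&mode_display_name=Verified&min_price=10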
| agpl-3.0 |
stevenwudi/Kernelized_Correlation_Filter | CNN_training.py | 1 | 3640 | import numpy as np
from keras.optimizers import SGD
from models.CNN_CIFAR import cnn_cifar_batchnormalisation, cnn_cifar_small, cnn_cifar_nodropout, \
cnn_cifar_small_batchnormalisation
from models.DataLoader import DataLoader
from scripts.progress_bar import printProgress
from time import time, localtime
# this is a predefined dataloader
loader = DataLoader(batch_size=32)
# construct the model here (pre-defined model)
model = cnn_cifar_small_batchnormalisation(loader.image_shape)
print(model.name)
nb_epoch = 200
early_stopping = True
early_stopping_count = 0
early_stopping_wait = 3
train_loss = []
valid_loss = []
learning_rate = [0.0001, 0.001, 0.01]
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=learning_rate[-1], decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
# load validation data from the h5py file (heavy lifting here)
x_valid, y_valid = loader.get_valid()
best_valid = np.inf
for e in range(nb_epoch):
print("epoch %d" % e)
loss_list = []
time_list = []
time_start = time()
for i in range(loader.n_iter_train):
time_start_batch = time()
X_batch, Y_batch = loader.next_train_batch()
loss_list.append(model.train_on_batch(X_batch, Y_batch))
# calculate some time information
time_list.append(time() - time_start_batch)
eta = (loader.n_iter_train - i) * np.array(time_list).mean()
printProgress(i, loader.n_iter_train-1, prefix='Progress:', suffix='batch error: %0.5f, ETA: %0.2f sec.'%(np.array(loss_list).mean(), eta), barLength=50)
printProgress(i, loader.n_iter_train - 1, prefix='Progress:', suffix='batch error: %0.5f' % (np.array(loss_list).mean()), barLength=50)
train_loss.append(np.asarray(loss_list).mean())
print('training loss is %f, one epoch uses: %0.2f sec' % (train_loss[-1], time() - time_start))
valid_loss.append(model.evaluate(x_valid, y_valid))
print('valid loss is %f' % valid_loss[-1])
if best_valid > valid_loss[-1]:
early_stopping_count = 0
print('saving best valid result...')
best_valid = valid_loss[-1]
model.save('./models/CNN_Model_OBT100_multi_cnn_best_valid_'+model.name+'.h5')
else:
# early stopping: wait a few non-improving epochs before reacting
early_stopping_count += 1
if early_stopping_count > early_stopping_wait:
early_stopping_count = 0
if len(learning_rate) > 1:
learning_rate.pop()
print('decreasing the learning rate to: %f'%learning_rate[-1])
model.optimizer.lr.set_value(learning_rate[-1])
else:
break
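# Net effect of the schedule above: training starts at lr=0.01
# (learning_rate[-1]); after early_stopping_wait epochs without a validation
# improvement the next lower rate is popped (0.001, then 0.0001), and once
# only one rate remains the epoch loop breaks.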
lt = localtime()
lt_str = str(lt.tm_year)+"."+str(lt.tm_mon).zfill(2)+"." \
+str(lt.tm_mday).zfill(2)+"."+str(lt.tm_hour).zfill(2)+"."\
+str(lt.tm_min).zfill(2)+"."+str(lt.tm_sec).zfill(2)
np.savetxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt', train_loss)
np.savetxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt', valid_loss)
model.save('./models/CNN_Model_OBT100_multi_cnn_'+model.name+'_final.h5')
print("done")
#### we show some visualisation here
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
train_loss = np.loadtxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt')
valid_loss = np.loadtxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt')
plt.plot(train_loss, 'b')
plt.plot(valid_loss, 'r')
blue_label = mpatches.Patch(color='blue', label='train_loss')
red_label = mpatches.Patch(color='red', label='valid_loss')
plt.legend(handles=[blue_label, red_label])
| gpl-3.0 |
wakatime/wakatime | wakatime/packages/py27/pygments/lexers/sas.py | 4 | 9449 | # -*- coding: utf-8 -*-
"""
pygments.lexers.sas
~~~~~~~~~~~~~~~~~~~
Lexer for SAS.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, String, Text, \
Other, Generic
__all__ = ['SASLexer']
class SASLexer(RegexLexer):
"""
For `SAS <http://www.sas.com/>`_ files.
.. versionadded:: 2.2
"""
# Syntax from syntax/sas.vim by James Kidd <[email protected]>
name = 'SAS'
aliases = ['sas']
filenames = ['*.SAS', '*.sas']
mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
flags = re.IGNORECASE | re.MULTILINE
builtins_macros = (
"bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
"display", "do", "else", "end", "eval", "global", "goto", "if",
"index", "input", "keydef", "label", "left", "length", "let",
"local", "lowcase", "macro", "mend", "nrquote",
"nrstr", "put", "qleft", "qlowcase", "qscan",
"qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
"str", "substr", "superq", "syscall", "sysevalf", "sysexec",
"sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
"then", "to", "trim", "unquote", "until", "upcase", "verify",
"while", "window"
)
builtins_conditionals = (
"do", "if", "then", "else", "end", "until", "while"
)
builtins_statements = (
"abort", "array", "attrib", "by", "call", "cards", "cards4",
"catname", "continue", "datalines", "datalines4", "delete", "delim",
"delimiter", "display", "dm", "drop", "endsas", "error", "file",
"filename", "footnote", "format", "goto", "in", "infile", "informat",
"input", "keep", "label", "leave", "length", "libname", "link",
"list", "lostcard", "merge", "missing", "modify", "options", "output",
"out", "page", "put", "redirect", "remove", "rename", "replace",
"retain", "return", "select", "set", "skip", "startsas", "stop",
"title", "update", "waitsas", "where", "window", "x", "systask"
)
builtins_sql = (
"add", "and", "alter", "as", "cascade", "check", "create",
"delete", "describe", "distinct", "drop", "foreign", "from",
"group", "having", "index", "insert", "into", "in", "key", "like",
"message", "modify", "msgtype", "not", "null", "on", "or",
"order", "primary", "references", "reset", "restrict", "select",
"set", "table", "unique", "update", "validate", "view", "where"
)
builtins_functions = (
"abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
"attrn", "band", "betainv", "blshift", "bnot", "bor",
"brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
"close", "cnonct", "collate", "compbl", "compound",
"compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
"daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
"datejul", "datepart", "datetime", "day", "dclose", "depdb",
"depdbsl", "depsl", "depsyd",
"deptab", "dequote", "dhms", "dif", "digamma",
"dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
"dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
"fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
"fexist", "fget", "fileexist", "filename", "fileref",
"finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
"fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
"fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
"fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
"hbound", "hms", "hosthelp", "hour", "ibessel", "index",
"indexc", "indexw", "input", "inputc", "inputn", "int",
"intck", "intnx", "intrr", "irr", "jbessel", "juldate",
"kurtosis", "lag", "lbound", "left", "length", "lgamma",
"libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
"logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
"mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
"normal", "note", "npv", "open", "ordinal", "pathname",
"pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
"probbeta", "probbnml", "probchi", "probf", "probgam",
"probhypr", "probit", "probnegb", "probnorm", "probt",
"put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
"ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
"rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
"rewind", "right", "round", "saving", "scan", "sdf", "second",
"sign", "sin", "sinh", "skewness", "soundex", "spedis",
"sqrt", "std", "stderr", "stfips", "stname", "stnamel",
"substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
"sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
"tnonct", "today", "translate", "tranwrd", "trigamma",
"trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
"varfmt", "varinfmt", "varlabel", "varlen", "varname",
"varnum", "varray", "varrayx", "vartype", "verify", "vformat",
"vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
"vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
"vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
"vinformatw", "vinformatwx", "vinformatx", "vlabel",
"vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
"vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
"zipnamel", "zipstate"
)
tokens = {
'root': [
include('comments'),
include('proc-data'),
include('cards-datalines'),
include('logs'),
include('general'),
(r'.', Text),
],
# SAS comments can span multiple lines regardless, but a '*' comment is ended by ';'
'comments': [
(r'^\s*\*.*?;', Comment),
(r'/\*.*?\*/', Comment),
(r'^\s*\*(.|\n)*?;', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
],
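# Illustrative inputs the comment rules above are meant to match:
#   * a star comment runs to the next semicolon ;
#   /* a block comment
#      may span lines */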
# Special highlight for proc, data, quit, run
'proc-data': [
(r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
Keyword.Reserved),
],
# Special highlight cards and datalines
'cards-datalines': [
(r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
],
'data': [
(r'(.|\n)*^\s*;\s*$', Other, '#pop'),
],
# Special highlight for put NOTE|ERROR|WARNING (order matters)
'logs': [
(r'\n?^\s*%?put ', Keyword, 'log-messages'),
],
'log-messages': [
(r'NOTE(:|-).*', Generic, '#pop'),
(r'WARNING(:|-).*', Generic.Emph, '#pop'),
(r'ERROR(:|-).*', Generic.Error, '#pop'),
include('general'),
],
'general': [
include('keywords'),
include('vars-strings'),
include('special'),
include('numbers'),
],
# Keywords, statements, functions, macros
'keywords': [
(words(builtins_statements,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_sql,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_conditionals,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_macros,
prefix = r'%',
suffix = r'\b'),
Name.Builtin),
(words(builtins_functions,
prefix = r'\b',
suffix = r'\('),
Name.Builtin),
],
# Strings and user-defined variables and macros (order matters)
'vars-strings': [
(r'&[a-z_]\w{0,31}\.?', Name.Variable),
(r'%[a-z_]\w{0,31}', Name.Function),
(r'\'', String, 'string_squote'),
(r'"', String, 'string_dquote'),
],
'string_squote': [
('\'', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
# AFAIK, macro variables are not evaluated in single quotes
# (r'&', Name.Variable, 'validvar'),
(r'[^$\'\\]+', String),
(r'[$\'\\]', String),
],
'string_dquote': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'&', Name.Variable, 'validvar'),
(r'[^$&"\\]+', String),
(r'[$"\\]', String),
],
'validvar': [
(r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
],
# SAS numbers and special variables
'numbers': [
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
Number),
],
'special': [
(r'(null|missing|_all_|_automatic_|_character_|_n_|'
r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
Keyword.Constant),
],
# 'operators': [
# (r'(-|=|<=|>=|<|>|<>|&|!=|'
# r'\||\*|\+|\^|/|!|~|~=)', Operator)
# ],
}
| bsd-3-clause |
Cubitect/ASMModSuit | ASMVillageMarker.py | 1 | 5318 | import SRenderLib
from asmutils import *
def create_mod(util):
print '\nSearching for mappings for ASMVillageMarker...'
SRenderLib.setup_lib(util)
lines = util.readj('World')
pos = findOps(lines,0,[['.field','protected',';'],['.field','protected','Z'],['.field','protected',';'],['.field','protected',';']])
util.setmap('VillageCollection',betweenr(lines[pos],'L',';'))
util.setmap('World.villageCollectionObj',endw(lines[pos],2))
pos = findOps(lines,pos+1,[['.method','public','()L'+util.getmap('VillageCollection')]])
if pos is not None:
util.setmap('World.getVillageCollection',endw(lines[pos],3))
lines = util.readj('VillageCollection')
pos = findOps(lines,0,[['.method','public','()Ljava/util/List']])
util.setmap('VillageCollection.getVillageList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('Village',betweenr(lines[pos],')L',';'))
lines = util.readj('Village')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('Village.getCenter',endw(lines[pos],3))
util.setmap('BlockPos',betweenr(lines[pos],')L',';'))
pos = findOps(lines,pos+1,[['.method','public','()I']])
util.setmap('Village.getVillageRadius',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public','()Ljava/util/List']])
util.setmap('Village.getVillageDoorInfoList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('VillageDoorInfo',betweenr(lines[pos],')L',';'))
lines = util.readj('VillageDoorInfo')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('VillageDoorInfo.getDoorBlockPos',endw(lines[pos],3))
lines = util.readj('BlockPos')
pos = findOps(lines,0,[['.super']])
util.setmap('Vec3i',endw(lines[pos],1))
lines = util.readj('Vec3i')
pos = findOps(lines,0, [['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getX',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getY',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getZ',endw(lines[pos-1],3))
print 'Applying ASMVillageMarker patch...'
util.setmap('ASMVillageMarker','villagemarker/ASMVillageMarker')
lines = util.readt('ASMVillageMarker')
lines = '\1'.join(lines)
lines = lines.replace('net/minecraft/server/integrated/IntegratedServer', util.getmap('IntegratedServer'))
lines = lines.replace('net/minecraft/client/entity/EntityPlayerSP', util.getmap('EntityPlayerSP'))
lines = lines.replace('net/minecraft/client/Minecraft', util.getmap('Minecraft'))
lines = lines.replace('net/minecraft/world/WorldServer', util.getmap('WorldServer'))
lines = lines.replace('net/minecraft/util/math/BlockPos', util.getmap('BlockPos'))
lines = lines.replace('net/minecraft/village/VillageCollection', util.getmap('VillageCollection'))
lines = lines.replace('net/minecraft/village/VillageDoorInfo', util.getmap('VillageDoorInfo'))
lines = lines.replace('net/minecraft/village/Village', util.getmap('Village'))
lines = lines.replace('thePlayer', util.getmap('Minecraft.thePlayer'))
lines = lines.replace('dimension', util.getmap('Entity.dimension'))
lines = lines.replace('isSingleplayer', util.getmap('Minecraft.isSingleplayer'))
lines = lines.replace('worldServerForDimension', util.getmap('MinecraftServer.worldServerForDimension'))
lines = lines.replace('getVillageDoorInfoList', util.getmap('Village.getVillageDoorInfoList'))
lines = lines.replace('getVillageCollection', util.getmap('World.getVillageCollection'))
lines = lines.replace('getVillageRadius', util.getmap('Village.getVillageRadius'))
lines = lines.replace('getVillageList', util.getmap('VillageCollection.getVillageList'))
lines = lines.replace('getDoorBlockPos', util.getmap('VillageDoorInfo.getDoorBlockPos'))
lines = lines.replace('getIntegratedServer', util.getmap('Minecraft.getIntegratedServer'))
lines = lines.replace('getMinecraft', util.getmap('Minecraft.getMinecraft'))
lines = lines.replace('getCenter', util.getmap('Village.getCenter'))
lines = lines.replace('getX', util.getmap('Vec3i.getX'))
lines = lines.replace('getY', util.getmap('Vec3i.getY'))
lines = lines.replace('getZ', util.getmap('Vec3i.getZ'))
lines = lines.split('\1')
util.write2mod('ASMVillageMarker',lines)
print 'Injecting render call...'
lines = util.readj('EntityRenderer')
pos = 0
while True:
pos = findOps(lines,pos+1,[['ldc','culling']])
if pos is None:
break
pos = findOps(lines,pos+1,[['dload'],['dload'],['dload']])
playerX = endw(lines[pos-2],1)
playerY = endw(lines[pos-1],1)
playerZ = endw(lines[pos ],1)
pos = findOps(lines,pos+1,[['ldc','aboveClouds']])
pos = goBackTo(lines,pos,['invokevirtual'])
lines.insert(pos+1,'dload '+playerX+'\n')
lines.insert(pos+2,'dload '+playerY+'\n')
lines.insert(pos+3,'dload '+playerZ+'\n')
lines.insert(pos+4,'invokestatic Method '+util.getmap('ASMVillageMarker')+' render (DDD)V\n')
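# The four inserted lines above load the player's x/y/z doubles and call the
# mod's static hook, so the patched method effectively executes
# ASMVillageMarker.render(playerX, playerY, playerZ) right after the
# invokevirtual preceding the 'aboveClouds' constant.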
util.write2mod('EntityRenderer',lines)
| gpl-3.0 |
dyn888/youtube-dl | youtube_dl/extractor/behindkink.py | 145 | 1645 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import url_basename
class BehindKinkIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
_TEST = {
'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
'info_dict': {
'id': '37127',
'ext': 'mp4',
'title': 'What are you passionate about – Marley Blaze',
'description': 'md5:aee8e9611b4ff70186f752975d9b94b4',
'upload_date': '20141205',
'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
webpage = self._download_webpage(url, display_id)
video_url = self._search_regex(
r'<source src="([^"]+)"', webpage, 'video URL')
video_id = url_basename(video_url).split('_')[0]
upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day')
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': self._og_search_title(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'upload_date': upload_date,
'age_limit': 18,
}
| unlicense |
shadowmint/nwidget | lib/cocos2d-0.5.5/test/test_menu_items.py | 1 | 2268 | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, q"
tags = "menu items, ToggleMenuItem, MultipleMenuItem, MenuItem, EntryMenuItem, ImageMenuItem, ColorMenuItem"
from pyglet import image
from pyglet.gl import *
from pyglet import font
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from operator import setslice
def printf(*args):
sys.stdout.write(''.join([str(x) for x in args])+'\n')
class MainMenu(Menu):
def __init__( self ):
super( MainMenu, self ).__init__("Test Menu Items")
# then add the items
item1= ToggleMenuItem('ToggleMenuItem: ', self.on_toggle_callback, True )
resolutions = ['320x200','640x480','800x600', '1024x768', '1200x1024']
item2= MultipleMenuItem('MultipleMenuItem: ',
self.on_multiple_callback,
resolutions)
item3 = MenuItem('MenuItem', self.on_callback )
item4 = EntryMenuItem('EntryMenuItem:', self.on_entry_callback, 'value',
max_length=8)
item5 = ImageMenuItem('imagemenuitem.png', self.on_image_callback)
colors = [(255, 255, 255), (129, 255, 100), (50, 50, 100), (255, 200, 150)]
item6 = ColorMenuItem('ColorMenuItem:', self.on_color_callback, colors)
self.create_menu( [item1,item2,item3,item4,item5,item6] )
def on_quit( self ):
pyglet.app.exit()
def on_multiple_callback(self, idx ):
print 'multiple item callback', idx
def on_toggle_callback(self, b ):
print 'toggle item callback', b
def on_callback(self ):
print 'item callback'
def on_entry_callback (self, value):
print 'entry item callback', value
def on_image_callback (self):
print 'image item callback'
def on_color_callback(self, value):
print 'color item callback:', value
def main():
pyglet.font.add_directory('.')
director.init( resizable=True)
director.run( Scene( MainMenu() ) )
if __name__ == '__main__':
main()
| apache-2.0 |
tangfeng1/flask | tests/test_blueprints.py | 143 | 18147 | # -*- coding: utf-8 -*-
"""
tests.blueprints
~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
def test_blueprint_specific_error_handling():
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
assert c.get('/frontend-no').data == b'frontend says no'
assert c.get('/backend-no').data == b'backend says no'
assert c.get('/what-is-a-sideend').data == b'application itself says no'
def test_blueprint_specific_user_error_handling():
class MyDecoratorException(Exception):
pass
class MyFunctionException(Exception):
pass
blue = flask.Blueprint('blue', __name__)
@blue.errorhandler(MyDecoratorException)
def my_decorator_exception_handler(e):
assert isinstance(e, MyDecoratorException)
return 'boom'
def my_function_exception_handler(e):
assert isinstance(e, MyFunctionException)
return 'bam'
blue.register_error_handler(MyFunctionException, my_function_exception_handler)
@blue.route('/decorator')
def blue_deco_test():
raise MyDecoratorException()
@blue.route('/function')
def blue_func_test():
raise MyFunctionException()
app = flask.Flask(__name__)
app.register_blueprint(blue)
c = app.test_client()
assert c.get('/decorator').data == b'boom'
assert c.get('/function').data == b'bam'
def test_blueprint_url_definitions():
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
assert c.get('/1/foo').data == b'23/42'
assert c.get('/2/foo').data == b'19/42'
assert c.get('/1/bar').data == b'23'
assert c.get('/2/bar').data == b'19'
def test_blueprint_url_processors():
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/de/').data == b'/de/about'
assert c.get('/de/about').data == b'/de/'
def test_templates_and_static(test_apps):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Hello from the Frontend'
rv = c.get('/admin/')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/index2')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/static/test.txt')
assert rv.data.strip() == b'Admin File'
rv.close()
rv = c.get('/admin/static/css/test.css')
assert rv.data.strip() == b'/* nested file */'
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == expected_max_age
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
assert flask.url_for('admin.static', filename='test.txt') == '/admin/static/test.txt'
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
assert e.name == 'missing.html'
else:
assert 0, 'expected exception'
with flask.Flask(__name__).test_request_context():
assert flask.render_template('nested/nested.txt') == 'I\'m nested'
def test_default_static_cache_timeout():
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 100
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(test_apps):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
assert templates == ['admin/index.html', 'frontend/index.html']
def test_dotted_names():
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
assert c.get('/fe').data.strip() == b'/be'
assert c.get('/fe2').data.strip() == b'/fe'
assert c.get('/be').data.strip() == b'/fe'
def test_dotted_names_from_app():
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'/test/'
def test_empty_url_defaults():
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/').data == b'1'
assert c.get('/page/2').data == b'2'
def test_route_decorator_custom_endpoint():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
assert c.get('/').data == b'index'
assert c.get('/py/foo').data == b'bp.foo'
assert c.get('/py/bar').data == b'bp.bar'
assert c.get('/py/bar/123').data == b'bp.123'
assert c.get('/py/bar/foo').data == b'bp.bar_foo'
def test_route_decorator_custom_endpoint_with_dots():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
pytest.raises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
pytest.raises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
assert c.get('/py/foo').data == b'bp.foo'
# The rules didn't actually make it through
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_add_template_test():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
| bsd-3-clause |
iparanza/earthenterprise | earth_enterprise/src/server/wsgi/serve/snippets/util/dbroot_v2_pb2.py | 4 | 184394 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='dbroot_v2.proto',
package='',
  serialized_pb='\n\x0f\x64\x62root_v2.proto\";\n\x10StringEntryProto\x12\x11\n\tstring_id\x18\x01 \x02(\x07\x12\x14\n\x0cstring_value\x18\x02 \x02(\t\"8\n\x14StringIdOrValueProto\x12\x11\n\tstring_id\x18\x01 \x01(\x07\x12\r\n\x05value\x18\x02 \x01(\t\"\xc6\x01\n\x10PlanetModelProto\x12\x18\n\x06radius\x18\x01 \x01(\x01:\x08\x36\x33\x37\x38.137\x12\'\n\nflattening\x18\x02 \x01(\x01:\x13\x30.00335281066474748\x12\x16\n\x0e\x65levation_bias\x18\x04 \x01(\x01\x12\'\n\x1fnegative_altitude_exponent_bias\x18\x05 \x01(\x05\x12.\n&compressed_negative_altitude_threshold\x18\x06 \x01(\x01\"|\n\x11ProviderInfoProto\x12\x13\n\x0bprovider_id\x18\x01 \x02(\x05\x12/\n\x10\x63opyright_string\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12!\n\x15vertical_pixel_offset\x18\x03 \x01(\x05:\x02-1\"\xa2\x01\n\nPopUpProto\x12\x1f\n\x10is_balloon_style\x18\x01 \x01(\x08:\x05\x66\x61lse\x12#\n\x04text\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12)\n\x15\x62\x61\x63kground_color_abgr\x18\x03 \x01(\x07:\n4294967295\x12#\n\x0ftext_color_abgr\x18\x04 \x01(\x07:\n4278190080\"\x9e\x04\n\x13StyleAttributeProto\x12\x10\n\x08style_id\x18\x01 \x02(\t\x12\x13\n\x0bprovider_id\x18\x03 \x01(\x05\x12#\n\x0fpoly_color_abgr\x18\x04 \x01(\x07:\n4294967295\x12#\n\x0fline_color_abgr\x18\x05 \x01(\x07:\n4294967295\x12\x15\n\nline_width\x18\x06 \x01(\x02:\x01\x31\x12$\n\x10label_color_abgr\x18\x07 \x01(\x07:\n4294967295\x12\x16\n\x0blabel_scale\x18\x08 \x01(\x02:\x01\x31\x12-\n\x19placemark_icon_color_abgr\x18\t \x01(\x07:\n4294967295\x12\x1f\n\x14placemark_icon_scale\x18\n \x01(\x02:\x01\x31\x12\x32\n\x13placemark_icon_path\x18\x0b \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1b\n\x10placemark_icon_x\x18\x0c \x01(\x05:\x01\x30\x12\x1b\n\x10placemark_icon_y\x18\r \x01(\x05:\x01\x30\x12 \n\x14placemark_icon_width\x18\x0e \x01(\x05:\x02\x33\x32\x12!\n\x15placemark_icon_height\x18\x0f \x01(\x05:\x02\x33\x32\x12\x1b\n\x06pop_up\x18\x10 \x01(\x0b\x32\x0b.PopUpProto\x12!\n\tdraw_flag\x18\x11 \x03(\x0b\x32\x0e.DrawFlagProto\"|\n\rStyleMapProto\x12\x14\n\x0cstyle_map_id\x18\x01 \x02(\x05\x12\x12\n\nchannel_id\x18\x02 \x03(\x05\x12\x1e\n\x16normal_style_attribute\x18\x03 \x01(\x05\x12!\n\x19highlight_style_attribute\x18\x04 \x01(\x05\"4\n\x0eZoomRangeProto\x12\x10\n\x08min_zoom\x18\x01 \x02(\x05\x12\x10\n\x08max_zoom\x18\x02 \x02(\x05\"\xc9\x01\n\rDrawFlagProto\x12\x33\n\x0e\x64raw_flag_type\x18\x01 \x02(\x0e\x32\x1b.DrawFlagProto.DrawFlagType\"\x82\x01\n\x0c\x44rawFlagType\x12\x12\n\x0eTYPE_FILL_ONLY\x10\x01\x12\x15\n\x11TYPE_OUTLINE_ONLY\x10\x02\x12\x19\n\x15TYPE_FILL_AND_OUTLINE\x10\x03\x12\x15\n\x11TYPE_ANTIALIASING\x10\x04\x12\x15\n\x11TYPE_CENTER_LABEL\x10\x05\"\x8c\x01\n\nLayerProto\x12#\n\nzoom_range\x18\x01 \x03(\x0b\x32\x0f.ZoomRangeProto\x12\x1f\n\x13preserve_text_level\x18\x02 \x01(\x05:\x02\x33\x30\x12\x1c\n\x14lod_begin_transition\x18\x04 \x01(\x08\x12\x1a\n\x12lod_end_transition\x18\x05 \x01(\x08\"*\n\x0b\x46olderProto\x12\x1b\n\ris_expandable\x18\x01 \x01(\x08:\x04true\"\x9e\x01\n\x10RequirementProto\x12\x15\n\rrequired_vram\x18\x03 \x01(\t\x12\x1b\n\x13required_client_ver\x18\x04 \x01(\t\x12\x13\n\x0bprobability\x18\x05 \x01(\t\x12\x1b\n\x13required_user_agent\x18\x06 \x01(\t\x12$\n\x1crequired_client_capabilities\x18\x07 \x01(\t\"`\n\x0bLookAtProto\x12\x11\n\tlongitude\x18\x01 \x02(\x02\x12\x10\n\x08latitude\x18\x02 \x02(\x02\x12\r\n\x05range\x18\x03 \x01(\x02\x12\x0c\n\x04tilt\x18\x04 \x01(\x02\x12\x0f\n\x07heading\x18\x05 \x01(\x02\"\x97\x06\n\x12NestedFeatureProto\x12\x35\n\x0c\x66\x65\x61ture_type\x18\x01 '
      '\x01(\x0e\x32\x1f.NestedFeatureProto.FeatureType\x12&\n\x07kml_url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x14\n\x0c\x64\x61tabase_url\x18\x15 \x01(\t\x12\x1a\n\x05layer\x18\x03 \x01(\x0b\x32\x0b.LayerProto\x12\x1c\n\x06\x66older\x18\x04 \x01(\x0b\x32\x0c.FolderProto\x12&\n\x0brequirement\x18\x05 \x01(\x0b\x32\x11.RequirementProto\x12\x12\n\nchannel_id\x18\x06 \x02(\x05\x12+\n\x0c\x64isplay_name\x18\x07 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x18\n\nis_visible\x18\x08 \x01(\x08:\x04true\x12\x18\n\nis_enabled\x18\t \x01(\x08:\x04true\x12\x19\n\nis_checked\x18\n \x01(\x08:\x05\x66\x61lse\x12-\n\x14layer_menu_icon_path\x18\x0b \x01(\t:\x0ficons/773_l.png\x12*\n\x0b\x64\x65scription\x18\x0c \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x07look_at\x18\r \x01(\x0b\x32\x0c.LookAtProto\x12\x12\n\nasset_uuid\x18\x0f \x01(\t\x12\x1c\n\x0eis_save_locked\x18\x10 \x01(\x08:\x04true\x12%\n\x08\x63hildren\x18\x11 \x03(\x0b\x32\x13.NestedFeatureProto\x12!\n\x19\x63lient_config_script_name\x18\x12 \x01(\t\x12%\n\x19\x64iorama_data_channel_base\x18\x13 \x01(\x05:\x02-1\x12%\n\x19replica_data_channel_base\x18\x14 \x01(\x05:\x02-1\"V\n\x0b\x46\x65\x61tureType\x12\x10\n\x0cTYPE_POINT_Z\x10\x01\x12\x12\n\x0eTYPE_POLYGON_Z\x10\x02\x12\x0f\n\x0bTYPE_LINE_Z\x10\x03\x12\x10\n\x0cTYPE_TERRAIN\x10\x04\"\xd6\x01\n\x16MfeDomainFeaturesProto\x12\x14\n\x0c\x63ountry_code\x18\x01 \x02(\t\x12\x13\n\x0b\x64omain_name\x18\x02 \x02(\t\x12\x44\n\x12supported_features\x18\x03 \x03(\x0e\x32(.MfeDomainFeaturesProto.SupportedFeature\"K\n\x10SupportedFeature\x12\r\n\tGEOCODING\x10\x00\x12\x10\n\x0cLOCAL_SEARCH\x10\x01\x12\x16\n\x12\x44RIVING_DIRECTIONS\x10\x02\"\xb5\x0c\n\x12\x43lientOptionsProto\x12\x1a\n\x12\x64isable_disk_cache\x18\x01 \x01(\x08\x12&\n\x1e\x64isable_embedded_browser_vista\x18\x02 \x01(\x08\x12\x1d\n\x0f\x64raw_atmosphere\x18\x03 \x01(\x08:\x04true\x12\x18\n\ndraw_stars\x18\x04 \x01(\x08:\x04true\x12\x1a\n\x12shader_file_prefix\x18\x05 \x01(\t\x12%\n\x1duse_protobuf_quadtree_packets\x18\x06 \x01(\x08\x12(\n\x1ause_extended_copyright_ids\x18\x07 \x01(\x08:\x04true\x12I\n\x16precipitations_options\x18\x08 \x01(\x0b\x32).ClientOptionsProto.PrecipitationsOptions\x12;\n\x0f\x63\x61pture_options\x18\t \x01(\x0b\x32\".ClientOptionsProto.CaptureOptions\x12\x1f\n\x11show_2d_maps_icon\x18\n \x01(\x08:\x04true\x12 \n\x18\x64isable_internal_browser\x18\x0b \x01(\x08\x12\"\n\x1ainternal_browser_blacklist\x18\x0c \x01(\t\x12,\n!internal_browser_origin_whitelist\x18\r \x01(\t:\x01*\x12 \n\x18polar_tile_merging_level\x18\x0e \x01(\x05\x12:\n\x1bjs_bridge_request_whitelist\x18\x0f \x01(\t:\x15http://*.google.com/*\x12\x35\n\x0cmaps_options\x18\x10 \x01(\x0b\x32\x1f.ClientOptionsProto.MapsOptions\x1a\xd2\x04\n\x15PrecipitationsOptions\x12\x11\n\timage_url\x18\x01 \x01(\t\x12\x1e\n\x11image_expire_time\x18\x02 \x01(\x05:\x03\x39\x30\x30\x12\x1e\n\x12max_color_distance\x18\x03 \x01(\x05:\x02\x32\x30\x12\x16\n\x0bimage_level\x18\x04 \x01(\x05:\x01\x35\x12Q\n\x0fweather_mapping\x18\x05 \x03(\x0b\x32\x38.ClientOptionsProto.PrecipitationsOptions.WeatherMapping\x12\x18\n\x10\x63louds_layer_url\x18\x06 \x01(\t\x12(\n\x1c\x61nimation_deceleration_delay\x18\x07 \x01(\x02:\x02\x32\x30\x1a\xb6\x02\n\x0eWeatherMapping\x12\x12\n\ncolor_abgr\x18\x01 \x02(\r\x12Z\n\x0cweather_type\x18\x02 \x02(\x0e\x32\x44.ClientOptionsProto.PrecipitationsOptions.WeatherMapping.WeatherType\x12\x15\n\nelongation\x18\x03 \x01(\x02:\x01\x31\x12\x0f\n\x07opacity\x18\x04 \x01(\x02\x12\x13\n\x0b\x66og_density\x18\x05 '
      '\x01(\x02\x12\x0e\n\x06speed0\x18\x06 \x01(\x02\x12\x0e\n\x06speed1\x18\x07 \x01(\x02\x12\x0e\n\x06speed2\x18\x08 \x01(\x02\x12\x0e\n\x06speed3\x18\t \x01(\x02\"7\n\x0bWeatherType\x12\x14\n\x10NO_PRECIPITATION\x10\x00\x12\x08\n\x04RAIN\x10\x01\x12\x08\n\x04SNOW\x10\x02\x1a~\n\x0e\x43\x61ptureOptions\x12!\n\x13\x61llow_save_as_image\x18\x01 \x01(\x08:\x04true\x12\"\n\x14max_free_capture_res\x18\x02 \x01(\x05:\x04\x32\x34\x30\x30\x12%\n\x17max_premium_capture_res\x18\x03 \x01(\x05:\x04\x34\x38\x30\x30\x1a\xad\x01\n\x0bMapsOptions\x12\x13\n\x0b\x65nable_maps\x18\x01 \x01(\x08\x12\"\n\x1a\x64ocs_auto_download_enabled\x18\x02 \x01(\x08\x12#\n\x1b\x64ocs_auto_download_interval\x18\x03 \x01(\x05\x12 \n\x18\x64ocs_auto_upload_enabled\x18\x04 \x01(\x08\x12\x1e\n\x16\x64ocs_auto_upload_delay\x18\x05 \x01(\x05\"\xdc\x03\n\x14\x46\x65tchingOptionsProto\x12!\n\x16max_requests_per_query\x18\x01 \x01(\x05:\x01\x31\x12$\n\x1c\x66orce_max_requests_per_query\x18\x0c \x01(\x08\x12\x14\n\x0csort_batches\x18\r \x01(\x08\x12\x17\n\x0cmax_drawable\x18\x02 \x01(\x05:\x01\x32\x12\x16\n\x0bmax_imagery\x18\x03 \x01(\x05:\x01\x32\x12\x16\n\x0bmax_terrain\x18\x04 \x01(\x05:\x01\x35\x12\x17\n\x0cmax_quadtree\x18\x05 \x01(\x05:\x01\x35\x12\x1f\n\x14max_diorama_metadata\x18\x06 \x01(\x05:\x01\x31\x12\x1b\n\x10max_diorama_data\x18\x07 \x01(\x05:\x01\x30\x12#\n\x18max_consumer_fetch_ratio\x18\x08 \x01(\x02:\x01\x31\x12!\n\x16max_pro_ec_fetch_ratio\x18\t \x01(\x02:\x01\x30\x12\x18\n\x10safe_overall_qps\x18\n \x01(\x02\x12\x18\n\x10safe_imagery_qps\x18\x0b \x01(\x02\x12\x31\n\x11\x64omains_for_https\x18\x0e \x01(\t:\x16google.com gstatic.com\x12\x16\n\x0ehosts_for_http\x18\x0f \x01(\t\"\x91\x01\n\x17TimeMachineOptionsProto\x12\x12\n\nserver_url\x18\x01 \x01(\t\x12\x16\n\x0eis_timemachine\x18\x02 \x01(\x08\x12\x1a\n\rdwell_time_ms\x18\x03 \x01(\x05:\x03\x35\x30\x30\x12.\n\x1f\x64iscoverability_altitude_meters\x18\x04 \x01(\x05:\x05\x31\x35\x30\x30\x30\"\xe3\x01\n\x13\x41utopiaOptionsProto\x12\x37\n\x13metadata_server_url\x18\x01 \x01(\t:\x1ahttp://cbk0.google.com/cbk\x12\x37\n\x13\x64\x65pthmap_server_url\x18\x02 \x01(\t:\x1ahttp://cbk0.google.com/cbk\x12\x1e\n\x14\x63overage_overlay_url\x18\x03 \x01(\t:\x00\x12\x17\n\x0fmax_imagery_qps\x18\x04 \x01(\x02\x12!\n\x19max_metadata_depthmap_qps\x18\x05 \x01(\x02\"E\n\x0f\x43SIOptionsProto\x12\x1b\n\x13sampling_percentage\x18\x01 \x01(\x05\x12\x15\n\rexperiment_id\x18\x02 \x01(\t\"\xb3\x02\n\x0eSearchTabProto\x12\x12\n\nis_visible\x18\x01 \x02(\x08\x12(\n\ttab_label\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x10\n\x08\x62\x61se_url\x18\x03 \x01(\t\x12\x17\n\x0fviewport_prefix\x18\x04 \x01(\t\x12/\n\tinput_box\x18\x05 \x03(\x0b\x32\x1c.SearchTabProto.InputBoxInfo\x12&\n\x0brequirement\x18\x06 \x01(\x0b\x32\x11.RequirementProto\x1a_\n\x0cInputBoxInfo\x12$\n\x05label\x18\x01 \x02(\x0b\x32\x15.StringIdOrValueProto\x12\x12\n\nquery_verb\x18\x02 \x02(\t\x12\x15\n\rquery_prepend\x18\x03 \x01(\t\"\x90\x03\n\x0c\x43obrandProto\x12\x10\n\x08logo_url\x18\x01 \x02(\t\x12$\n\x07x_coord\x18\x02 \x01(\x0b\x32\x13.CobrandProto.Coord\x12$\n\x07y_coord\x18\x03 \x01(\x0b\x32\x13.CobrandProto.Coord\x12\x36\n\ttie_point\x18\x04 \x01(\x0e\x32\x16.CobrandProto.TiePoint:\x0b\x42OTTOM_LEFT\x12\x16\n\x0bscreen_size\x18\x05 \x01(\x01:\x01\x30\x1a\x35\n\x05\x43oord\x12\x10\n\x05value\x18\x01 \x02(\x01:\x01\x30\x12\x1a\n\x0bis_relative\x18\x02 '
      '\x01(\x08:\x05\x66\x61lse\"\x9a\x01\n\x08TiePoint\x12\x0c\n\x08TOP_LEFT\x10\x00\x12\x0e\n\nTOP_CENTER\x10\x01\x12\r\n\tTOP_RIGHT\x10\x02\x12\x0c\n\x08MID_LEFT\x10\x03\x12\x0e\n\nMID_CENTER\x10\x04\x12\r\n\tMID_RIGHT\x10\x05\x12\x0f\n\x0b\x42OTTOM_LEFT\x10\x06\x12\x11\n\rBOTTOM_CENTER\x10\x07\x12\x10\n\x0c\x42OTTOM_RIGHT\x10\x08\"^\n\x18\x44\x61tabaseDescriptionProto\x12,\n\rdatabase_name\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x14\n\x0c\x64\x61tabase_url\x18\x02 \x02(\t\"=\n\x11\x43onfigScriptProto\x12\x13\n\x0bscript_name\x18\x01 \x02(\t\x12\x13\n\x0bscript_data\x18\x02 \x02(\t\"0\n\x10SwoopParamsProto\x12\x1c\n\x14start_dist_in_meters\x18\x01 \x01(\x01\"\xc4\x01\n\x12PostingServerProto\x12#\n\x04name\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\'\n\x08\x62\x61se_url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10post_wizard_path\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10\x66ile_submit_path\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\"a\n\x16PlanetaryDatabaseProto\x12\"\n\x03url\x18\x01 \x02(\x0b\x32\x15.StringIdOrValueProto\x12#\n\x04name\x18\x02 \x02(\x0b\x32\x15.StringIdOrValueProto\"b\n\x0eLogServerProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x0e\n\x06\x65nable\x18\x02 \x01(\x08\x12\x1c\n\x11throttling_factor\x18\x03 \x01(\x05:\x01\x31\"\xff+\n\x0f\x45ndSnippetProto\x12 \n\x05model\x18\x01 \x01(\x0b\x32\x11.PlanetModelProto\x12.\n\x0f\x61uth_server_url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1e\n\x16\x64isable_authentication\x18\x03 \x01(\x08\x12,\n\x0bmfe_domains\x18\x04 \x03(\x0b\x32\x17.MfeDomainFeaturesProto\x12 \n\x0emfe_lang_param\x18\x05 \x01(\t:\x08hl=$[hl]\x12\x18\n\x10\x61\x64s_url_patterns\x18\x06 \x01(\t\x12\x33\n\x14reverse_geocoder_url\x18\x07 \x01(\x0b\x32\x15.StringIdOrValueProto\x12,\n!reverse_geocoder_protocol_version\x18\x08 \x01(\x05:\x01\x33\x12\'\n\x19sky_database_is_available\x18\t \x01(\x08:\x04true\x12/\n\x10sky_database_url\x18\n \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x38\n\x19\x64\x65\x66\x61ult_web_page_intl_url\x18\x0b \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x11num_start_up_tips\x18\x0c \x01(\x05:\x02\x31\x37\x12\x30\n\x11start_up_tips_url\x18\r \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x15num_pro_start_up_tips\x18\x33 \x01(\x05\x12\x34\n\x15pro_start_up_tips_url\x18\x34 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x34\n\x15startup_tips_intl_url\x18@ \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x32\n\x13user_guide_intl_url\x18\x0e \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x36\n\x17support_center_intl_url\x18\x0f \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x38\n\x19\x62usiness_listing_intl_url\x18\x10 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x36\n\x17support_answer_intl_url\x18\x11 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16support_topic_intl_url\x18\x12 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x37\n\x18support_request_intl_url\x18\x13 \x01(\x0b\x32\x15.StringIdOrValueProto\x12-\n\x0e\x65\x61rth_intl_url\x18\x14 \x01(\x0b\x32\x15.StringIdOrValueProto\x12.\n\x0f\x61\x64\x64_content_url\x18\x15 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x39\n\x1asketchup_not_installed_url\x18\x16 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12sketchup_error_url\x18\x17 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10\x66ree_license_url\x18\x18 \x01(\x0b\x32\x15.StringIdOrValueProto\x12.\n\x0fpro_license_url\x18\x19 \x01(\x0b\x32\x15.StringIdOrValueProto\x12+\n\x0ctutorial_url\x18\x30 '
      '\x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16keyboard_shortcuts_url\x18\x31 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x30\n\x11release_notes_url\x18\x32 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x0ehide_user_data\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0buse_ge_logo\x18\x1b \x01(\x08:\x04true\x12;\n\x1c\x64iorama_description_url_base\x18\x1c \x01(\x0b\x32\x15.StringIdOrValueProto\x12)\n\x15\x64iorama_default_color\x18\x1d \x01(\r:\n4291281607\x12\x34\n\x15\x64iorama_blacklist_url\x18\x35 \x01(\x0b\x32\x15.StringIdOrValueProto\x12+\n\x0e\x63lient_options\x18\x1e \x01(\x0b\x32\x13.ClientOptionsProto\x12/\n\x10\x66\x65tching_options\x18\x1f \x01(\x0b\x32\x15.FetchingOptionsProto\x12\x36\n\x14time_machine_options\x18  \x01(\x0b\x32\x18.TimeMachineOptionsProto\x12%\n\x0b\x63si_options\x18! \x01(\x0b\x32\x10.CSIOptionsProto\x12#\n\nsearch_tab\x18\" \x03(\x0b\x32\x0f.SearchTabProto\x12#\n\x0c\x63obrand_info\x18# \x03(\x0b\x32\r.CobrandProto\x12\x31\n\x0evalid_database\x18$ \x03(\x0b\x32\x19.DatabaseDescriptionProto\x12)\n\rconfig_script\x18% \x03(\x0b\x32\x12.ConfigScriptProto\x12\x30\n\x11\x64\x65\x61uth_server_url\x18& \x01(\x0b\x32\x15.StringIdOrValueProto\x12+\n\x10swoop_parameters\x18\' \x01(\x0b\x32\x11.SwoopParamsProto\x12,\n\x0f\x62\x62s_server_info\x18( \x01(\x0b\x32\x13.PostingServerProto\x12\x33\n\x16\x64\x61ta_error_server_info\x18) \x01(\x0b\x32\x13.PostingServerProto\x12\x33\n\x12planetary_database\x18* \x03(\x0b\x32\x17.PlanetaryDatabaseProto\x12#\n\nlog_server\x18+ \x01(\x0b\x32\x0f.LogServerProto\x12-\n\x0f\x61utopia_options\x18, \x01(\x0b\x32\x14.AutopiaOptionsProto\x12\x39\n\rsearch_config\x18\x36 \x01(\x0b\x32\".EndSnippetProto.SearchConfigProto\x12\x35\n\x0bsearch_info\x18- \x01(\x0b\x32 .EndSnippetProto.SearchInfoProto\x12N\n\x1a\x65levation_service_base_url\x18. \x01(\t:*http://maps.google.com/maps/api/elevation/\x12*\n\x1d\x65levation_profile_query_delay\x18/ \x01(\x05:\x03\x35\x30\x30\x12.\n\x0fpro_upgrade_url\x18\x37 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x32\n\x13\x65\x61rth_community_url\x18\x38 \x01(\x0b\x32\x15.StringIdOrValueProto\x12.\n\x0fgoogle_maps_url\x18\x39 \x01(\x0b\x32\x15.StringIdOrValueProto\x12*\n\x0bsharing_url\x18: \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12privacy_policy_url\x18; \x01(\x0b\x32\x15.StringIdOrValueProto\x12\"\n\x13\x64o_gplus_user_check\x18< \x01(\x08:\x05\x66\x61lse\x12?\n\x13rocktree_data_proto\x18= \x01(\x0b\x32\".EndSnippetProto.RockTreeDataProto\x12?\n\x10\x66ilmstrip_config\x18> \x03(\x0b\x32%.EndSnippetProto.FilmstripConfigProto\x12\x1a\n\x12show_signin_button\x18? '
      '\x01(\x08\x12\x35\n\x16pro_measure_upsell_url\x18\x41 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x33\n\x14pro_print_upsell_url\x18\x42 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x37\n\x0fstar_data_proto\x18\x43 \x01(\x0b\x32\x1e.EndSnippetProto.StarDataProto\x12+\n\x0c\x66\x65\x65\x64\x62\x61\x63k_url\x18\x44 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\xcc\n\n\x11SearchConfigProto\x12\x46\n\rsearch_server\x18\x01 \x03(\x0b\x32/.EndSnippetProto.SearchConfigProto.SearchServer\x12M\n\x0eonebox_service\x18\x02 \x03(\x0b\x32\x35.EndSnippetProto.SearchConfigProto.OneboxServiceProto\x12-\n\x0ekml_search_url\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12-\n\x0ekml_render_url\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12search_history_url\x18\x06 \x01(\x0b\x32\x15.StringIdOrValueProto\x12-\n\x0e\x65rror_page_url\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\xf4\x06\n\x0cSearchServer\x12#\n\x04name\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\"\n\x03url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12Y\n\x04type\x18\x03 \x01(\x0e\x32:.EndSnippetProto.SearchConfigProto.SearchServer.ResultType:\x0fRESULT_TYPE_KML\x12\x31\n\x12html_transform_url\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x30\n\x11kml_transform_url\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x12W\n\x0fsupplemental_ui\x18\x06 \x01(\x0b\x32>.EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi\x12)\n\nsuggestion\x18\t \x03(\x0b\x32\x15.StringIdOrValueProto\x12Q\n\tsearchlet\x18\x07 \x03(\x0b\x32>.EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto\x12\'\n\x0crequirements\x18\x08 \x01(\x0b\x32\x11.RequirementProto\x12-\n\x0esuggest_server\x18\n \x01(\x0b\x32\x15.StringIdOrValueProto\x1ao\n\x0eSupplementalUi\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12$\n\x05label\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x13\n\x06height\x18\x03 \x01(\x05:\x03\x31\x36\x30\x1a\x82\x01\n\x0eSearchletProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12#\n\x04name\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\'\n\x0crequirements\x18\x03 \x01(\x0b\x32\x11.RequirementProto\"6\n\nResultType\x12\x13\n\x0fRESULT_TYPE_KML\x10\x00\x12\x13\n\x0fRESULT_TYPE_XML\x10\x01\x1ai\n\x12OneboxServiceProto\x12*\n\x0bservice_url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\'\n\x0crequirements\x18\x02 \x01(\x0b\x32\x11.RequirementProto\x1a]\n\x0fSearchInfoProto\x12\x30\n\x0b\x64\x65\x66\x61ult_url\x18\x01 \x01(\t:\x1bhttp://maps.google.com/maps\x12\x18\n\rgeocode_param\x18\x02 \x01(\t:\x01q\x1a\x37\n\x11RockTreeDataProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\x91\x06\n\x14\x46ilmstripConfigProto\x12\'\n\x0crequirements\x18\x01 \x01(\x0b\x32\x11.RequirementProto\x12\x34\n\x15\x61lleycat_url_template\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12=\n\x1e\x66\x61llback_alleycat_url_template\x18\t \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x34\n\x15metadata_url_template\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16thumbnail_url_template\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10kml_url_template\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12\x66\x65\x61tured_tours_url\x18\x06 \x01(\x0b\x32\x15.StringIdOrValueProto\x12 \n\x18\x65nable_viewport_fallback\x18\x07 \x01(\x08\x12\"\n\x1aviewport_fallback_distance\x18\x08 \x01(\r\x12T\n\x0cimagery_type\x18\n '
      '\x03(\x0b\x32>.EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto\x1a\xed\x01\n\x18\x41lleycatImageryTypeProto\x12\x17\n\x0fimagery_type_id\x18\x01 \x01(\x05\x12\x1a\n\x12imagery_type_label\x18\x02 \x01(\t\x12\x34\n\x15metadata_url_template\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16thumbnail_url_template\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10kml_url_template\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\x33\n\rStarDataProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\"b\n\x0e\x44\x62RootRefProto\x12\x0b\n\x03url\x18\x02 \x02(\t\x12\x1a\n\x0bis_critical\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\'\n\x0crequirements\x18\x03 \x01(\x0b\x32\x11.RequirementProto\"0\n\x14\x44\x61tabaseVersionProto\x12\x18\n\x10quadtree_version\x18\x01 \x02(\r\"\xb6\x04\n\x0b\x44\x62RootProto\x12,\n\rdatabase_name\x18\x0f \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x0fimagery_present\x18\x01 \x01(\x08:\x04true\x12\x1c\n\rproto_imagery\x18\x0e \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0fterrain_present\x18\x02 \x01(\x08:\x05\x66\x61lse\x12)\n\rprovider_info\x18\x03 \x03(\x0b\x32\x12.ProviderInfoProto\x12+\n\x0enested_feature\x18\x04 \x03(\x0b\x32\x13.NestedFeatureProto\x12-\n\x0fstyle_attribute\x18\x05 \x03(\x0b\x32\x14.StyleAttributeProto\x12!\n\tstyle_map\x18\x06 \x03(\x0b\x32\x0e.StyleMapProto\x12%\n\x0b\x65nd_snippet\x18\x07 \x01(\x0b\x32\x10.EndSnippetProto\x12,\n\x11translation_entry\x18\x08 \x03(\x0b\x32\x11.StringEntryProto\x12\x14\n\x08language\x18\t \x01(\t:\x02\x65n\x12\x12\n\x07version\x18\n \x01(\x05:\x01\x35\x12)\n\x10\x64\x62root_reference\x18\x0b \x03(\x0b\x32\x0f.DbRootRefProto\x12/\n\x10\x64\x61tabase_version\x18\r \x01(\x0b\x32\x15.DatabaseVersionProto\x12\x17\n\x0frefresh_timeout\x18\x10 \x01(\x05\"\xa9\x01\n\x14\x45ncryptedDbRootProto\x12=\n\x0f\x65ncryption_type\x18\x01 \x01(\x0e\x32$.EncryptedDbRootProto.EncryptionType\x12\x17\n\x0f\x65ncryption_data\x18\x02 \x01(\x0c\x12\x13\n\x0b\x64\x62root_data\x18\x03 \x01(\x0c\"$\n\x0e\x45ncryptionType\x12\x12\n\x0e\x45NCRYPTION_XOR\x10\x00')
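
# ---------------------------------------------------------------------------
# Everything below is the protoc-generated, in-memory mirror of the messages
# declared in dbroot_v2.proto.  Each descriptor's serialized_start and
# serialized_end are byte offsets into the serialized_pb blob above, so the
# two must stay in sync; neither should be edited by hand.
#
# A minimal usage sketch (a guess at typical use -- it assumes this module is
# importable as dbroot_v2_pb2 and that the generated message classes are
# defined further down, as protoc normally emits them):
#
#   import dbroot_v2_pb2                    # hypothetical module name
#   dbroot = dbroot_v2_pb2.DbRootProto()
#   dbroot.ParseFromString(raw_bytes)       # raw_bytes: a serialized dbroot
#   print dbroot.database_version.quadtree_version
# ---------------------------------------------------------------------------
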
_DRAWFLAGPROTO_DRAWFLAGTYPE = descriptor.EnumDescriptor(
name='DrawFlagType',
full_name='DrawFlagProto.DrawFlagType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TYPE_FILL_ONLY', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_OUTLINE_ONLY', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FILL_AND_OUTLINE', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_ANTIALIASING', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_CENTER_LABEL', index=4, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1427,
serialized_end=1557,
)
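
# Note: in each EnumValueDescriptor above, ``index`` is the zero-based
# position of the value within the enum, while ``number`` is the value that
# actually goes on the wire.  DrawFlagType numbers its values from 1, so the
# two differ by one here.
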
_NESTEDFEATUREPROTO_FEATURETYPE = descriptor.EnumDescriptor(
name='FeatureType',
full_name='NestedFeatureProto.FeatureType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TYPE_POINT_Z', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_POLYGON_Z', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_LINE_Z', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_TERRAIN', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2711,
serialized_end=2797,
)
_MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE = descriptor.EnumDescriptor(
name='SupportedFeature',
full_name='MfeDomainFeaturesProto.SupportedFeature',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='GEOCODING', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LOCAL_SEARCH', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DRIVING_DIRECTIONS', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2939,
serialized_end=3014,
)
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE = descriptor.EnumDescriptor(
name='WeatherType',
full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.WeatherType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='NO_PRECIPITATION', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RAIN', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='SNOW', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4247,
serialized_end=4302,
)
_COBRANDPROTO_TIEPOINT = descriptor.EnumDescriptor(
name='TiePoint',
full_name='CobrandProto.TiePoint',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TOP_LEFT', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TOP_CENTER', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TOP_RIGHT', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MID_LEFT', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MID_CENTER', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MID_RIGHT', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BOTTOM_LEFT', index=6, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BOTTOM_CENTER', index=7, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BOTTOM_RIGHT', index=8, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6093,
serialized_end=6247,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE = descriptor.EnumDescriptor(
name='ResultType',
full_name='EndSnippetProto.SearchConfigProto.SearchServer.ResultType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='RESULT_TYPE_KML', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RESULT_TYPE_XML', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=11334,
serialized_end=11388,
)
_ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE = descriptor.EnumDescriptor(
name='EncryptionType',
full_name='EncryptedDbRootProto.EncryptionType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='ENCRYPTION_XOR', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=13343,
serialized_end=13379,
)
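
# The message descriptors below use the numeric field codes from
# google.protobuf.descriptor.FieldDescriptor rather than symbolic names.
# The codes that appear in this file decode as:
#
#   label: 1 = optional, 2 = required, 3 = repeated
#   type:  1 = double, 2 = float, 5 = int32, 7 = fixed32, 8 = bool,
#          9 = string, 11 = message, 13 = uint32, 14 = enum
#
# The symbolic constants can be used to read them back, e.g.:
#
#   from google.protobuf import descriptor
#   assert descriptor.FieldDescriptor.TYPE_STRING == 9
#   assert descriptor.FieldDescriptor.LABEL_REQUIRED == 2
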
_STRINGENTRYPROTO = descriptor.Descriptor(
name='StringEntryProto',
full_name='StringEntryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='string_id', full_name='StringEntryProto.string_id', index=0,
number=1, type=7, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='string_value', full_name='StringEntryProto.string_value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=19,
serialized_end=78,
)
_STRINGIDORVALUEPROTO = descriptor.Descriptor(
name='StringIdOrValueProto',
full_name='StringIdOrValueProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='string_id', full_name='StringIdOrValueProto.string_id', index=0,
number=1, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='StringIdOrValueProto.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=80,
serialized_end=136,
)
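
# StringIdOrValueProto is the indirection used by most *_url and display
# strings in this file: a message can either reference an entry in the shared
# string table by string_id or carry the literal text inline in value (both
# fields are optional, unlike StringEntryProto's required pair above).
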
_PLANETMODELPROTO = descriptor.Descriptor(
name='PlanetModelProto',
full_name='PlanetModelProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='radius', full_name='PlanetModelProto.radius', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=6378.137,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='flattening', full_name='PlanetModelProto.flattening', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0.00335281066474748,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elevation_bias', full_name='PlanetModelProto.elevation_bias', index=2,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='negative_altitude_exponent_bias', full_name='PlanetModelProto.negative_altitude_exponent_bias', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='compressed_negative_altitude_threshold', full_name='PlanetModelProto.compressed_negative_altitude_threshold', index=4,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=139,
serialized_end=337,
)
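
# The defaults above encode the WGS84 ellipsoid: an equatorial radius of
# 6378.137 km and a flattening of 1/298.257223563 ~= 0.00335281066474748.
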
_PROVIDERINFOPROTO = descriptor.Descriptor(
name='ProviderInfoProto',
full_name='ProviderInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='provider_id', full_name='ProviderInfoProto.provider_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='copyright_string', full_name='ProviderInfoProto.copyright_string', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='vertical_pixel_offset', full_name='ProviderInfoProto.vertical_pixel_offset', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=339,
serialized_end=463,
)
_POPUPPROTO = descriptor.Descriptor(
name='PopUpProto',
full_name='PopUpProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='is_balloon_style', full_name='PopUpProto.is_balloon_style', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='text', full_name='PopUpProto.text', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='background_color_abgr', full_name='PopUpProto.background_color_abgr', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='text_color_abgr', full_name='PopUpProto.text_color_abgr', index=3,
number=4, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4278190080,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=466,
serialized_end=628,
)
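
# The *_color_abgr fields are ABGR values packed into a fixed32, alpha in the
# high byte, then blue, green, red.  The defaults above are 4294967295
# (0xFFFFFFFF, opaque white) and 4278190080 (0xFF000000, opaque black).
# A small decoding sketch:
#
#   def unpack_abgr(c):
#       """Split a packed fixed32 colour into its (a, b, g, r) bytes."""
#       return ((c >> 24) & 0xff, (c >> 16) & 0xff,
#               (c >> 8) & 0xff, c & 0xff)
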
_STYLEATTRIBUTEPROTO = descriptor.Descriptor(
name='StyleAttributeProto',
full_name='StyleAttributeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='style_id', full_name='StyleAttributeProto.style_id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='provider_id', full_name='StyleAttributeProto.provider_id', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='poly_color_abgr', full_name='StyleAttributeProto.poly_color_abgr', index=2,
number=4, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='line_color_abgr', full_name='StyleAttributeProto.line_color_abgr', index=3,
number=5, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='line_width', full_name='StyleAttributeProto.line_width', index=4,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label_color_abgr', full_name='StyleAttributeProto.label_color_abgr', index=5,
number=7, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label_scale', full_name='StyleAttributeProto.label_scale', index=6,
number=8, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_color_abgr', full_name='StyleAttributeProto.placemark_icon_color_abgr', index=7,
number=9, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_scale', full_name='StyleAttributeProto.placemark_icon_scale', index=8,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_path', full_name='StyleAttributeProto.placemark_icon_path', index=9,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_x', full_name='StyleAttributeProto.placemark_icon_x', index=10,
number=12, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_y', full_name='StyleAttributeProto.placemark_icon_y', index=11,
number=13, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_width', full_name='StyleAttributeProto.placemark_icon_width', index=12,
number=14, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=32,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_height', full_name='StyleAttributeProto.placemark_icon_height', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=32,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pop_up', full_name='StyleAttributeProto.pop_up', index=14,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='draw_flag', full_name='StyleAttributeProto.draw_flag', index=15,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=631,
serialized_end=1173,
)
_STYLEMAPPROTO = descriptor.Descriptor(
name='StyleMapProto',
full_name='StyleMapProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='style_map_id', full_name='StyleMapProto.style_map_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='channel_id', full_name='StyleMapProto.channel_id', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normal_style_attribute', full_name='StyleMapProto.normal_style_attribute', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='highlight_style_attribute', full_name='StyleMapProto.highlight_style_attribute', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1175,
serialized_end=1299,
)
_ZOOMRANGEPROTO = descriptor.Descriptor(
name='ZoomRangeProto',
full_name='ZoomRangeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='min_zoom', full_name='ZoomRangeProto.min_zoom', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_zoom', full_name='ZoomRangeProto.max_zoom', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1301,
serialized_end=1353,
)
_DRAWFLAGPROTO = descriptor.Descriptor(
name='DrawFlagProto',
full_name='DrawFlagProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='draw_flag_type', full_name='DrawFlagProto.draw_flag_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_DRAWFLAGPROTO_DRAWFLAGTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1356,
serialized_end=1557,
)
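
# DrawFlagProto's enum_types list is what ties the nested DrawFlagType
# descriptor defined earlier back to its containing message.  The required
# draw_flag_type field defaults to 1, i.e. TYPE_FILL_ONLY, the first value
# of that enum.
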
_LAYERPROTO = descriptor.Descriptor(
name='LayerProto',
full_name='LayerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='zoom_range', full_name='LayerProto.zoom_range', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='preserve_text_level', full_name='LayerProto.preserve_text_level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=30,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='lod_begin_transition', full_name='LayerProto.lod_begin_transition', index=2,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='lod_end_transition', full_name='LayerProto.lod_end_transition', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1560,
serialized_end=1700,
)
_FOLDERPROTO = descriptor.Descriptor(
name='FolderProto',
full_name='FolderProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='is_expandable', full_name='FolderProto.is_expandable', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1702,
serialized_end=1744,
)
_REQUIREMENTPROTO = descriptor.Descriptor(
name='RequirementProto',
full_name='RequirementProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='required_vram', full_name='RequirementProto.required_vram', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='required_client_ver', full_name='RequirementProto.required_client_ver', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='probability', full_name='RequirementProto.probability', index=2,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='required_user_agent', full_name='RequirementProto.required_user_agent', index=3,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='required_client_capabilities', full_name='RequirementProto.required_client_capabilities', index=4,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1747,
serialized_end=1905,
)
_LOOKATPROTO = descriptor.Descriptor(
name='LookAtProto',
full_name='LookAtProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='longitude', full_name='LookAtProto.longitude', index=0,
number=1, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='latitude', full_name='LookAtProto.latitude', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='range', full_name='LookAtProto.range', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tilt', full_name='LookAtProto.tilt', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='heading', full_name='LookAtProto.heading', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1907,
serialized_end=2003,
)
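
# LookAtProto mirrors KML's <LookAt> camera: presumably longitude/latitude
# in degrees and range in meters, as in KML, though the .proto itself does
# not say so.
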
_NESTEDFEATUREPROTO = descriptor.Descriptor(
name='NestedFeatureProto',
full_name='NestedFeatureProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='feature_type', full_name='NestedFeatureProto.feature_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_url', full_name='NestedFeatureProto.kml_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='database_url', full_name='NestedFeatureProto.database_url', index=2,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='layer', full_name='NestedFeatureProto.layer', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='folder', full_name='NestedFeatureProto.folder', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirement', full_name='NestedFeatureProto.requirement', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='channel_id', full_name='NestedFeatureProto.channel_id', index=6,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='display_name', full_name='NestedFeatureProto.display_name', index=7,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_visible', full_name='NestedFeatureProto.is_visible', index=8,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_enabled', full_name='NestedFeatureProto.is_enabled', index=9,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_checked', full_name='NestedFeatureProto.is_checked', index=10,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='layer_menu_icon_path', full_name='NestedFeatureProto.layer_menu_icon_path', index=11,
number=11, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("icons/773_l.png", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='description', full_name='NestedFeatureProto.description', index=12,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='look_at', full_name='NestedFeatureProto.look_at', index=13,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='asset_uuid', full_name='NestedFeatureProto.asset_uuid', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_save_locked', full_name='NestedFeatureProto.is_save_locked', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='children', full_name='NestedFeatureProto.children', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_config_script_name', full_name='NestedFeatureProto.client_config_script_name', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_data_channel_base', full_name='NestedFeatureProto.diorama_data_channel_base', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='replica_data_channel_base', full_name='NestedFeatureProto.replica_data_channel_base', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_NESTEDFEATUREPROTO_FEATURETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2006,
serialized_end=2797,
)
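
# NestedFeatureProto is self-referential through its repeated ``children``
# field, so a dbroot describes the entire layer tree as one recursive
# message.  A sketch of walking it (again assuming the generated message
# classes exist):
#
#   def walk(feature, depth=0):
#       # Print the layer tree, indented by nesting depth.
#       print '  ' * depth + feature.display_name.value
#       for child in feature.children:
#           walk(child, depth + 1)
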
_MFEDOMAINFEATURESPROTO = descriptor.Descriptor(
name='MfeDomainFeaturesProto',
full_name='MfeDomainFeaturesProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='country_code', full_name='MfeDomainFeaturesProto.country_code', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='domain_name', full_name='MfeDomainFeaturesProto.domain_name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='supported_features', full_name='MfeDomainFeaturesProto.supported_features', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2800,
serialized_end=3014,
)
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING = descriptor.Descriptor(
name='WeatherMapping',
full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='color_abgr', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.color_abgr', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weather_type', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.weather_type', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elongation', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.elongation', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='opacity', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.opacity', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fog_density', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.fog_density', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed0', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed0', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed1', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed1', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed2', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed2', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed3', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed3', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3992,
serialized_end=4302,
)
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS = descriptor.Descriptor(
name='PrecipitationsOptions',
full_name='ClientOptionsProto.PrecipitationsOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='image_url', full_name='ClientOptionsProto.PrecipitationsOptions.image_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='image_expire_time', full_name='ClientOptionsProto.PrecipitationsOptions.image_expire_time', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=900,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_color_distance', full_name='ClientOptionsProto.PrecipitationsOptions.max_color_distance', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=20,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='image_level', full_name='ClientOptionsProto.PrecipitationsOptions.image_level', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weather_mapping', full_name='ClientOptionsProto.PrecipitationsOptions.weather_mapping', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='clouds_layer_url', full_name='ClientOptionsProto.PrecipitationsOptions.clouds_layer_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='animation_deceleration_delay', full_name='ClientOptionsProto.PrecipitationsOptions.animation_deceleration_delay', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=20,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3708,
serialized_end=4302,
)
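# --- Editor's note (not part of the protoc output) -------------------------
# Illustration only (the message class name is assumed; the concrete classes
# are synthesized later in this generated module): the explicit proto2
# defaults declared just above surface on a fresh message without any field
# ever being set, e.g.
#   opts = ClientOptionsProto()
#   opts.precipitations_options.image_expire_time   # -> 900
#   opts.precipitations_options.max_color_distance  # -> 20
#   opts.precipitations_options.image_level         # -> 5
# ---------------------------------------------------------------------------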
_CLIENTOPTIONSPROTO_CAPTUREOPTIONS = descriptor.Descriptor(
name='CaptureOptions',
full_name='ClientOptionsProto.CaptureOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='allow_save_as_image', full_name='ClientOptionsProto.CaptureOptions.allow_save_as_image', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_free_capture_res', full_name='ClientOptionsProto.CaptureOptions.max_free_capture_res', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_premium_capture_res', full_name='ClientOptionsProto.CaptureOptions.max_premium_capture_res', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4800,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4304,
serialized_end=4430,
)
_CLIENTOPTIONSPROTO_MAPSOPTIONS = descriptor.Descriptor(
name='MapsOptions',
full_name='ClientOptionsProto.MapsOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='enable_maps', full_name='ClientOptionsProto.MapsOptions.enable_maps', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_download_enabled', full_name='ClientOptionsProto.MapsOptions.docs_auto_download_enabled', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_download_interval', full_name='ClientOptionsProto.MapsOptions.docs_auto_download_interval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_upload_enabled', full_name='ClientOptionsProto.MapsOptions.docs_auto_upload_enabled', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_upload_delay', full_name='ClientOptionsProto.MapsOptions.docs_auto_upload_delay', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4433,
serialized_end=4606,
)
_CLIENTOPTIONSPROTO = descriptor.Descriptor(
name='ClientOptionsProto',
full_name='ClientOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='disable_disk_cache', full_name='ClientOptionsProto.disable_disk_cache', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disable_embedded_browser_vista', full_name='ClientOptionsProto.disable_embedded_browser_vista', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='draw_atmosphere', full_name='ClientOptionsProto.draw_atmosphere', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='draw_stars', full_name='ClientOptionsProto.draw_stars', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='shader_file_prefix', full_name='ClientOptionsProto.shader_file_prefix', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_protobuf_quadtree_packets', full_name='ClientOptionsProto.use_protobuf_quadtree_packets', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_extended_copyright_ids', full_name='ClientOptionsProto.use_extended_copyright_ids', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='precipitations_options', full_name='ClientOptionsProto.precipitations_options', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='capture_options', full_name='ClientOptionsProto.capture_options', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='show_2d_maps_icon', full_name='ClientOptionsProto.show_2d_maps_icon', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disable_internal_browser', full_name='ClientOptionsProto.disable_internal_browser', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='internal_browser_blacklist', full_name='ClientOptionsProto.internal_browser_blacklist', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='internal_browser_origin_whitelist', full_name='ClientOptionsProto.internal_browser_origin_whitelist', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("*", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='polar_tile_merging_level', full_name='ClientOptionsProto.polar_tile_merging_level', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='js_bridge_request_whitelist', full_name='ClientOptionsProto.js_bridge_request_whitelist', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://*.google.com/*", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='maps_options', full_name='ClientOptionsProto.maps_options', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS, _CLIENTOPTIONSPROTO_CAPTUREOPTIONS, _CLIENTOPTIONSPROTO_MAPSOPTIONS, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3017,
serialized_end=4606,
)
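# --- Editor's note (not part of the protoc output) -------------------------
# The Descriptor objects above are only metadata; in protobuf 2.x generated
# modules the concrete message classes are created by pairing each Descriptor
# with the GeneratedProtocolMessageType metaclass from
# google.protobuf.reflection. A minimal sketch of that mechanism follows; the
# helper name is hypothetical, defined for illustration only and never called.
def _example_build_message_class(descriptor_obj):
    """Return a Message subclass driven by `descriptor_obj` (illustrative)."""
    from google.protobuf import message as _message
    from google.protobuf import reflection as _reflection
    # Calling the metaclass directly is equivalent to the Python 2 pattern
    # protoc emits further down in this file:
    #   class ClientOptionsProto(message.Message):
    #     __metaclass__ = reflection.GeneratedProtocolMessageType
    #     DESCRIPTOR = _CLIENTOPTIONSPROTO
    return _reflection.GeneratedProtocolMessageType(
        str(descriptor_obj.name), (_message.Message,),
        {'DESCRIPTOR': descriptor_obj, '__module__': __name__})
# ---------------------------------------------------------------------------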
_FETCHINGOPTIONSPROTO = descriptor.Descriptor(
name='FetchingOptionsProto',
full_name='FetchingOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='max_requests_per_query', full_name='FetchingOptionsProto.max_requests_per_query', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='force_max_requests_per_query', full_name='FetchingOptionsProto.force_max_requests_per_query', index=1,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sort_batches', full_name='FetchingOptionsProto.sort_batches', index=2,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_drawable', full_name='FetchingOptionsProto.max_drawable', index=3,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_imagery', full_name='FetchingOptionsProto.max_imagery', index=4,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_terrain', full_name='FetchingOptionsProto.max_terrain', index=5,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_quadtree', full_name='FetchingOptionsProto.max_quadtree', index=6,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_diorama_metadata', full_name='FetchingOptionsProto.max_diorama_metadata', index=7,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_diorama_data', full_name='FetchingOptionsProto.max_diorama_data', index=8,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_consumer_fetch_ratio', full_name='FetchingOptionsProto.max_consumer_fetch_ratio', index=9,
number=8, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_pro_ec_fetch_ratio', full_name='FetchingOptionsProto.max_pro_ec_fetch_ratio', index=10,
number=9, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='safe_overall_qps', full_name='FetchingOptionsProto.safe_overall_qps', index=11,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='safe_imagery_qps', full_name='FetchingOptionsProto.safe_imagery_qps', index=12,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='domains_for_https', full_name='FetchingOptionsProto.domains_for_https', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("google.com gstatic.com", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='hosts_for_http', full_name='FetchingOptionsProto.hosts_for_http', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4609,
serialized_end=5085,
)
_TIMEMACHINEOPTIONSPROTO = descriptor.Descriptor(
name='TimeMachineOptionsProto',
full_name='TimeMachineOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='server_url', full_name='TimeMachineOptionsProto.server_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_timemachine', full_name='TimeMachineOptionsProto.is_timemachine', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dwell_time_ms', full_name='TimeMachineOptionsProto.dwell_time_ms', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=500,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='discoverability_altitude_meters', full_name='TimeMachineOptionsProto.discoverability_altitude_meters', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5088,
serialized_end=5233,
)
_AUTOPIAOPTIONSPROTO = descriptor.Descriptor(
name='AutopiaOptionsProto',
full_name='AutopiaOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='metadata_server_url', full_name='AutopiaOptionsProto.metadata_server_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://cbk0.google.com/cbk", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='depthmap_server_url', full_name='AutopiaOptionsProto.depthmap_server_url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://cbk0.google.com/cbk", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='coverage_overlay_url', full_name='AutopiaOptionsProto.coverage_overlay_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_imagery_qps', full_name='AutopiaOptionsProto.max_imagery_qps', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_metadata_depthmap_qps', full_name='AutopiaOptionsProto.max_metadata_depthmap_qps', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5236,
serialized_end=5463,
)
_CSIOPTIONSPROTO = descriptor.Descriptor(
name='CSIOptionsProto',
full_name='CSIOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='sampling_percentage', full_name='CSIOptionsProto.sampling_percentage', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='experiment_id', full_name='CSIOptionsProto.experiment_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5465,
serialized_end=5534,
)
_SEARCHTABPROTO_INPUTBOXINFO = descriptor.Descriptor(
name='InputBoxInfo',
full_name='SearchTabProto.InputBoxInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='label', full_name='SearchTabProto.InputBoxInfo.label', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='query_verb', full_name='SearchTabProto.InputBoxInfo.query_verb', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='query_prepend', full_name='SearchTabProto.InputBoxInfo.query_prepend', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5749,
serialized_end=5844,
)
_SEARCHTABPROTO = descriptor.Descriptor(
name='SearchTabProto',
full_name='SearchTabProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='is_visible', full_name='SearchTabProto.is_visible', index=0,
number=1, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tab_label', full_name='SearchTabProto.tab_label', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='base_url', full_name='SearchTabProto.base_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='viewport_prefix', full_name='SearchTabProto.viewport_prefix', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='input_box', full_name='SearchTabProto.input_box', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirement', full_name='SearchTabProto.requirement', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SEARCHTABPROTO_INPUTBOXINFO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5537,
serialized_end=5844,
)
_COBRANDPROTO_COORD = descriptor.Descriptor(
name='Coord',
full_name='CobrandProto.Coord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='value', full_name='CobrandProto.Coord.value', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_relative', full_name='CobrandProto.Coord.is_relative', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6037,
serialized_end=6090,
)
_COBRANDPROTO = descriptor.Descriptor(
name='CobrandProto',
full_name='CobrandProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='logo_url', full_name='CobrandProto.logo_url', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='x_coord', full_name='CobrandProto.x_coord', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='y_coord', full_name='CobrandProto.y_coord', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tie_point', full_name='CobrandProto.tie_point', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=6,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='screen_size', full_name='CobrandProto.screen_size', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COBRANDPROTO_COORD, ],
enum_types=[
_COBRANDPROTO_TIEPOINT,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5847,
serialized_end=6247,
)
_DATABASEDESCRIPTIONPROTO = descriptor.Descriptor(
name='DatabaseDescriptionProto',
full_name='DatabaseDescriptionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='database_name', full_name='DatabaseDescriptionProto.database_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='database_url', full_name='DatabaseDescriptionProto.database_url', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6249,
serialized_end=6343,
)
_CONFIGSCRIPTPROTO = descriptor.Descriptor(
name='ConfigScriptProto',
full_name='ConfigScriptProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='script_name', full_name='ConfigScriptProto.script_name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='script_data', full_name='ConfigScriptProto.script_data', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6345,
serialized_end=6406,
)
_SWOOPPARAMSPROTO = descriptor.Descriptor(
name='SwoopParamsProto',
full_name='SwoopParamsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='start_dist_in_meters', full_name='SwoopParamsProto.start_dist_in_meters', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6408,
serialized_end=6456,
)
_POSTINGSERVERPROTO = descriptor.Descriptor(
name='PostingServerProto',
full_name='PostingServerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='PostingServerProto.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='base_url', full_name='PostingServerProto.base_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='post_wizard_path', full_name='PostingServerProto.post_wizard_path', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='file_submit_path', full_name='PostingServerProto.file_submit_path', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6459,
serialized_end=6655,
)
_PLANETARYDATABASEPROTO = descriptor.Descriptor(
name='PlanetaryDatabaseProto',
full_name='PlanetaryDatabaseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='PlanetaryDatabaseProto.url', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='PlanetaryDatabaseProto.name', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6657,
serialized_end=6754,
)
_LOGSERVERPROTO = descriptor.Descriptor(
name='LogServerProto',
full_name='LogServerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='LogServerProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enable', full_name='LogServerProto.enable', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='throttling_factor', full_name='LogServerProto.throttling_factor', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6756,
serialized_end=6854,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI = descriptor.Descriptor(
name='SupplementalUi',
full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi.label', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='height', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi.height', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=160,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11088,
serialized_end=11199,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO = descriptor.Descriptor(
name='SearchletProto',
full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto.name', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto.requirements', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11202,
serialized_end=11332,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER = descriptor.Descriptor(
name='SearchServer',
full_name='EndSnippetProto.SearchConfigProto.SearchServer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='EndSnippetProto.SearchConfigProto.SearchServer.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='EndSnippetProto.SearchConfigProto.SearchServer.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='html_transform_url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.html_transform_url', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_transform_url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.kml_transform_url', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='supplemental_ui', full_name='EndSnippetProto.SearchConfigProto.SearchServer.supplemental_ui', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='suggestion', full_name='EndSnippetProto.SearchConfigProto.SearchServer.suggestion', index=6,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='searchlet', full_name='EndSnippetProto.SearchConfigProto.SearchServer.searchlet', index=7,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.SearchConfigProto.SearchServer.requirements', index=8,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='suggest_server', full_name='EndSnippetProto.SearchConfigProto.SearchServer.suggest_server', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI, _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO, ],
enum_types=[
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=10504,
serialized_end=11388,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO = descriptor.Descriptor(
name='OneboxServiceProto',
full_name='EndSnippetProto.SearchConfigProto.OneboxServiceProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='service_url', full_name='EndSnippetProto.SearchConfigProto.OneboxServiceProto.service_url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.SearchConfigProto.OneboxServiceProto.requirements', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11390,
serialized_end=11495,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO = descriptor.Descriptor(
name='SearchConfigProto',
full_name='EndSnippetProto.SearchConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='search_server', full_name='EndSnippetProto.SearchConfigProto.search_server', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='onebox_service', full_name='EndSnippetProto.SearchConfigProto.onebox_service', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_search_url', full_name='EndSnippetProto.SearchConfigProto.kml_search_url', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_render_url', full_name='EndSnippetProto.SearchConfigProto.kml_render_url', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_history_url', full_name='EndSnippetProto.SearchConfigProto.search_history_url', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='error_page_url', full_name='EndSnippetProto.SearchConfigProto.error_page_url', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER, _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=10139,
serialized_end=11495,
)
_ENDSNIPPETPROTO_SEARCHINFOPROTO = descriptor.Descriptor(
name='SearchInfoProto',
full_name='EndSnippetProto.SearchInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='default_url', full_name='EndSnippetProto.SearchInfoProto.default_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://maps.google.com/maps", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='geocode_param', full_name='EndSnippetProto.SearchInfoProto.geocode_param', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("q", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11497,
serialized_end=11590,
)
_ENDSNIPPETPROTO_ROCKTREEDATAPROTO = descriptor.Descriptor(
name='RockTreeDataProto',
full_name='EndSnippetProto.RockTreeDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.RockTreeDataProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11592,
serialized_end=11647,
)
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO = descriptor.Descriptor(
name='AlleycatImageryTypeProto',
full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='imagery_type_id', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.imagery_type_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='imagery_type_label', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.imagery_type_label', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata_url_template', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.metadata_url_template', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='thumbnail_url_template', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.thumbnail_url_template', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_url_template', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.kml_url_template', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12198,
serialized_end=12435,
)
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO = descriptor.Descriptor(
name='FilmstripConfigProto',
full_name='EndSnippetProto.FilmstripConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.FilmstripConfigProto.requirements', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='alleycat_url_template', full_name='EndSnippetProto.FilmstripConfigProto.alleycat_url_template', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fallback_alleycat_url_template', full_name='EndSnippetProto.FilmstripConfigProto.fallback_alleycat_url_template', index=2,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata_url_template', full_name='EndSnippetProto.FilmstripConfigProto.metadata_url_template', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='thumbnail_url_template', full_name='EndSnippetProto.FilmstripConfigProto.thumbnail_url_template', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_url_template', full_name='EndSnippetProto.FilmstripConfigProto.kml_url_template', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='featured_tours_url', full_name='EndSnippetProto.FilmstripConfigProto.featured_tours_url', index=6,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enable_viewport_fallback', full_name='EndSnippetProto.FilmstripConfigProto.enable_viewport_fallback', index=7,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='viewport_fallback_distance', full_name='EndSnippetProto.FilmstripConfigProto.viewport_fallback_distance', index=8,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='imagery_type', full_name='EndSnippetProto.FilmstripConfigProto.imagery_type', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11650,
serialized_end=12435,
)
_ENDSNIPPETPROTO_STARDATAPROTO = descriptor.Descriptor(
name='StarDataProto',
full_name='EndSnippetProto.StarDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.StarDataProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12437,
serialized_end=12488,
)
_ENDSNIPPETPROTO = descriptor.Descriptor(
name='EndSnippetProto',
full_name='EndSnippetProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='model', full_name='EndSnippetProto.model', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='auth_server_url', full_name='EndSnippetProto.auth_server_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disable_authentication', full_name='EndSnippetProto.disable_authentication', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mfe_domains', full_name='EndSnippetProto.mfe_domains', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mfe_lang_param', full_name='EndSnippetProto.mfe_lang_param', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("hl=$[hl]", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='ads_url_patterns', full_name='EndSnippetProto.ads_url_patterns', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reverse_geocoder_url', full_name='EndSnippetProto.reverse_geocoder_url', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reverse_geocoder_protocol_version', full_name='EndSnippetProto.reverse_geocoder_protocol_version', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sky_database_is_available', full_name='EndSnippetProto.sky_database_is_available', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sky_database_url', full_name='EndSnippetProto.sky_database_url', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='default_web_page_intl_url', full_name='EndSnippetProto.default_web_page_intl_url', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='num_start_up_tips', full_name='EndSnippetProto.num_start_up_tips', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=17,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='start_up_tips_url', full_name='EndSnippetProto.start_up_tips_url', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='num_pro_start_up_tips', full_name='EndSnippetProto.num_pro_start_up_tips', index=13,
number=51, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_start_up_tips_url', full_name='EndSnippetProto.pro_start_up_tips_url', index=14,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='startup_tips_intl_url', full_name='EndSnippetProto.startup_tips_intl_url', index=15,
number=64, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='user_guide_intl_url', full_name='EndSnippetProto.user_guide_intl_url', index=16,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_center_intl_url', full_name='EndSnippetProto.support_center_intl_url', index=17,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='business_listing_intl_url', full_name='EndSnippetProto.business_listing_intl_url', index=18,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_answer_intl_url', full_name='EndSnippetProto.support_answer_intl_url', index=19,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_topic_intl_url', full_name='EndSnippetProto.support_topic_intl_url', index=20,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_request_intl_url', full_name='EndSnippetProto.support_request_intl_url', index=21,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='earth_intl_url', full_name='EndSnippetProto.earth_intl_url', index=22,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='add_content_url', full_name='EndSnippetProto.add_content_url', index=23,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sketchup_not_installed_url', full_name='EndSnippetProto.sketchup_not_installed_url', index=24,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sketchup_error_url', full_name='EndSnippetProto.sketchup_error_url', index=25,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='free_license_url', full_name='EndSnippetProto.free_license_url', index=26,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_license_url', full_name='EndSnippetProto.pro_license_url', index=27,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tutorial_url', full_name='EndSnippetProto.tutorial_url', index=28,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='keyboard_shortcuts_url', full_name='EndSnippetProto.keyboard_shortcuts_url', index=29,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='release_notes_url', full_name='EndSnippetProto.release_notes_url', index=30,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='hide_user_data', full_name='EndSnippetProto.hide_user_data', index=31,
number=26, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_ge_logo', full_name='EndSnippetProto.use_ge_logo', index=32,
number=27, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_description_url_base', full_name='EndSnippetProto.diorama_description_url_base', index=33,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_default_color', full_name='EndSnippetProto.diorama_default_color', index=34,
number=29, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4291281607,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_blacklist_url', full_name='EndSnippetProto.diorama_blacklist_url', index=35,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_options', full_name='EndSnippetProto.client_options', index=36,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fetching_options', full_name='EndSnippetProto.fetching_options', index=37,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='time_machine_options', full_name='EndSnippetProto.time_machine_options', index=38,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='csi_options', full_name='EndSnippetProto.csi_options', index=39,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_tab', full_name='EndSnippetProto.search_tab', index=40,
number=34, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cobrand_info', full_name='EndSnippetProto.cobrand_info', index=41,
number=35, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='valid_database', full_name='EndSnippetProto.valid_database', index=42,
number=36, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='config_script', full_name='EndSnippetProto.config_script', index=43,
number=37, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deauth_server_url', full_name='EndSnippetProto.deauth_server_url', index=44,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='swoop_parameters', full_name='EndSnippetProto.swoop_parameters', index=45,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='bbs_server_info', full_name='EndSnippetProto.bbs_server_info', index=46,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='data_error_server_info', full_name='EndSnippetProto.data_error_server_info', index=47,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='planetary_database', full_name='EndSnippetProto.planetary_database', index=48,
number=42, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='log_server', full_name='EndSnippetProto.log_server', index=49,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='autopia_options', full_name='EndSnippetProto.autopia_options', index=50,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_config', full_name='EndSnippetProto.search_config', index=51,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_info', full_name='EndSnippetProto.search_info', index=52,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elevation_service_base_url', full_name='EndSnippetProto.elevation_service_base_url', index=53,
number=46, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://maps.google.com/maps/api/elevation/", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elevation_profile_query_delay', full_name='EndSnippetProto.elevation_profile_query_delay', index=54,
number=47, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=500,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_upgrade_url', full_name='EndSnippetProto.pro_upgrade_url', index=55,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='earth_community_url', full_name='EndSnippetProto.earth_community_url', index=56,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='google_maps_url', full_name='EndSnippetProto.google_maps_url', index=57,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sharing_url', full_name='EndSnippetProto.sharing_url', index=58,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='privacy_policy_url', full_name='EndSnippetProto.privacy_policy_url', index=59,
number=59, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='do_gplus_user_check', full_name='EndSnippetProto.do_gplus_user_check', index=60,
number=60, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rocktree_data_proto', full_name='EndSnippetProto.rocktree_data_proto', index=61,
number=61, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='filmstrip_config', full_name='EndSnippetProto.filmstrip_config', index=62,
number=62, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='show_signin_button', full_name='EndSnippetProto.show_signin_button', index=63,
number=63, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_measure_upsell_url', full_name='EndSnippetProto.pro_measure_upsell_url', index=64,
number=65, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_print_upsell_url', full_name='EndSnippetProto.pro_print_upsell_url', index=65,
number=66, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='star_data_proto', full_name='EndSnippetProto.star_data_proto', index=66,
number=67, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='feedback_url', full_name='EndSnippetProto.feedback_url', index=67,
number=68, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_SEARCHCONFIGPROTO, _ENDSNIPPETPROTO_SEARCHINFOPROTO, _ENDSNIPPETPROTO_ROCKTREEDATAPROTO, _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO, _ENDSNIPPETPROTO_STARDATAPROTO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6857,
serialized_end=12488,
)
_DBROOTREFPROTO = descriptor.Descriptor(
name='DbRootRefProto',
full_name='DbRootRefProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='DbRootRefProto.url', index=0,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_critical', full_name='DbRootRefProto.is_critical', index=1,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='DbRootRefProto.requirements', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12490,
serialized_end=12588,
)
_DATABASEVERSIONPROTO = descriptor.Descriptor(
name='DatabaseVersionProto',
full_name='DatabaseVersionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='quadtree_version', full_name='DatabaseVersionProto.quadtree_version', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12590,
serialized_end=12638,
)
_DBROOTPROTO = descriptor.Descriptor(
name='DbRootProto',
full_name='DbRootProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='database_name', full_name='DbRootProto.database_name', index=0,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='imagery_present', full_name='DbRootProto.imagery_present', index=1,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='proto_imagery', full_name='DbRootProto.proto_imagery', index=2,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='terrain_present', full_name='DbRootProto.terrain_present', index=3,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='provider_info', full_name='DbRootProto.provider_info', index=4,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='nested_feature', full_name='DbRootProto.nested_feature', index=5,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='style_attribute', full_name='DbRootProto.style_attribute', index=6,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='style_map', full_name='DbRootProto.style_map', index=7,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='end_snippet', full_name='DbRootProto.end_snippet', index=8,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='translation_entry', full_name='DbRootProto.translation_entry', index=9,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='language', full_name='DbRootProto.language', index=10,
number=9, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("en", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='version', full_name='DbRootProto.version', index=11,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dbroot_reference', full_name='DbRootProto.dbroot_reference', index=12,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='database_version', full_name='DbRootProto.database_version', index=13,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='refresh_timeout', full_name='DbRootProto.refresh_timeout', index=14,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12641,
serialized_end=13207,
)
_ENCRYPTEDDBROOTPROTO = descriptor.Descriptor(
name='EncryptedDbRootProto',
full_name='EncryptedDbRootProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='encryption_type', full_name='EncryptedDbRootProto.encryption_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='encryption_data', full_name='EncryptedDbRootProto.encryption_data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dbroot_data', full_name='EncryptedDbRootProto.dbroot_data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=13210,
serialized_end=13379,
)
_PROVIDERINFOPROTO.fields_by_name['copyright_string'].message_type = _STRINGIDORVALUEPROTO
_POPUPPROTO.fields_by_name['text'].message_type = _STRINGIDORVALUEPROTO
_STYLEATTRIBUTEPROTO.fields_by_name['placemark_icon_path'].message_type = _STRINGIDORVALUEPROTO
_STYLEATTRIBUTEPROTO.fields_by_name['pop_up'].message_type = _POPUPPROTO
_STYLEATTRIBUTEPROTO.fields_by_name['draw_flag'].message_type = _DRAWFLAGPROTO
_DRAWFLAGPROTO.fields_by_name['draw_flag_type'].enum_type = _DRAWFLAGPROTO_DRAWFLAGTYPE
_DRAWFLAGPROTO_DRAWFLAGTYPE.containing_type = _DRAWFLAGPROTO
_LAYERPROTO.fields_by_name['zoom_range'].message_type = _ZOOMRANGEPROTO
_NESTEDFEATUREPROTO.fields_by_name['feature_type'].enum_type = _NESTEDFEATUREPROTO_FEATURETYPE
_NESTEDFEATUREPROTO.fields_by_name['kml_url'].message_type = _STRINGIDORVALUEPROTO
_NESTEDFEATUREPROTO.fields_by_name['layer'].message_type = _LAYERPROTO
_NESTEDFEATUREPROTO.fields_by_name['folder'].message_type = _FOLDERPROTO
_NESTEDFEATUREPROTO.fields_by_name['requirement'].message_type = _REQUIREMENTPROTO
_NESTEDFEATUREPROTO.fields_by_name['display_name'].message_type = _STRINGIDORVALUEPROTO
_NESTEDFEATUREPROTO.fields_by_name['description'].message_type = _STRINGIDORVALUEPROTO
_NESTEDFEATUREPROTO.fields_by_name['look_at'].message_type = _LOOKATPROTO
_NESTEDFEATUREPROTO.fields_by_name['children'].message_type = _NESTEDFEATUREPROTO
_NESTEDFEATUREPROTO_FEATURETYPE.containing_type = _NESTEDFEATUREPROTO
_MFEDOMAINFEATURESPROTO.fields_by_name['supported_features'].enum_type = _MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE
_MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE.containing_type = _MFEDOMAINFEATURESPROTO
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING.fields_by_name['weather_type'].enum_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING.containing_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE.containing_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS.fields_by_name['weather_mapping'].message_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS.containing_type = _CLIENTOPTIONSPROTO
_CLIENTOPTIONSPROTO_CAPTUREOPTIONS.containing_type = _CLIENTOPTIONSPROTO
_CLIENTOPTIONSPROTO_MAPSOPTIONS.containing_type = _CLIENTOPTIONSPROTO
_CLIENTOPTIONSPROTO.fields_by_name['precipitations_options'].message_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS
_CLIENTOPTIONSPROTO.fields_by_name['capture_options'].message_type = _CLIENTOPTIONSPROTO_CAPTUREOPTIONS
_CLIENTOPTIONSPROTO.fields_by_name['maps_options'].message_type = _CLIENTOPTIONSPROTO_MAPSOPTIONS
_SEARCHTABPROTO_INPUTBOXINFO.fields_by_name['label'].message_type = _STRINGIDORVALUEPROTO
_SEARCHTABPROTO_INPUTBOXINFO.containing_type = _SEARCHTABPROTO
_SEARCHTABPROTO.fields_by_name['tab_label'].message_type = _STRINGIDORVALUEPROTO
_SEARCHTABPROTO.fields_by_name['input_box'].message_type = _SEARCHTABPROTO_INPUTBOXINFO
_SEARCHTABPROTO.fields_by_name['requirement'].message_type = _REQUIREMENTPROTO
_COBRANDPROTO_COORD.containing_type = _COBRANDPROTO
_COBRANDPROTO.fields_by_name['x_coord'].message_type = _COBRANDPROTO_COORD
_COBRANDPROTO.fields_by_name['y_coord'].message_type = _COBRANDPROTO_COORD
_COBRANDPROTO.fields_by_name['tie_point'].enum_type = _COBRANDPROTO_TIEPOINT
_COBRANDPROTO_TIEPOINT.containing_type = _COBRANDPROTO
_DATABASEDESCRIPTIONPROTO.fields_by_name['database_name'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['base_url'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['post_wizard_path'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['file_submit_path'].message_type = _STRINGIDORVALUEPROTO
_PLANETARYDATABASEPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_PLANETARYDATABASEPROTO.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_LOGSERVERPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI.fields_by_name['label'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['type'].enum_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['html_transform_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['kml_transform_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['supplemental_ui'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['suggestion'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['searchlet'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['suggest_server'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO.fields_by_name['service_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['search_server'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['onebox_service'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['kml_search_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['kml_render_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['search_history_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['error_page_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.containing_type = _ENDSNIPPETPROTO
_ENDSNIPPETPROTO_SEARCHINFOPROTO.containing_type = _ENDSNIPPETPROTO
_ENDSNIPPETPROTO_ROCKTREEDATAPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_ROCKTREEDATAPROTO.containing_type = _ENDSNIPPETPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.fields_by_name['metadata_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.fields_by_name['thumbnail_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.fields_by_name['kml_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.containing_type = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['alleycat_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['fallback_alleycat_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['metadata_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['thumbnail_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['kml_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['featured_tours_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['imagery_type'].message_type = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.containing_type = _ENDSNIPPETPROTO
_ENDSNIPPETPROTO_STARDATAPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_STARDATAPROTO.containing_type = _ENDSNIPPETPROTO
_ENDSNIPPETPROTO.fields_by_name['model'].message_type = _PLANETMODELPROTO
_ENDSNIPPETPROTO.fields_by_name['auth_server_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['mfe_domains'].message_type = _MFEDOMAINFEATURESPROTO
_ENDSNIPPETPROTO.fields_by_name['reverse_geocoder_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sky_database_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['default_web_page_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['start_up_tips_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_start_up_tips_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['startup_tips_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['user_guide_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_center_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['business_listing_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_answer_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_topic_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_request_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['earth_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['add_content_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sketchup_not_installed_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sketchup_error_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['free_license_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_license_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['tutorial_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['keyboard_shortcuts_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['release_notes_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['diorama_description_url_base'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['diorama_blacklist_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['client_options'].message_type = _CLIENTOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['fetching_options'].message_type = _FETCHINGOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['time_machine_options'].message_type = _TIMEMACHINEOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['csi_options'].message_type = _CSIOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['search_tab'].message_type = _SEARCHTABPROTO
_ENDSNIPPETPROTO.fields_by_name['cobrand_info'].message_type = _COBRANDPROTO
_ENDSNIPPETPROTO.fields_by_name['valid_database'].message_type = _DATABASEDESCRIPTIONPROTO
_ENDSNIPPETPROTO.fields_by_name['config_script'].message_type = _CONFIGSCRIPTPROTO
_ENDSNIPPETPROTO.fields_by_name['deauth_server_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['swoop_parameters'].message_type = _SWOOPPARAMSPROTO
_ENDSNIPPETPROTO.fields_by_name['bbs_server_info'].message_type = _POSTINGSERVERPROTO
_ENDSNIPPETPROTO.fields_by_name['data_error_server_info'].message_type = _POSTINGSERVERPROTO
_ENDSNIPPETPROTO.fields_by_name['planetary_database'].message_type = _PLANETARYDATABASEPROTO
_ENDSNIPPETPROTO.fields_by_name['log_server'].message_type = _LOGSERVERPROTO
_ENDSNIPPETPROTO.fields_by_name['autopia_options'].message_type = _AUTOPIAOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['search_config'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO
_ENDSNIPPETPROTO.fields_by_name['search_info'].message_type = _ENDSNIPPETPROTO_SEARCHINFOPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_upgrade_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['earth_community_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['google_maps_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sharing_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['privacy_policy_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['rocktree_data_proto'].message_type = _ENDSNIPPETPROTO_ROCKTREEDATAPROTO
_ENDSNIPPETPROTO.fields_by_name['filmstrip_config'].message_type = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_measure_upsell_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_print_upsell_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['star_data_proto'].message_type = _ENDSNIPPETPROTO_STARDATAPROTO
_ENDSNIPPETPROTO.fields_by_name['feedback_url'].message_type = _STRINGIDORVALUEPROTO
_DBROOTREFPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_DBROOTPROTO.fields_by_name['database_name'].message_type = _STRINGIDORVALUEPROTO
_DBROOTPROTO.fields_by_name['provider_info'].message_type = _PROVIDERINFOPROTO
_DBROOTPROTO.fields_by_name['nested_feature'].message_type = _NESTEDFEATUREPROTO
_DBROOTPROTO.fields_by_name['style_attribute'].message_type = _STYLEATTRIBUTEPROTO
_DBROOTPROTO.fields_by_name['style_map'].message_type = _STYLEMAPPROTO
_DBROOTPROTO.fields_by_name['end_snippet'].message_type = _ENDSNIPPETPROTO
_DBROOTPROTO.fields_by_name['translation_entry'].message_type = _STRINGENTRYPROTO
_DBROOTPROTO.fields_by_name['dbroot_reference'].message_type = _DBROOTREFPROTO
_DBROOTPROTO.fields_by_name['database_version'].message_type = _DATABASEVERSIONPROTO
_ENCRYPTEDDBROOTPROTO.fields_by_name['encryption_type'].enum_type = _ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE
_ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE.containing_type = _ENCRYPTEDDBROOTPROTO
DESCRIPTOR.message_types_by_name['StringEntryProto'] = _STRINGENTRYPROTO
DESCRIPTOR.message_types_by_name['StringIdOrValueProto'] = _STRINGIDORVALUEPROTO
DESCRIPTOR.message_types_by_name['PlanetModelProto'] = _PLANETMODELPROTO
DESCRIPTOR.message_types_by_name['ProviderInfoProto'] = _PROVIDERINFOPROTO
DESCRIPTOR.message_types_by_name['PopUpProto'] = _POPUPPROTO
DESCRIPTOR.message_types_by_name['StyleAttributeProto'] = _STYLEATTRIBUTEPROTO
DESCRIPTOR.message_types_by_name['StyleMapProto'] = _STYLEMAPPROTO
DESCRIPTOR.message_types_by_name['ZoomRangeProto'] = _ZOOMRANGEPROTO
DESCRIPTOR.message_types_by_name['DrawFlagProto'] = _DRAWFLAGPROTO
DESCRIPTOR.message_types_by_name['LayerProto'] = _LAYERPROTO
DESCRIPTOR.message_types_by_name['FolderProto'] = _FOLDERPROTO
DESCRIPTOR.message_types_by_name['RequirementProto'] = _REQUIREMENTPROTO
DESCRIPTOR.message_types_by_name['LookAtProto'] = _LOOKATPROTO
DESCRIPTOR.message_types_by_name['NestedFeatureProto'] = _NESTEDFEATUREPROTO
DESCRIPTOR.message_types_by_name['MfeDomainFeaturesProto'] = _MFEDOMAINFEATURESPROTO
DESCRIPTOR.message_types_by_name['ClientOptionsProto'] = _CLIENTOPTIONSPROTO
DESCRIPTOR.message_types_by_name['FetchingOptionsProto'] = _FETCHINGOPTIONSPROTO
DESCRIPTOR.message_types_by_name['TimeMachineOptionsProto'] = _TIMEMACHINEOPTIONSPROTO
DESCRIPTOR.message_types_by_name['AutopiaOptionsProto'] = _AUTOPIAOPTIONSPROTO
DESCRIPTOR.message_types_by_name['CSIOptionsProto'] = _CSIOPTIONSPROTO
DESCRIPTOR.message_types_by_name['SearchTabProto'] = _SEARCHTABPROTO
DESCRIPTOR.message_types_by_name['CobrandProto'] = _COBRANDPROTO
DESCRIPTOR.message_types_by_name['DatabaseDescriptionProto'] = _DATABASEDESCRIPTIONPROTO
DESCRIPTOR.message_types_by_name['ConfigScriptProto'] = _CONFIGSCRIPTPROTO
DESCRIPTOR.message_types_by_name['SwoopParamsProto'] = _SWOOPPARAMSPROTO
DESCRIPTOR.message_types_by_name['PostingServerProto'] = _POSTINGSERVERPROTO
DESCRIPTOR.message_types_by_name['PlanetaryDatabaseProto'] = _PLANETARYDATABASEPROTO
DESCRIPTOR.message_types_by_name['LogServerProto'] = _LOGSERVERPROTO
DESCRIPTOR.message_types_by_name['EndSnippetProto'] = _ENDSNIPPETPROTO
DESCRIPTOR.message_types_by_name['DbRootRefProto'] = _DBROOTREFPROTO
DESCRIPTOR.message_types_by_name['DatabaseVersionProto'] = _DATABASEVERSIONPROTO
DESCRIPTOR.message_types_by_name['DbRootProto'] = _DBROOTPROTO
DESCRIPTOR.message_types_by_name['EncryptedDbRootProto'] = _ENCRYPTEDDBROOTPROTO
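# The assignments above wire each message-typed (type=11) or enum-typed
# (type=14) FieldDescriptor to the descriptor it refers to, set
# containing_type on nested types, and register every top-level message with
# the file DESCRIPTOR; the classes below bind those descriptors to concrete
# Python types via the reflection metaclass.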
class StringEntryProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STRINGENTRYPROTO
# @@protoc_insertion_point(class_scope:StringEntryProto)
class StringIdOrValueProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STRINGIDORVALUEPROTO
# @@protoc_insertion_point(class_scope:StringIdOrValueProto)
class PlanetModelProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLANETMODELPROTO
# @@protoc_insertion_point(class_scope:PlanetModelProto)
class ProviderInfoProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PROVIDERINFOPROTO
# @@protoc_insertion_point(class_scope:ProviderInfoProto)
class PopUpProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POPUPPROTO
# @@protoc_insertion_point(class_scope:PopUpProto)
class StyleAttributeProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STYLEATTRIBUTEPROTO
# @@protoc_insertion_point(class_scope:StyleAttributeProto)
class StyleMapProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STYLEMAPPROTO
# @@protoc_insertion_point(class_scope:StyleMapProto)
class ZoomRangeProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ZOOMRANGEPROTO
# @@protoc_insertion_point(class_scope:ZoomRangeProto)
class DrawFlagProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DRAWFLAGPROTO
# @@protoc_insertion_point(class_scope:DrawFlagProto)
class LayerProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LAYERPROTO
# @@protoc_insertion_point(class_scope:LayerProto)
class FolderProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FOLDERPROTO
# @@protoc_insertion_point(class_scope:FolderProto)
class RequirementProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REQUIREMENTPROTO
# @@protoc_insertion_point(class_scope:RequirementProto)
class LookAtProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKATPROTO
# @@protoc_insertion_point(class_scope:LookAtProto)
class NestedFeatureProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _NESTEDFEATUREPROTO
# @@protoc_insertion_point(class_scope:NestedFeatureProto)
class MfeDomainFeaturesProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MFEDOMAINFEATURESPROTO
# @@protoc_insertion_point(class_scope:MfeDomainFeaturesProto)
class ClientOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class PrecipitationsOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class WeatherMapping(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING
# @@protoc_insertion_point(class_scope:ClientOptionsProto.PrecipitationsOptions.WeatherMapping)
DESCRIPTOR = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS
# @@protoc_insertion_point(class_scope:ClientOptionsProto.PrecipitationsOptions)
class CaptureOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLIENTOPTIONSPROTO_CAPTUREOPTIONS
# @@protoc_insertion_point(class_scope:ClientOptionsProto.CaptureOptions)
class MapsOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLIENTOPTIONSPROTO_MAPSOPTIONS
# @@protoc_insertion_point(class_scope:ClientOptionsProto.MapsOptions)
DESCRIPTOR = _CLIENTOPTIONSPROTO
# @@protoc_insertion_point(class_scope:ClientOptionsProto)
class FetchingOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FETCHINGOPTIONSPROTO
# @@protoc_insertion_point(class_scope:FetchingOptionsProto)
class TimeMachineOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TIMEMACHINEOPTIONSPROTO
# @@protoc_insertion_point(class_scope:TimeMachineOptionsProto)
class AutopiaOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _AUTOPIAOPTIONSPROTO
# @@protoc_insertion_point(class_scope:AutopiaOptionsProto)
class CSIOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CSIOPTIONSPROTO
# @@protoc_insertion_point(class_scope:CSIOptionsProto)
class SearchTabProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class InputBoxInfo(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SEARCHTABPROTO_INPUTBOXINFO
# @@protoc_insertion_point(class_scope:SearchTabProto.InputBoxInfo)
DESCRIPTOR = _SEARCHTABPROTO
# @@protoc_insertion_point(class_scope:SearchTabProto)
class CobrandProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class Coord(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _COBRANDPROTO_COORD
# @@protoc_insertion_point(class_scope:CobrandProto.Coord)
DESCRIPTOR = _COBRANDPROTO
# @@protoc_insertion_point(class_scope:CobrandProto)
class DatabaseDescriptionProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DATABASEDESCRIPTIONPROTO
# @@protoc_insertion_point(class_scope:DatabaseDescriptionProto)
class ConfigScriptProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CONFIGSCRIPTPROTO
# @@protoc_insertion_point(class_scope:ConfigScriptProto)
class SwoopParamsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SWOOPPARAMSPROTO
# @@protoc_insertion_point(class_scope:SwoopParamsProto)
class PostingServerProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POSTINGSERVERPROTO
# @@protoc_insertion_point(class_scope:PostingServerProto)
class PlanetaryDatabaseProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLANETARYDATABASEPROTO
# @@protoc_insertion_point(class_scope:PlanetaryDatabaseProto)
class LogServerProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOGSERVERPROTO
# @@protoc_insertion_point(class_scope:LogServerProto)
class EndSnippetProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class SearchConfigProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class SearchServer(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class SupplementalUi(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi)
class SearchletProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto)
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.SearchServer)
class OneboxServiceProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.OneboxServiceProto)
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto)
class SearchInfoProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHINFOPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchInfoProto)
class RockTreeDataProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_ROCKTREEDATAPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.RockTreeDataProto)
class FilmstripConfigProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class AlleycatImageryTypeProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto)
DESCRIPTOR = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.FilmstripConfigProto)
class StarDataProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_STARDATAPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.StarDataProto)
DESCRIPTOR = _ENDSNIPPETPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto)
class DbRootRefProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DBROOTREFPROTO
# @@protoc_insertion_point(class_scope:DbRootRefProto)
class DatabaseVersionProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DATABASEVERSIONPROTO
# @@protoc_insertion_point(class_scope:DatabaseVersionProto)
class DbRootProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DBROOTPROTO
# @@protoc_insertion_point(class_scope:DbRootProto)
class EncryptedDbRootProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENCRYPTEDDBROOTPROTO
# @@protoc_insertion_point(class_scope:EncryptedDbRootProto)
# @@protoc_insertion_point(module_scope)
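# Usage sketch (illustrative, not part of the generated output): the classes
# above expose the standard protobuf message API, so a serialized dbroot
# payload held in a byte string `data` could be decoded roughly like this:
#
#   root = DbRootProto()
#   root.ParseFromString(data)
#   print root.language, root.version
#   for ref in root.dbroot_reference:
#       print ref.url, ref.is_critical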
#!/usr/bin/env python
import unittest
from test import test_support
import errno
import socket
import select
import _testcapi
import time
import traceback
import Queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True
if that has been possible."""
try:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.bind((host, port))
except (socket.error, socket.gaierror):
return False
else:
sock.close()
return True
HOST = test_support.HOST
MSG = b'Michael Gilfix was here\n'
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
try:
import thread
import threading
except ImportError:
thread = None
threading = None
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = test_support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
        class NewClass(OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is prefixed with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
    Note: the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as when setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = Queue.Queue(1)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
self.__setUp()
if not self.server_ready.is_set():
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if not self.queue.empty():
msg = self.queue.get()
self.fail(msg)
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if not callable(test_func):
raise TypeError("test_func must be a callable function.")
try:
test_func()
except Exception, strerror:
self.queue.put(strerror)
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
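# A minimal sketch of the paired-test convention described in ThreadableTest
# (EchoTest is illustrative, not part of this module): the unprefixed method
# runs in the main server thread, its '_'-prefixed twin in the client thread.
#
#   class EchoTest(SocketConnectedTest):
#       def testEcho(self):
#           self.assertEqual(self.cli_conn.recv(1024), MSG)
#       def _testEcho(self):
#           self.serv_conn.send(MSG)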
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
def raise_error(*args, **kwargs):
raise socket.error
def raise_herror(*args, **kwargs):
raise socket.herror
def raise_gaierror(*args, **kwargs):
raise socket.gaierror
self.assertRaises(socket.error, raise_error,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_herror,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_gaierror,
"Error raising socket exception.")
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(UnicodeEncodeError):
s.sendto(u'\u2620', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertIn('not complex', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', None)
self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(UnicodeEncodeError):
s.sendto(u'\u2620', 0, sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertIn('not complex', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto('foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
if hasattr(sys, "getrefcount"):
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
self.assertEqual(sys.getrefcount(__name__), orig,
"socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1L<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1L<<34)
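    # Sketch of the inverse property exercised above (byte values assume a
    # little-endian host; on big-endian machines htons() is the identity):
    #
    #     socket.htons(0x1234)               -> 0x3412
    #     socket.ntohs(socket.htons(0x1234)) -> 0x1234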
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1L, 2L, 3L ]
bad_values = [ -1, -2, -3, -1L, -2L, -3L ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
        # I've ordered this by services that have both a tcp and a udp
        # entry, at least on modern Linuxes.
if (sys.platform.startswith('linux') or
sys.platform.startswith('freebsd') or
sys.platform.startswith('netbsd') or
sys.platform == 'darwin'):
            # avoid the 'echo' service on these platforms, as there is an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
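    # Hedged usage sketch of the service-database calls checked above (the
    # exact numbers depend on the local services database, e.g. /etc/services):
    #
    #     socket.getservbyname('domain', 'tcp')  -> 53
    #     socket.getservbyport(53, 'udp')        -> 'domain'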
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
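    # The propagation rule tested above in miniature (illustrative only):
    #
    #     socket.setdefaulttimeout(10)
    #     s = socket.socket()              # inherits the module default
    #     assert s.gettimeout() == 10
    #     socket.setdefaulttimeout(None)   # restore the global state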
def testIPv4_inet_aton_fourbytes(self):
if not hasattr(socket, 'inet_aton'):
return # No inet_aton, nothing to check
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255'))
def testIPv4toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
self.assertEqual('\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual('\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual('\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual('\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual('\xff\xff\xff\xff', f('255.255.255.255'))
self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255'))
def testIPv6toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_pton(AF_INET6, a)
self.assertEqual('\x00' * 16, f('::'))
self.assertEqual('\x00' * 16, f('0::0'))
self.assertEqual('\x00\x01' + '\x00' * 14, f('1::'))
self.assertEqual(
'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
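    # For reference, the packed value asserted for '1::' above can be built
    # by hand (a sketch using struct, which this test does not itself import):
    #
    #     import struct
    #     struct.pack('!H', 1) + '\x00' * 14  == inet_pton(AF_INET6, '1::')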
def testStringToIPv4(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
self.assertEqual('1.0.1.0', f('\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f('\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f('\x01\x02\x03\x04'))
self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff'))
def testStringToIPv6(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_ntop(AF_INET6, a)
self.assertEqual('::', f('\x00' * 16))
self.assertEqual('::1', f('\x00' * 15 + '\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
# XXX The following don't test module-level functionality...
def _get_unused_port(self, bind_address='0.0.0.0'):
"""Use a temporary socket to elicit an unused ephemeral port.
Args:
bind_address: Hostname or IP address to search for a port on.
        Returns: A port number that is most likely unused.
"""
tempsock = socket.socket()
tempsock.bind((bind_address, 0))
host, port = tempsock.getsockname()
tempsock.close()
return port
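    # Note: the port returned above is only *likely* to be free -- another
    # process may claim it between close() and the caller's bind().  The
    # intended call pattern, sketched:
    #
    #     port = self._get_unused_port()
    #     sock = socket.socket()
    #     sock.bind(('0.0.0.0', port))   # may still fail in a rare race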
def testSockName(self):
# Testing getsockname()
port = self._get_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR turned off (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, "spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
host = '0.0.0.0'
port = self._get_unused_port(bind_address=host)
big_port = port + 65536
neg_port = port - 65536
sock = socket.socket()
try:
self.assertRaises(OverflowError, sock.bind, (host, big_port))
self.assertRaises(OverflowError, sock.bind, (host, neg_port))
sock.bind((host, port))
finally:
sock.close()
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if SUPPORTS_IPV6:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number (int or long), or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, 80L)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
for family, _, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# Issue 17269
if hasattr(socket, 'AI_NUMERICSERV'):
socket.getaddrinfo("localhost", None, 0, 0, 0, socket.AI_NUMERICSERV)
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * test_support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * test_support.SOCK_MAX_SIZE)
finally:
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
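    # Why math.acosh(0)?  It raises ValueError while also setting the
    # C-level errno (EDOM), so a handler built on it checks that sendall()
    # does not depend on errno surviving a signal handler.  Equivalent
    # hypothetical handler:
    #
    #     def handler(*args):
    #         try:
    #             math.acosh(0)   # clobbers errno with EDOM
    #         except ValueError:
    #             pass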
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
# Issue 15989
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(SUPPORTS_IPV6, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
('::1',0, 0xffffffff), 0)
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
finally:
s.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = ''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, 'f' * 2048)
def _testSendAll(self):
big_chunk = 'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
if not hasattr(socket, "fromfd"):
return # On Windows, this doesn't exist
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), '')
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
# Issue 15989
if _testcapi.UINT_MAX < _testcapi.ULONG_MAX:
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
def _testSetBlocking(self):
pass
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
bufsize = -1 # Use default buffer size
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
SocketConnectedTest.setUp(self)
self.serv_file = self.cli_conn.makefile('rb', self.bufsize)
def tearDown(self):
self.serv_file.close()
self.assertTrue(self.serv_file.closed)
SocketConnectedTest.tearDown(self)
self.serv_file = None
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.cli_file = self.serv_conn.makefile('wb')
def clientTearDown(self):
self.cli_file.close()
self.assertTrue(self.cli_file.closed)
self.cli_file = None
SocketConnectedTest.clientTearDown(self)
def testSmallRead(self):
# Performing small file read test
first_seg = self.serv_file.read(len(MSG)-3)
second_seg = self.serv_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, MSG)
def _testSmallRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testFullRead(self):
# read until EOF
msg = self.serv_file.read()
self.assertEqual(msg, MSG)
def _testFullRead(self):
self.cli_file.write(MSG)
self.cli_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = ''
while 1:
char = self.serv_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, MSG)
def _testUnbufferedRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadline(self):
# Performing file readline test
line = self.serv_file.readline()
self.assertEqual(line, MSG)
def _testReadline(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterRead(self):
a_baloo_is = self.serv_file.read(len("A baloo is"))
self.assertEqual("A baloo is", a_baloo_is)
_a_bear = self.serv_file.read(len(" a bear"))
self.assertEqual(" a bear", _a_bear)
line = self.serv_file.readline()
self.assertEqual("\n", line)
line = self.serv_file.readline()
self.assertEqual("A BALOO IS A BEAR.\n", line)
line = self.serv_file.readline()
self.assertEqual(MSG, line)
def _testReadlineAfterRead(self):
self.cli_file.write("A baloo is a bear\n")
self.cli_file.write("A BALOO IS A BEAR.\n")
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterReadNoNewline(self):
end_of_ = self.serv_file.read(len("End Of "))
self.assertEqual("End Of ", end_of_)
line = self.serv_file.readline()
self.assertEqual("Line", line)
def _testReadlineAfterReadNoNewline(self):
self.cli_file.write("End Of Line")
def testClosedAttr(self):
self.assertTrue(not self.serv_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.cli_file.closed)
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv(self, size):
return self._recv_step.next()()
@staticmethod
def _raise_eintr():
raise socket.error(errno.EINTR)
def _test_readline(self, size=-1, **kwargs):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "This is the first line\nAnd the sec",
self._raise_eintr,
lambda : "ond line is here\n",
lambda : "",
])
fo = socket._fileobject(mock_sock, **kwargs)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, **kwargs):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "This is the first line\nAnd the sec",
self._raise_eintr,
lambda : "ond line is here\n",
lambda : "",
])
fo = socket._fileobject(mock_sock, **kwargs)
self.assertEqual(fo.read(size), "This is the first line\n"
"And the second line is here\n")
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(bufsize=1024)
self._test_readline(size=100, bufsize=1024)
self._test_read(bufsize=1024)
self._test_read(size=100, bufsize=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "aa",
lambda : "\n",
lambda : "BB",
self._raise_eintr,
lambda : "bb",
lambda : "",
])
fo = socket._fileobject(mock_sock, bufsize=0)
self.assertEqual(fo.readline(size), "aa\n")
self.assertEqual(fo.readline(size), "BBbb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(bufsize=0)
self._test_read(size=100, bufsize=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that httplib relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
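    # The httplib-style pattern from the docstring, sketched (each
    # makefile(..., 0) reads straight off the socket, so no bytes are
    # stranded in an earlier object's buffer; the names are illustrative):
    #
    #     f1 = conn.makefile('rb', 0); line1 = f1.readline()
    #     f2 = conn.makefile('rb', 0); line2 = f2.readline()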
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.serv_file.readline() # first line
self.assertEqual(line, "A. " + MSG) # first line
self.serv_file = self.cli_conn.makefile('rb', 0)
line = self.serv_file.readline() # second line
self.assertEqual(line, "B. " + MSG) # second line
def _testUnbufferedReadline(self):
self.cli_file.write("A. " + MSG)
self.cli_file.write("B. " + MSG)
self.cli_file.flush()
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SocketMemo(object):
"""A wrapper to keep track of sent data, needed to examine write behaviour"""
def __init__(self, sock):
self._sock = sock
self.sent = []
def send(self, data, flags=0):
n = self._sock.send(data, flags)
self.sent.append(data[:n])
return n
def sendall(self, data, flags=0):
self._sock.sendall(data, flags)
self.sent.append(data)
def __getattr__(self, attr):
return getattr(self._sock, attr)
def getsent(self):
return [e.tobytes() if isinstance(e, memoryview) else e for e in self.sent]
def setUp(self):
FileObjectClassTestCase.setUp(self)
self.serv_file._sock = self.SocketMemo(self.serv_file._sock)
def testLinebufferedWrite(self):
# Write two lines, in small chunks
msg = MSG.strip()
print >> self.serv_file, msg,
print >> self.serv_file, msg
# second line:
print >> self.serv_file, msg,
print >> self.serv_file, msg,
print >> self.serv_file, msg
# third line
print >> self.serv_file, ''
self.serv_file.flush()
msg1 = "%s %s\n"%(msg, msg)
msg2 = "%s %s %s\n"%(msg, msg, msg)
msg3 = "\n"
self.assertEqual(self.serv_file._sock.getsent(), [msg1, msg2, msg3])
def _testLinebufferedWrite(self):
msg = MSG.strip()
msg1 = "%s %s\n"%(msg, msg)
msg2 = "%s %s %s\n"%(msg, msg, msg)
msg3 = "\n"
l1 = self.cli_file.readline()
self.assertEqual(l1, msg1)
l2 = self.cli_file.readline()
self.assertEqual(l2, msg2)
l3 = self.cli_file.readline()
self.assertEqual(l3, msg3)
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = test_support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(socket.error) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = test_support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully connect to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
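    # The enumeration behaviour relied on above, roughly (a simplified
    # sketch of socket.create_connection(), not the real implementation):
    #
    #     err = None
    #     for af, socktype, proto, _, sa in getaddrinfo(host, port, 0,
    #                                                   SOCK_STREAM):
    #         try:
    #             sock = socket(af, socktype, proto)
    #             sock.connect(sa)
    #             return sock
    #         except error, e:
    #             err = e
    #     raise err   # the *last* error encountered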
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = test_support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send("done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, "done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class Urllib2FileobjectTest(unittest.TestCase):
# urllib2.HTTPHandler has "borrowed" socket._fileobject, and requires that
# it close the socket if the close c'tor argument is true
def testClose(self):
class MockSocket:
closed = False
def flush(self): pass
def close(self): self.closed = True
# must not close unless we request it: the original use of _fileobject
# by module socket requires that the underlying socket not be closed until
# the _socketobject that created the _fileobject is closed
s = MockSocket()
f = socket._fileobject(s)
f.close()
self.assertTrue(not s.closed)
s = MockSocket()
f = socket._fileobject(s, close=True)
f.close()
self.assertTrue(s.closed)
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
if not hasattr(signal, "alarm"):
return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(socket.error, Exception))
self.assertTrue(issubclass(socket.herror, socket.error))
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = "\x00python-test-hello\x00\xff"
s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s1.bind(address)
s1.listen(1)
s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s2.connect(s1.getsockname())
s1.accept()
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.assertRaises(socket.error, s.bind, address)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array('c', ' '*1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
with test_support.check_py3k_warnings():
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array('c', ' '*1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
with test_support.check_py3k_warnings():
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if test_support.verbose:
print "TIPC module is not loaded, please 'sudo modprobe tipc'"
return False
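# For orientation only, a shell one-liner roughly equivalent to the module
# check above (the function itself deliberately avoids spawning processes):
#
#     grep -q '^tipc ' /proc/modules && echo loaded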
class TIPCTest (unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
class TIPCThreadableTest (unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
Urllib2FileobjectTest,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
if sys.platform == 'linux2':
tests.append(TestLinuxAbstractNamespace)
if isTipcAvailable():
tests.append(TIPCTest)
tests.append(TIPCThreadableTest)
thread_info = test_support.threading_setup()
test_support.run_unittest(*tests)
test_support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
| mit |
tunneln/CarnotKE | jyhton/Lib/test/test_StringIO_jy.py | 9 | 1994 | import unittest
import cStringIO
from test import test_support
class TestUnicodeInput(unittest.TestCase):
def test_differences_handling_unicode(self):
# Test for the "feature" described on #1089.
#
# Basically, StringIO returns unicode objects if you feed it unicode,
        # but cStringIO doesn't. This should change in future versions of
# CPython and Jython.
self.assertEqual(u'foo', cStringIO.StringIO(u'foo').read())
self.assertEqual('foo', cStringIO.StringIO(u'foo').read())
class TestWrite(unittest.TestCase):
def test_write_seek_write(self):
f = cStringIO.StringIO()
f.write('hello')
f.seek(2)
f.write('hi')
self.assertEquals(f.getvalue(), 'hehio')
#XXX: this should get pushed to CPython's test_StringIO
def test_write_past_end(self):
f = cStringIO.StringIO()
f.write("abcdef")
f.seek(10)
f.write("uvwxyz")
self.assertEqual(f.getvalue(), 'abcdef\x00\x00\x00\x00uvwxyz')
def test_write_seek_back_then_write(self):
# http://bugs.jython.org/issue2324
s = "abcdef"
for i in xrange(len(s)):
f = cStringIO.StringIO()
f.write(s)
f.seek(i)
f.write("x" * 47)
self.assertEqual(f.getvalue(), s[:i] + ("x" * 47))
class TestGetValueAfterClose(unittest.TestCase):
# This test, or something like it, should be really be pushed upstream
def test_getvalue_after_close(self):
f = cStringIO.StringIO('hello')
f.getvalue()
f.close()
try:
f.getvalue()
except ValueError:
pass
else:
self.fail("cStringIO.StringIO: getvalue() after close() should have raised ValueError")
def test_main():
test_support.run_unittest(TestUnicodeInput)
test_support.run_unittest(TestWrite)
test_support.run_unittest(TestGetValueAfterClose)
if __name__ == '__main__':
test_main()
| apache-2.0 |
jpwhite3/python-whirlwind-tour | examples/lab4.py | 1 | 1539 | from __future__ import print_function
import sys
import re
import glob
import argparse
def eprint(*args, **kwargs):
# Print to STDERR instead of STDOUT
print(*args, file=sys.stderr, **kwargs)
def grep(expression, filepath, ignorecase=False, invert=False):
raw_expression = re.escape(expression)
with open(filepath) as file:
for line in file:
# Enable case matching?
if ignorecase:
matches = re.search(raw_expression, line, re.I)
else:
matches = re.search(raw_expression, line)
# Invert matches if need be and print
if matches and not invert:
print(line)
elif invert and not matches:
print(line)
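# Illustrative invocations (file names are hypothetical):
#
#     python lab4.py "TODO" notes.txt
#     python lab4.py -i -v "debug" "logs/*.log"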
def main():
    parser = argparse.ArgumentParser(description='A pure Python clone of the grep command')
parser.add_argument('expression', action="store", type=str, help="Regular expression to match against")
parser.add_argument('filepath', action="store", type=str, help="Path to file to search in. supports wildcard globs")
parser.add_argument('-i', action="store_true", default=False, dest="ignorecase", help="Ignore case")
parser.add_argument('-v', action="store_true", default=False, dest="invert", help="Show lines that don't match")
args = parser.parse_args()
file_list = glob.glob(args.filepath)
for f in file_list:
if len(file_list) > 1:
eprint("\nResults for file: %s" % f)
eprint("-"*(len(f)+18))
grep(args.expression, f, ignorecase=args.ignorecase, invert=args.invert)
if __name__ == '__main__':
main()
| cc0-1.0 |
Nexenta/cinder | cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py | 5 | 6841 | # Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as vnx_ex
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
import cinder.volume.drivers.emc.vnx.taskflows as vnx_taskflow
class TestTaskflow(test.TestCase):
def setUp(self):
super(TestTaskflow, self).setUp()
self.work_flow = linear_flow.Flow('test_task')
@res_mock.patch_client
def test_copy_snapshot_task(self, client, mocked):
store_spec = {'client': client,
'snap_name': 'original_name',
'new_snap_name': 'new_name'
}
self.work_flow.add(vnx_taskflow.CopySnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_copy_snapshot_task_revert(self, client, mocked):
store_spec = {'client': client,
'snap_name': 'original_name',
'new_snap_name': 'new_name'
}
self.work_flow.add(vnx_taskflow.CopySnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXSnapError,
engine.run)
@res_mock.patch_client
def test_create_smp_task(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'base_lun_name': 'base_name'
}
self.work_flow.add(vnx_taskflow.CreateSMPTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
smp_id = engine.storage.fetch('smp_id')
self.assertEqual(15, smp_id)
@res_mock.patch_client
def test_create_smp_task_revert(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'base_lun_name': 'base_name'
}
self.work_flow.add(vnx_taskflow.CreateSMPTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXCreateLunError,
engine.run)
smp_id = engine.storage.fetch('smp_id')
self.assertIsInstance(smp_id, failure.Failure)
@res_mock.patch_client
def test_attach_snap_task(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AttachSnapTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_attach_snap_task_revert(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AttachSnapTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXAttachSnapError,
engine.run)
@res_mock.patch_client
def test_create_snapshot_task(self, client, mocked):
store_spec = {
'client': client,
'lun_id': 12,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.CreateSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_create_snapshot_task_revert(self, client, mocked):
store_spec = {
'client': client,
'lun_id': 13,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.CreateSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXCreateSnapError,
engine.run)
@res_mock.patch_client
def test_allow_read_write_task(self, client, mocked):
store_spec = {
'client': client,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_allow_read_write_task_revert(self, client, mocked):
store_spec = {
'client': client,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXSnapError,
engine.run)
@res_mock.patch_client
def test_create_cg_snapshot_task(self, client, mocked):
store_spec = {
'client': client,
'cg_name': 'test_cg',
'cg_snap_name': 'my_snap_name'
}
self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
snap_name = engine.storage.fetch('new_cg_snap_name')
self.assertIsInstance(snap_name, res_mock.StorageObjectMock)
@res_mock.patch_client
def test_create_cg_snapshot_task_revert(self, client, mocked):
store_spec = {
'client': client,
'cg_name': 'test_cg',
'cg_snap_name': 'my_snap_name'
}
self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXCreateSnapError,
engine.run)
| apache-2.0 |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/utils/process.py | 17 | 2937 | # encoding: utf-8
"""
Utilities for working with external processes.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import sys
if sys.platform == 'win32':
from ._process_win32 import system, getoutput, arg_split, check_pid
elif sys.platform == 'cli':
from ._process_cli import system, getoutput, arg_split, check_pid
else:
from ._process_posix import system, getoutput, arg_split, check_pid
from ._process_common import getoutputerror, get_output_error_code, process_handler
from . import py3compat
class FindCmdError(Exception):
pass
def find_cmd(cmd):
"""Find absolute path to executable cmd in a cross platform manner.
This function tries to determine the full path to a command line program
using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
    time it will use the version that is first on the user's `PATH`.
Warning, don't use this to find IPython command line programs as there
is a risk you will find the wrong one. Instead find those using the
following code and looking for the application itself::
from IPython.utils.path import get_ipython_module_path
from IPython.utils.process import pycmd2argv
argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))
Parameters
----------
cmd : str
The command line program to look for.
"""
path = py3compat.which(cmd)
if path is None:
raise FindCmdError('command could not be found: %s' % cmd)
return path
def is_cmd_found(cmd):
"""Check whether executable `cmd` exists or not and return a bool."""
try:
find_cmd(cmd)
return True
except FindCmdError:
return False
def pycmd2argv(cmd):
r"""Take the path of a python command and return a list (argv-style).
This only works on Python based command line programs and will find the
location of the ``python`` executable using ``sys.executable`` to make
sure the right version is used.
For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
    .com or .bat, and [sys.executable, cmd] otherwise.
Parameters
----------
cmd : string
The path of the command.
Returns
-------
argv-style list.
"""
ext = os.path.splitext(cmd)[1]
if ext in ['.exe', '.com', '.bat']:
return [cmd]
else:
return [sys.executable, cmd]
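# Hedged examples of the dispatch above (paths are hypothetical):
#
#     pycmd2argv('C:\\tools\\foo.exe')  -> ['C:\\tools\\foo.exe']
#     pycmd2argv('/opt/scripts/foo.py') -> [sys.executable, '/opt/scripts/foo.py']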
def abbrev_cwd():
""" Return abbreviated version of cwd, e.g. d:mydir """
cwd = py3compat.getcwd().replace('\\','/')
drivepart = ''
tail = cwd
if sys.platform == 'win32':
if len(cwd) < 4:
return cwd
drivepart,tail = os.path.splitdrive(cwd)
parts = tail.split('/')
if len(parts) > 2:
tail = '/'.join(parts[-2:])
return (drivepart + (
cwd == '/' and '/' or tail))
| gpl-2.0 |
galfaroi/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
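# Minimal usage sketch for ProgressBar (do_work is hypothetical):
#
#     bar = ProgressBar(100)
#     for i in range(100):
#         do_work(i)
#         bar.animate(i)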
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
| bsd-3-clause |
ayoubg/gem5-graphics | gem5-gpu/tests/quick/se_gpu/10.backprop/test.py | 1 | 1654 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Joel Hestness
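# Note: 'options' is not defined in this snippet; it appears to be injected
# by the gem5 test harness that executes this file, and the assignments
# below only override the fields needed for the backprop regression test.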
options.clusters = 4
options.cmd = 'gem5_gpu_backprop'
options.options = '256'
| bsd-3-clause |
bcarr092/pyCovertAudio | src/pyCovertAudio/BFSKModulator.py | 1 | 2146 | from pyCovertAudio_lib import *
from BaseModulator import BaseModulator
from SignalFunctions import SignalFunctions
class BFSKModulator(BaseModulator):
def __init__(
self, bitsPerSymbol, sampleRate, samplesPerSymbol,
symbolExpansionFactor, separationIntervals, configuration
):
BaseModulator.__init__(
self,
bitsPerSymbol,
sampleRate,
samplesPerSymbol,
symbolExpansionFactor,
separationIntervals,
configuration
)
(
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth
) = \
python_BFSK_determine_frequencies(
self.samplesPerSymbol,
self.sampleRate,
self.carrierFrequency,
self.separationIntervals
)
def modulate(self, symbolSequence, signal, sentinel=None):
symbolSignalLength = self.samplesPerSymbol * self.symbolExpansionFactor
for symbol in symbolSequence:
symbolFrequency = self.carrierFrequency
if(symbol == 1):
symbolFrequency += self.symbol1Frequency
else:
symbolFrequency += self.symbol0Frequency
x = \
SignalFunctions.modulateFSK(
symbolSignalLength, self.sampleRate, [symbolFrequency]
)
signal.extend(x[: self.samplesPerSymbol])
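            # pad the rest of the expanded symbol period with silence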
signal.extend(
[0.0 for i in range(
(self.symbolExpansionFactor - 1) * self.samplesPerSymbol)]
)
def toString(self):
return (
"Modulator:\n\tAlgorithm:\t\t\tBFSK\n\tSymbol 0 frequency:\t\t"
"%.02f\n\tSymbol 1 frequency:\t\t%.02f\n\tMin frequency"
" separation:\t%.02f\n\tBandwidth:\t\t\t%.02f\n%s"
% (
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth,
BaseModulator.toString(self)
)
)
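# Minimal usage sketch (not part of the original module). The numeric
# parameters and the configuration source are illustrative assumptions;
# the keys BaseModulator actually expects come from pyCovertAudio's
# configuration files.
#
#     modulator = BFSKModulator(
#         bitsPerSymbol=1, sampleRate=44100, samplesPerSymbol=4410,
#         symbolExpansionFactor=1, separationIntervals=2,
#         configuration=load_configuration())  # hypothetical helper
#     signal = []
#     modulator.modulate([0, 1, 1, 0], signal)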
| apache-2.0 |
Khan/git-bigfile | vendor/boto/datapipeline/exceptions.py | 235 | 1471 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class PipelineDeletedException(JSONResponseError):
pass
class InvalidRequestException(JSONResponseError):
pass
class TaskNotFoundException(JSONResponseError):
pass
class PipelineNotFoundException(JSONResponseError):
pass
class InternalServiceError(JSONResponseError):
pass
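# Illustrative usage (not part of the original module; 'conn' is a
# hypothetical boto.datapipeline connection):
#
#     try:
#         conn.get_pipeline_definition(pipeline_id)
#     except PipelineNotFoundException:
#         pass  # the pipeline id is unknown or the pipeline was deleted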
| mit |
40123112/w17exam | static/Brython3.1.3-20150514-095342/Lib/unittest/loader.py | 739 | 13883 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch
from . import case, suite, util
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
def _jython_aware_splitext(path):
if path.lower().endswith('$py.class'):
return path[:-9]
return os.path.splitext(path)[0]
class TestLoader(object):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = staticmethod(util.three_way_cmp)
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, case.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, case.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.FunctionType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
name = parts[-1]
inst = parent(name)
# static methods follow a different path
if not isinstance(getattr(inst, name), types.FunctionType):
return self.suiteClass([inst])
elif isinstance(obj, suite.TestSuite):
return obj
if callable(obj):
test = obj()
if isinstance(test, suite.TestSuite):
return test
elif isinstance(test, case.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
callable(getattr(testCaseClass, attrname))
testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
if self.sortTestMethodsUsing:
testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them and return all
tests found within them. Only test files that match the pattern will
be loaded. (Using shell style pattern matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = self._get_directory_containing_module(top_part)
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_directory_containing_module(self, module_name):
module = sys.modules[module_name]
full_path = os.path.abspath(module.__file__)
if os.path.basename(full_path).lower().startswith('__init__.py'):
return os.path.dirname(os.path.dirname(full_path))
else:
# here we have been given a module rather than a package - so
# all we can do is search the *same* directory the module is in
# should an exception be raised instead
return os.path.dirname(full_path)
def _get_name_from_path(self, path):
path = _jython_aware_splitext(os.path.normpath(path))
_relpath = os.path.relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = _jython_aware_splitext(os.path.realpath(mod_file))
fullpath_noext = _jython_aware_splitext(os.path.realpath(full_path))
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = _jython_aware_splitext(os.path.basename(full_path))
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(
testCaseClass)
def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(\
module)
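# Illustrative usage (not part of the original module):
#
#     suite = defaultTestLoader.discover('tests', pattern='test*.py')
#     unittest.TextTestRunner().run(suite)
#
# where 'tests' is an assumed directory importable from the project root
# and unittest is the enclosing package.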
| agpl-3.0 |
sebrandon1/nova | nova/virt/libvirt/designer.py | 5 | 5322 | # Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy based configuration of libvirt objects
This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
import six
from nova.pci import utils as pci_utils
def set_vif_guest_frontend_config(conf, mac, model, driver, queues=None):
"""Populate a LibvirtConfigGuestInterface instance
with guest frontend details.
"""
conf.mac_addr = mac
if model is not None:
conf.model = model
if driver is not None:
conf.driver_name = driver
if queues is not None:
conf.vhost_queues = queues
def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a software bridge.
"""
conf.net_type = "bridge"
conf.source_dev = brname
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_ethernet_config(conf, tapname):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an externally configured
host device.
    NB: use of this configuration is discouraged by the
    libvirt project and will mark domains as 'tainted'.
"""
conf.net_type = "ethernet"
conf.target_dev = tapname
conf.script = ""
def set_vif_host_backend_802qbg_config(conf, devname, managerid,
typeid, typeidversion,
instanceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbg device.
"""
conf.net_type = "direct"
conf.source_dev = devname
conf.source_mode = "vepa"
conf.vporttype = "802.1Qbg"
conf.add_vport_param("managerid", managerid)
conf.add_vport_param("typeid", typeid)
conf.add_vport_param("typeidversion", typeidversion)
conf.add_vport_param("instanceid", instanceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
    with host backend details for a device that supports hardware
    virtual ethernet bridging.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vlan = vlan
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hostdev_pci_config(conf, pci_slot):
"""Populate a LibvirtConfigGuestHostdev instance with pci address data."""
conf.domain, conf.bus, conf.slot, conf.function = (
pci_utils.get_pci_address_fields(pci_slot))
def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"):
"""Populate a LibvirtConfigGuestInterface instance
with direct Interface.
"""
conf.net_type = "direct"
conf.source_mode = mode
conf.source_dev = devname
conf.model = "virtio"
def set_vif_host_backend_vhostuser_config(conf, mode, path):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for vhostuser socket.
"""
conf.net_type = "vhostuser"
conf.vhostuser_type = "unix"
conf.vhostuser_mode = mode
conf.vhostuser_path = path
def set_vif_bandwidth_config(conf, inst_type):
"""Config vif inbound/outbound bandwidth limit. parameters are
set in instance_type_extra_specs table, key is in the format
quota:vif_inbound_average.
"""
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in six.iteritems(inst_type.get('extra_specs', {})):
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in bandwidth_items:
setattr(conf, scope[1], value)
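# Illustrative sketch (not part of the original module): nova's vif driver
# calls these helpers with a LibvirtConfigGuestInterface instance; the MAC,
# bridge, and tap names below are assumptions, roughly:
#
#     from nova.virt.libvirt import config as vconfig
#     conf = vconfig.LibvirtConfigGuestInterface()
#     set_vif_guest_frontend_config(conf, '52:54:00:12:34:56', 'virtio',
#                                   None, queues=4)
#     set_vif_host_backend_bridge_config(conf, 'br100', 'tap-abc123')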
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/html5lib/html5lib/tests/test_serializer.py | 72 | 7831 | import os
import unittest
from support import html5lib_test_files
try:
import json
except ImportError:
import simplejson as json
import html5lib
from html5lib import html5parser, serializer, constants
from html5lib.treewalkers._base import TreeWalker
optionals_loaded = []
try:
from lxml import etree
optionals_loaded.append("lxml")
except ImportError:
pass
default_namespace = constants.namespaces["html"]
class JsonWalker(TreeWalker):
def __iter__(self):
for token in self.tree:
type = token[0]
if type == "StartTag":
if len(token) == 4:
namespace, name, attrib = token[1:4]
else:
namespace = default_namespace
name, attrib = token[1:3]
yield self.startTag(namespace, name, self._convertAttrib(attrib))
elif type == "EndTag":
if len(token) == 3:
namespace, name = token[1:3]
else:
namespace = default_namespace
name = token[1]
yield self.endTag(namespace, name)
elif type == "EmptyTag":
if len(token) == 4:
namespace, name, attrib = token[1:]
else:
namespace = default_namespace
name, attrib = token[1:]
for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
yield token
elif type == "Comment":
yield self.comment(token[1])
elif type in ("Characters", "SpaceCharacters"):
for token in self.text(token[1]):
yield token
elif type == "Doctype":
if len(token) == 4:
yield self.doctype(token[1], token[2], token[3])
elif len(token) == 3:
yield self.doctype(token[1], token[2])
else:
yield self.doctype(token[1])
else:
raise ValueError("Unknown token type: " + type)
def _convertAttrib(self, attribs):
"""html5lib tree-walkers use a dict of (namespace, name): value for
attributes, but JSON cannot represent this. Convert from the format
in the serializer tests (a list of dicts with "namespace", "name",
and "value" as keys) to html5lib's tree-walker format."""
attrs = {}
for attrib in attribs:
name = (attrib["namespace"], attrib["name"])
assert(name not in attrs)
attrs[name] = attrib["value"]
return attrs
def serialize_html(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.HTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def serialize_xhtml(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.XHTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def make_test(input, expected, xhtml, options):
result = serialize_html(input, options)
if len(expected) == 1:
assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:False\n%s"%(expected[0], result, str(options))
elif result not in expected:
assert False, "Expected: %s, Received: %s" % (expected, result)
if not xhtml:
return
result = serialize_xhtml(input, options)
if len(xhtml) == 1:
assert xhtml[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:True\n%s"%(xhtml[0], result, str(options))
elif result not in xhtml:
assert False, "Expected: %s, Received: %s" % (xhtml, result)
class EncodingTestCase(unittest.TestCase):
def throwsWithLatin1(self, input):
self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"})
def testDoctypeName(self):
self.throwsWithLatin1([["Doctype", u"\u0101"]])
def testDoctypePublicId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"\u0101"]])
def testDoctypeSystemId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"potato", u"\u0101"]])
def testCdataCharacters(self):
self.assertEquals("<style>ā", serialize_html([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}],
["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testCharacters(self):
self.assertEquals("ā", serialize_html([["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testStartTagName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testEmptyTagName(self):
self.throwsWithLatin1([["EmptyTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testAttributeName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"span", [{"namespace": None, "name": u"\u0101", "value": u"potato"}]]])
def testAttributeValue(self):
self.assertEquals("<span potato=ā>", serialize_html([["StartTag", u"http://www.w3.org/1999/xhtml", u"span",
[{"namespace": None, "name": u"potato", "value": u"\u0101"}]]],
{"encoding": "iso-8859-1"}))
def testEndTagName(self):
self.throwsWithLatin1([["EndTag", u"http://www.w3.org/1999/xhtml", u"\u0101"]])
def testComment(self):
self.throwsWithLatin1([["Comment", u"\u0101"]])
if "lxml" in optionals_loaded:
class LxmlTestCase(unittest.TestCase):
def setUp(self):
self.parser = etree.XMLParser(resolve_entities=False)
self.treewalker = html5lib.getTreeWalker("lxml")
self.serializer = serializer.HTMLSerializer()
def testEntityReplacement(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result)
def testEntityXML(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>""", result)
def testEntityNoResolve(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False,
resolve_entities=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>""", result)
def test_serializer():
for filename in html5lib_test_files('serializer', '*.test'):
tests = json.load(file(filename))
test_name = os.path.basename(filename).replace('.test','')
for index, test in enumerate(tests['tests']):
xhtml = test.get("xhtml", test["expected"])
if test_name == 'optionaltags':
xhtml = None
yield make_test, test["input"], test["expected"], xhtml, test.get("options", {})
| agpl-3.0 |
davidwaroquiers/pymatgen | pymatgen/io/wannier90.py | 5 | 6189 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Modules for working with wannier90 input and output.
"""
from typing import Sequence
import numpy as np
from scipy.io import FortranEOFError, FortranFile
__author__ = "Mark Turiansky"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Jun 04, 2020"
class Unk:
"""
Object representing the data in a UNK file.
.. attribute:: ik
int index of kpoint for this file
.. attribute:: data
numpy.ndarray that contains the wavefunction data for in the UNK file.
The shape should be (nbnd, ngx, ngy, ngz) for regular calculations and
(nbnd, 2, ngx, ngy, ngz) for noncollinear calculations.
.. attribute:: is_noncollinear
bool that specifies if data is from a noncollinear calculation
.. attribute:: nbnd
int number of bands in data
.. attribute:: ng
sequence of three integers that correspond to the grid size of the
given data. The definition is ng = (ngx, ngy, ngz).
"""
ik: int
is_noncollinear: bool
nbnd: int
ng: Sequence[int]
def __init__(self, ik: int, data: np.ndarray) -> None:
"""
Initialize Unk class.
Args:
ik (int): index of the kpoint UNK file is for
data (np.ndarray): data from the UNK file that has shape (nbnd,
ngx, ngy, ngz) or (nbnd, 2, ngx, ngy, ngz) if noncollinear
"""
self.ik = ik
self.data = data
@property
def data(self) -> np.ndarray:
"""
np.ndarray: contains the wavefunction data for in the UNK file.
The shape should be (nbnd, ngx, ngy, ngz) for regular calculations and
(nbnd, 2, ngx, ngy, ngz) for noncollinear calculations.
"""
return self._data
@data.setter
def data(self, value: np.ndarray) -> None:
"""
Sets the value of data.
Args:
            value (np.ndarray): data to replace stored data, must have shape
(nbnd, ngx, ngy, ngz) or (nbnd, 2, ngx, ngy, ngz) if
noncollinear calculation
"""
temp_val = np.array(value, dtype=np.complex128)
if len(temp_val.shape) not in [4, 5]:
raise ValueError(
"invalid data shape, must be (nbnd, ngx, ngy, ngz"
") or (nbnd, 2, ngx, ngy, ngz) for noncollinear "
f"data, given {temp_val.shape}"
)
if len(temp_val.shape) == 5 and temp_val.shape[1] != 2:
raise ValueError(
"invalid noncollinear data, shape should be (nbnd" f", 2, ngx, ngy, ngz), given {temp_val.shape}"
)
self._data = temp_val
# derived properties
self.is_noncollinear = len(self.data.shape) == 5
self.nbnd = self.data.shape[0]
self.ng = self.data.shape[-3:]
@staticmethod
def from_file(filename: str) -> object:
"""
Reads the UNK data from file.
Args:
filename (str): path to UNK file to read
Returns:
Unk object
"""
input_data = []
with FortranFile(filename, "r") as f:
*ng, ik, nbnd = f.read_ints()
for _ in range(nbnd):
input_data.append(
# when reshaping need to specify ordering as fortran
f.read_record(np.complex128).reshape(ng, order="F")
)
try:
for _ in range(nbnd):
input_data.append(f.read_record(np.complex128).reshape(ng, order="F"))
is_noncollinear = True
except FortranEOFError:
is_noncollinear = False
# mypy made me create an extra variable here >:(
data = np.array(input_data, dtype=np.complex128)
# spinors are interwoven, need to separate them
if is_noncollinear:
temp_data = np.empty((nbnd, 2, *ng), dtype=np.complex128)
temp_data[:, 0, :, :, :] = data[::2, :, :, :]
temp_data[:, 1, :, :, :] = data[1::2, :, :, :]
return Unk(ik, temp_data)
return Unk(ik, data)
def write_file(self, filename: str) -> None:
"""
Write the UNK file.
Args:
filename (str): path to UNK file to write, the name should have the
form 'UNKXXXXX.YY' where XXXXX is the kpoint index (Unk.ik) and
YY is 1 or 2 for the spin index or NC if noncollinear
"""
with FortranFile(filename, "w") as f:
f.write_record(np.array([*self.ng, self.ik, self.nbnd], dtype=np.int32))
for ib in range(self.nbnd):
if self.is_noncollinear:
f.write_record(self.data[ib, 0].flatten("F"))
f.write_record(self.data[ib, 1].flatten("F"))
else:
f.write_record(self.data[ib].flatten("F"))
def __repr__(self) -> str:
return (
f"<UNK ik={self.ik} nbnd={self.nbnd} ncl={self.is_noncollinear}"
+ f" ngx={self.ng[0]} ngy={self.ng[1]} ngz={self.ng[2]}>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Unk):
return NotImplemented
if not np.allclose(self.ng, other.ng):
return False
if self.ik != other.ik:
return False
if self.is_noncollinear != other.is_noncollinear:
return False
if self.nbnd != other.nbnd:
return False
for ib in range(self.nbnd):
if self.is_noncollinear:
if not (
np.allclose(self.data[ib, 0], other.data[ib, 0], atol=1e-4)
and np.allclose(self.data[ib, 1], other.data[ib, 1], atol=1e-4)
):
return False
else:
if not np.allclose(self.data[ib], other.data[ib], atol=1e-4):
return False
return True
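if __name__ == "__main__":
    # Quick round-trip check (illustrative, not part of the original
    # module): write a small random UNK file and read it back.
    import os
    import tempfile

    data = np.random.rand(2, 4, 4, 4) + 1j * np.random.rand(2, 4, 4, 4)
    unk = Unk(1, data)
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "UNK00001.1")
        unk.write_file(path)
        assert unk == Unk.from_file(path)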
| mit |
bkj/ernest | enrich/modules/enrich_terminal_nodes.py | 2 | 6773 | #!/usr/bin/env python
'''
Add a single-neighbor tag for owners and issuers to the ownership index;
the tag lets the front end hide terminal nodes
** Note **
This runs prospectively using the --most-recent argument
'''
import argparse
import json
import logging
from elasticsearch import Elasticsearch
from elasticsearch.helpers import parallel_bulk, scan
class ENRICH_TERMINAL_NODES:
def __init__(self, args, parent_logger):
self.args = args
self.logger = logging.getLogger(parent_logger + ".terminal_nodes")
with open(args.config_path, 'r') as inf:
config = json.load(inf)
self.config = config
self.client = Elasticsearch([{
'host': config['es']['host'],
'port': config['es']['port']}
])
self.match_all = {
"query": {
"match_all": {}
}
}
def raw_dict(self, x, dict_type):
if dict_type == 'issuer':
key = x['issuerCik']
val = x['ownerCik']
elif dict_type == 'owner':
key = x['ownerCik']
val = x['issuerCik']
return {
"key": key,
"value": val
}
def build_query(self, val):
val = '__meta__.' + val + '_has_one_neighbor'
        query = {
            "query": {
                "bool": {
                    # main() fills this terms filter with the CIKs of the
                    # terminal nodes; without the placeholder the
                    # --from-scratch path would raise a KeyError.
                    "must": {
                        "terms": {}
                    },
                    "should": [
                        {
                            "filtered": {
                                "filter": {
                                    "missing": {
                                        "field": val
                                    }
                                }
                            }
                        },
                        {
                            "match": {
                                val: True
                            }
                        }
                    ],
                    "minimum_should_match": 1
                }
            }
        }
return query
def get_terminal_nodes(self, search_type):
temp_dict = {}
for a in scan(self.client,
index=self.config['ownership']['index'],
query=self.match_all):
x = self.raw_dict(a['_source'], search_type)
if x["key"] in temp_dict:
if temp_dict[x["key"]]["terminal"] is True:
if x["value"] != temp_dict[x["key"]]["value"]:
temp_dict[x["key"]]["terminal"] = False
else:
pass
else:
pass
else:
temp_dict[x["key"]] = {
"value": x["value"],
"terminal": True
}
return [key for key in temp_dict if temp_dict[key]['terminal'] is True]
def get_update_nodes(self, query_type):
gtn = self.get_terminal_nodes(query_type)
if self.args.from_scratch:
query = self.build_query(query_type)
else:
query = {"query": {
"bool": {
"must_not": {
"match": {
"__meta__." + query_type + "_has_one_neighbor": True
}
},
"must": {
"terms": {
}
}
}
}}
return query, gtn
def main(self, query_type):
actions = []
query, t_nodes = self.get_update_nodes(query_type)
i = 0
tn = [t_nodes[j: j + 1024] for j in range(0, len(t_nodes), 1024)]
for p in tn:
query["query"]["bool"]["must"]["terms"][query_type + "Cik"] = p
for person in scan(self.client,
index=self.config['ownership']['index'],
query=query):
actions.append({
"_op_type": "update",
"_index": self.config['ownership']['index'],
"_id": person['_id'],
"_type": person['_type'],
"doc": {
"__meta__": {
query_type + "_has_one_neighbor": True
}
}
})
i += 1
if i > 500:
for success, info in parallel_bulk(self.client,
actions,
chunk_size=510):
if not success:
self.logger.error('[RESPONSE]|{}'.format(info))
else:
self.logger.info('[RESPONSE]|{}'.format(info))
actions = []
i = 0
for success, info in parallel_bulk(self.client,
actions,
chunk_size=510):
if not success:
self.logger.error('[RESPONSE]|{}'.format(info))
else:
self.logger.info('[RESPONSE]|{}'.format(info))
f_query = {
"query": {
"bool": {
"must_not": {
"terms": {
query_type + 'Cik': t_nodes
}
},
"must": {
"match": {
"__meta__." + query_type + "_has_one_neighbor": True
}
}
}
}
}
for a in scan(self.client,
index=self.config['ownership']['index'],
query=f_query):
a['_source']['__meta__'][query_type + '_has_one_neighbor'] = False
res = self.client.index(
index=self.config['ownership']['index'],
doc_type=a['_type'],
body=a['_source'],
id=a['_id']
)
self.logger.info(res)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='add single neighbor tags')
parser.add_argument('--from-scratch',
dest='from_scratch',
action="store_true")
parser.add_argument('--most-recent',
dest='most_recent',
action="store_true")
parser.add_argument('--config-path',
type=str,
action='store',
default='../config.json')
args = parser.parse_args()
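    # The original file ends after argument parsing; the lines below are an
    # assumed invocation (not in the source) that wires up logging and runs
    # both enrichment passes.
    logging.basicConfig(level=logging.INFO)
    enricher = ENRICH_TERMINAL_NODES(args, 'enrich')
    enricher.main('issuer')
    enricher.main('owner')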
| apache-2.0 |
midgetspy/Sick-Beard | lib/hachoir_core/error.py | 90 | 1350 | """
Functions to display an error (error, warning or information) message.
"""
from lib.hachoir_core.log import log
from lib.hachoir_core.tools import makePrintable
import sys, traceback
def getBacktrace(empty="Empty backtrace."):
"""
Try to get backtrace as string.
Returns "Error while trying to get backtrace" on failure.
"""
try:
info = sys.exc_info()
trace = traceback.format_exception(*info)
sys.exc_clear()
if trace[0] != "None\n":
return "".join(trace)
except:
# No i18n here (imagine if i18n function calls error...)
return "Error while trying to get backtrace"
return empty
class HachoirError(Exception):
"""
Parent of all errors in Hachoir library
"""
def __init__(self, message):
message_bytes = makePrintable(message, "ASCII")
Exception.__init__(self, message_bytes)
self.text = message
def __unicode__(self):
return self.text
# Error classes which may be raised by Hachoir core
# FIXME: Add EnvironmentError (IOError or OSError) and AssertionError?
# FIXME: Remove ArithmeticError and RuntimeError?
HACHOIR_ERRORS = (HachoirError, LookupError, NameError, AttributeError,
TypeError, ValueError, ArithmeticError, RuntimeError)
info = log.info
warning = log.warning
error = log.error
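# Illustrative usage (not part of the original module; 'parser' is a
# hypothetical hachoir field set):
#
#     try:
#         field = parser["header"]
#     except HACHOIR_ERRORS, err:
#         error(u"Unable to read header: %s" % unicode(err))
#         error(getBacktrace())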
| gpl-3.0 |
lberruti/ansible-modules-extras | notification/irc.py | 41 | 6075 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: irc
version_added: "1.2"
short_description: Send a message to an IRC channel
description:
- Send a message to an IRC channel. This is a very simplistic implementation.
options:
server:
description:
- IRC server name/address
required: false
default: localhost
port:
description:
- IRC server port number
required: false
default: 6667
nick:
description:
- Nickname. May be shortened, depending on server's NICKLEN setting.
required: false
default: ansible
msg:
description:
- The message body.
required: true
default: null
color:
description:
- Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
required: false
default: "none"
choices: [ "none", "yellow", "red", "green", "blue", "black" ]
channel:
description:
- Channel name
required: true
key:
description:
- Channel key
required: false
version_added: 1.7
passwd:
description:
- Server password
required: false
timeout:
description:
- Timeout to use while waiting for successful registration and join
messages, this is to prevent an endless loop
default: 30
version_added: 1.5
use_ssl:
description:
- Designates whether TLS/SSL should be used when connecting to the IRC server
default: False
version_added: 1.8
# informational: requirements for nodes
requirements: [ socket ]
author: Jan-Piet Mens, Matt Martz
'''
EXAMPLES = '''
- irc: server=irc.example.net channel="#t1" msg="Hello world"
- local_action: irc port=6669
channel="#t1"
msg="All finished at {{ ansible_date_time.iso8601 }}"
color=red
nick=ansibleIRC
'''
# ===========================================
# IRC module support methods.
#
import re
import socket
import ssl
import time
from time import sleep
def send_msg(channel, msg, server='localhost', port='6667', key=None,
nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False):
'''send message to IRC'''
colornumbers = {
'black': "01",
'red': "04",
'green': "09",
'yellow': "08",
'blue': "12",
}
    try:
        colornumber = colornumbers[color]
        colortext = "\x03" + colornumber
    except KeyError:
        # 'none' or an unrecognized color name: send the message uncolored
        colortext = ""
message = colortext + msg
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_ssl:
irc = ssl.wrap_socket(irc)
irc.connect((server, int(port)))
if passwd:
irc.send('PASS %s\r\n' % passwd)
irc.send('NICK %s\r\n' % nick)
irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))
motd = ''
start = time.time()
while 1:
motd += irc.recv(1024)
# The server might send back a shorter nick than we specified (due to NICKLEN),
# so grab that and use it from now on (assuming we find the 00[1-4] response).
match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
if match:
nick = match.group('nick')
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC server welcome response')
sleep(0.5)
if key:
irc.send('JOIN %s %s\r\n' % (channel, key))
else:
irc.send('JOIN %s\r\n' % channel)
join = ''
start = time.time()
while 1:
join += irc.recv(1024)
if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC JOIN response')
sleep(0.5)
irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
sleep(1)
irc.send('PART %s\r\n' % channel)
irc.send('QUIT\r\n')
sleep(1)
irc.close()
# ===========================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
server=dict(default='localhost'),
port=dict(default=6667),
nick=dict(default='ansible'),
msg=dict(required=True),
color=dict(default="none", choices=["yellow", "red", "green",
"blue", "black", "none"]),
channel=dict(required=True),
key=dict(),
passwd=dict(),
timeout=dict(type='int', default=30),
use_ssl=dict(type='bool', default=False)
),
supports_check_mode=True
)
server = module.params["server"]
port = module.params["port"]
nick = module.params["nick"]
msg = module.params["msg"]
color = module.params["color"]
channel = module.params["channel"]
key = module.params["key"]
passwd = module.params["passwd"]
timeout = module.params["timeout"]
use_ssl = module.params["use_ssl"]
try:
send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl)
except Exception, e:
module.fail_json(msg="unable to send to IRC: %s" % e)
module.exit_json(changed=False, channel=channel, nick=nick,
msg=msg)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
bev-a-tron/pledgeservice | testlib/waitress/trigger.py | 31 | 7964 | ##############################################################################
#
# Copyright (c) 2001-2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import asyncore
import os
import socket
import errno
from waitress.compat import thread
# Wake up a call to select() running in the main thread.
#
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
#
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
#
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]
class _triggerbase(object):
"""OS-independent base class for OS-dependent trigger class."""
kind = None # subclass must set to "pipe" or "loopback"; used by repr
def __init__(self):
self._closed = False
# `lock` protects the `thunks` list from being traversed and
# appended to simultaneously.
self.lock = thread.allocate_lock()
# List of no-argument callbacks to invoke when the trigger is
# pulled. These run in the thread running the asyncore mainloop,
# regardless of which thread pulls the trigger.
self.thunks = []
def readable(self):
return True
def writable(self):
return False
def handle_connect(self):
pass
def handle_close(self):
self.close()
# Override the asyncore close() method, because it doesn't know about
# (so can't close) all the gimmicks we have open. Subclass must
# supply a _close() method to do platform-specific closing work. _close()
# will be called iff we're not already closed.
def close(self):
if not self._closed:
self._closed = True
self.del_channel()
self._close() # subclass does OS-specific stuff
def pull_trigger(self, thunk=None):
if thunk:
self.lock.acquire()
try:
self.thunks.append(thunk)
finally:
self.lock.release()
self._physical_pull()
def handle_read(self):
try:
self.recv(8192)
except (OSError, socket.error):
return
self.lock.acquire()
try:
for thunk in self.thunks:
try:
thunk()
except:
nil, t, v, tbinfo = asyncore.compact_traceback()
self.log_info(
'exception in trigger thunk: (%s:%s %s)' %
(t, v, tbinfo))
self.thunks = []
finally:
self.lock.release()
if os.name == 'posix':
class trigger(_triggerbase, asyncore.file_dispatcher):
kind = "pipe"
def __init__(self, map):
_triggerbase.__init__(self)
r, self.trigger = self._fds = os.pipe()
asyncore.file_dispatcher.__init__(self, r, map=map)
def _close(self):
for fd in self._fds:
os.close(fd)
self._fds = []
def _physical_pull(self):
os.write(self.trigger, b'x')
else: # pragma: no cover
# Windows version; uses just sockets, because a pipe isn't select'able
# on Windows.
class trigger(_triggerbase, asyncore.dispatcher):
kind = "loopback"
def __init__(self, map):
_triggerbase.__init__(self)
# Get a pair of connected sockets. The trigger is the 'w'
# end of the pair, which is connected to 'r'. 'r' is put
# in the asyncore socket map. "pulling the trigger" then
# means writing something on w, which will wake up r.
w = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up asyncore's
# select() ASAP.
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while True:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
connect_address = a.getsockname() # assigned (host, port) pair
a.listen(1)
try:
w.connect(connect_address)
break # success
except socket.error as detail:
if detail[0] != errno.WSAEADDRINUSE:
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
w.close()
raise RuntimeError("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
r, addr = a.accept() # r becomes asyncore's (self.)socket
a.close()
self.trigger = w
asyncore.dispatcher.__init__(self, r, map=map)
def _close(self):
# self.socket is r, and self.trigger is w, from __init__
self.socket.close()
self.trigger.close()
def _physical_pull(self):
self.trigger.send(b'x')
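if __name__ == '__main__':
    # Tiny demonstration (not part of the original module): queue a thunk
    # from outside the mainloop and watch it run inside asyncore's select().
    import sys
    import threading

    def thunk():
        sys.stdout.write('thunk executed in the asyncore mainloop\n')

    socket_map = {}
    t = trigger(socket_map)
    t.pull_trigger(thunk)
    threading.Timer(0.5, t.close).start()
    asyncore.loop(map=socket_map, timeout=0.1)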
| agpl-3.0 |
epssy/hue | desktop/core/ext-py/lxml/benchmark/bench_etree.py | 30 | 10920 | import sys, copy
from itertools import *
from StringIO import StringIO
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange)
TEXT = "some ASCII text"
UTEXT = u"some klingon: \uF8D2"  # U+F8D2, Klingon in the ConScript private-use mapping
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
@nochange
def bench_iter_children(self, root):
for child in root:
pass
@nochange
def bench_iter_children_reversed(self, root):
for child in reversed(root):
pass
@nochange
def bench_first_child(self, root):
for i in self.repeat1000:
child = root[0]
@nochange
def bench_last_child(self, root):
for i in self.repeat1000:
child = root[-1]
@nochange
def bench_middle_child(self, root):
pos = len(root) / 2
for i in self.repeat1000:
child = root[pos]
@nochange
@with_attributes(False)
@with_text(text=True)
@onlylib('lxe', 'ET')
def bench_tostring_text_ascii(self, root):
self.etree.tostring(root, method="text")
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
def bench_tostring_text_unicode(self, root):
self.etree.tostring(root, method="text", encoding=unicode)
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe', 'ET')
def bench_tostring_text_utf16(self, root):
self.etree.tostring(root, method="text", encoding='UTF-16')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
@children
def bench_tostring_text_utf8_with_tail(self, children):
for child in children:
self.etree.tostring(child, method="text",
encoding='UTF-8', with_tail=True)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8(self, root):
self.etree.tostring(root, encoding='UTF-8')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf16(self, root):
self.etree.tostring(root, encoding='UTF-16')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8_unicode_XML(self, root):
xml = unicode(self.etree.tostring(root, encoding='UTF-8'), 'UTF-8')
self.etree.XML(xml)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_write_utf8_parse_stringIO(self, root):
f = StringIO()
self.etree.ElementTree(root).write(f, encoding='UTF-8')
f.seek(0)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_parse_stringIO(self, root_xml):
f = StringIO(root_xml)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_XML(self, root_xml):
self.etree.XML(root_xml)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_stringIO(self, root_xml):
f = StringIO(root_xml)
for event, element in self.etree.iterparse(f):
pass
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_stringIO_clear(self, root_xml):
f = StringIO(root_xml)
for event, element in self.etree.iterparse(f):
element.clear()
def bench_append_from_document(self, root1, root2):
# == "1,2 2,3 1,3 3,1 3,2 2,1" # trees 1 and 2, or 2 and 3, or ...
for el in root2:
root1.append(el)
def bench_insert_from_document(self, root1, root2):
pos = len(root1)/2
for el in root2:
root1.insert(pos, el)
pos = pos + 1
def bench_rotate_children(self, root):
# == "1 2 3" # runs on any single tree independently
for i in range(100):
el = root[0]
del root[0]
root.append(el)
def bench_reorder(self, root):
for i in range(1,len(root)/2):
el = root[0]
del root[0]
root[-i:-i] = [ el ]
def bench_reorder_slice(self, root):
for i in range(1,len(root)/2):
els = root[0:1]
del root[0]
root[-i:-i] = els
def bench_clear(self, root):
root.clear()
@nochange
@children
def bench_has_children(self, children):
for child in children:
if child and child and child and child and child:
pass
@nochange
@children
def bench_len(self, children):
for child in children:
map(len, repeat(child, 20))
@children
def bench_create_subelements(self, children):
SubElement = self.etree.SubElement
for child in children:
SubElement(child, '{test}test')
def bench_append_elements(self, root):
Element = self.etree.Element
for child in root:
el = Element('{test}test')
child.append(el)
@nochange
@children
def bench_makeelement(self, children):
empty_attrib = {}
for child in children:
child.makeelement('{test}test', empty_attrib)
@nochange
@children
def bench_create_elements(self, children):
Element = self.etree.Element
for child in children:
Element('{test}test')
@children
def bench_replace_children_element(self, children):
Element = self.etree.Element
for child in children:
el = Element('{test}test')
child[:] = [el]
@children
def bench_replace_children(self, children):
els = [ self.etree.Element("newchild") ]
for child in children:
child[:] = els
def bench_remove_children(self, root):
for child in root:
root.remove(child)
def bench_remove_children_reversed(self, root):
for child in reversed(root):
root.remove(child)
@children
def bench_set_attributes(self, children):
for child in children:
child.set('a', 'bla')
@with_attributes(True)
@children
@nochange
def bench_get_attributes(self, children):
for child in children:
child.get('bla1')
child.get('{attr}test1')
@children
def bench_setget_attributes(self, children):
for child in children:
child.set('a', 'bla')
for child in children:
child.get('a')
@nochange
def bench_root_getchildren(self, root):
root.getchildren()
@nochange
def bench_root_list_children(self, root):
list(root)
@nochange
@children
def bench_getchildren(self, children):
for child in children:
child.getchildren()
@nochange
@children
def bench_get_children_slice(self, children):
for child in children:
child[:]
@nochange
@children
def bench_get_children_slice_2x(self, children):
for child in children:
child[:]
child[:]
@nochange
@children
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy(self, children):
for child in children:
copy.deepcopy(child)
@nochange
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy_all(self, root):
copy.deepcopy(root)
@nochange
@children
def bench_tag(self, children):
for child in children:
child.tag
@nochange
@children
def bench_tag_repeat(self, children):
for child in children:
for i in self.repeat100:
child.tag
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text(self, children):
for child in children:
child.text
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text_repeat(self, children):
for child in children:
for i in self.repeat500:
child.text
@children
def bench_set_text(self, children):
text = TEXT
for child in children:
child.text = text
@children
def bench_set_utext(self, children):
text = UTEXT
for child in children:
child.text = text
@nochange
@onlylib('lxe')
def bench_index(self, root):
for child in root:
root.index(child)
@nochange
@onlylib('lxe')
def bench_index_slice(self, root):
for child in root[5:100]:
root.index(child, 5, 100)
@nochange
@onlylib('lxe')
def bench_index_slice_neg(self, root):
for child in root[-100:-5]:
root.index(child, start=-100, stop=-5)
@nochange
def bench_getiterator_all(self, root):
list(root.getiterator())
@nochange
def bench_getiterator_islice(self, root):
list(islice(root.getiterator(), 10, 110))
@nochange
def bench_getiterator_tag(self, root):
list(islice(root.getiterator(self.SEARCH_TAG), 3, 10))
@nochange
def bench_getiterator_tag_all(self, root):
list(root.getiterator(self.SEARCH_TAG))
@nochange
def bench_getiterator_tag_none(self, root):
list(root.getiterator("{ThisShould}NeverExist"))
@nochange
def bench_getiterator_tag_text(self, root):
[ e.text for e in root.getiterator(self.SEARCH_TAG) ]
@nochange
def bench_findall(self, root):
root.findall(".//*")
@nochange
def bench_findall_child(self, root):
root.findall(".//*/" + self.SEARCH_TAG)
@nochange
def bench_findall_tag(self, root):
root.findall(".//" + self.SEARCH_TAG)
@nochange
def bench_findall_path(self, root):
root.findall(".//*[%s]/./%s/./*" % (self.SEARCH_TAG, self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_xpath_path(self, root):
ns, tag = self.SEARCH_TAG[1:].split('}')
root.xpath(".//*[p:%s]/./p:%s/./*" % (tag,tag),
namespaces = {'p':ns})
@nochange
@onlylib('lxe')
def bench_iterfind(self, root):
list(root.iterfind(".//*"))
@nochange
@onlylib('lxe')
def bench_iterfind_tag(self, root):
list(root.iterfind(".//" + self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_iterfind_islice(self, root):
list(islice(root.iterfind(".//*"), 10, 110))
if __name__ == '__main__':
benchbase.main(BenchMark)
| apache-2.0 |
modera/mcloud | mcloud/plugins/monitor.py | 1 | 1101 | import inject
from mcloud.application import ApplicationController
from mcloud.events import EventBus
from mcloud.plugin import IMcloudPlugin
from mcloud.plugins import Plugin
from mcloud.txdocker import IDockerClient
from twisted.internet import reactor
from twisted.python import log
from zope.interface import implements
class DockerMonitorPlugin(Plugin):
"""
    Monitors docker events and emits a "containers.updated" event when non-internal
containers change their state.
"""
implements(IMcloudPlugin)
client = inject.attr(IDockerClient)
event_bus = inject.attr(EventBus)
app_controller = inject.attr(ApplicationController)
def setup(self):
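        # Event monitoring is currently disabled; re-enabling the callLater
        # line below would subscribe to docker's event stream at startup.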
# reactor.callLater(0, self.attach_to_events)
pass
def on_event(self, event):
if not self.app_controller.is_internal(event['id']):
log.msg('New docker event: %s' % event)
self.event_bus.fire_event('containers.updated', event)
def attach_to_events(self, *args):
log.msg('Start monitoring docker events')
return self.client.events(self.on_event)
| apache-2.0 |