repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
w3nd1go/android_external_skia | platform_tools/android/gyp_gen/makefile_writer.py | 25 | 7208 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions for creating an Android.mk from already created dictionaries.
"""
import os
def write_group(f, name, items, append):
"""Helper function to list all names passed to a variable.
Args:
f: File open for writing (Android.mk)
name: Name of the makefile variable (e.g. LOCAL_CFLAGS)
items: list of strings to be passed to the variable.
append: Whether to append to the variable or overwrite it.
"""
if not items:
return
# Copy the list so we can prepend it with its name.
items_to_write = list(items)
if append:
items_to_write.insert(0, '%s +=' % name)
else:
items_to_write.insert(0, '%s :=' % name)
f.write(' \\\n\t'.join(items_to_write))
f.write('\n\n')
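# A minimal illustration (values are hypothetical, not taken from a real VarsDict):
#   write_group(f, 'LOCAL_CFLAGS', ['-Wall', '-Werror'], False)
# writes the tab-indented, backslash-continued block
#   LOCAL_CFLAGS := \
#       -Wall \
#       -Werror
# followed by a blank line; passing append=True emits '+=' instead of ':='.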
def write_local_vars(f, var_dict, append, name):
"""Helper function to write all the members of var_dict to the makefile.
Args:
f: File open for writing (Android.mk)
var_dict: VarsDict holding the unique values for one configuration.
append: Whether to append to each makefile variable or overwrite it.
name: If not None, a string to be appended to each key.
"""
for key in var_dict.keys():
_key = key
_items = var_dict[key]
if key == 'LOCAL_CFLAGS':
# Always append LOCAL_CFLAGS. This allows us to define some early on in
# the makefile and not overwrite them.
_append = True
elif key == 'DEFINES':
# For DEFINES, we want to append to LOCAL_CFLAGS.
_append = True
_key = 'LOCAL_CFLAGS'
_items_with_D = []
for define in _items:
_items_with_D.append('-D' + define)
_items = _items_with_D
elif key == 'KNOWN_TARGETS':
# KNOWN_TARGETS are not needed in the final make file.
continue
else:
_append = append
if name:
_key += '_' + name
write_group(f, _key, _items, _append)
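# For illustration (hypothetical input): with name='arm', a var_dict entry of
# {'DEFINES': ['SK_DEBUG']} is redirected to the key 'LOCAL_CFLAGS_arm' and
# written as 'LOCAL_CFLAGS_arm += -DSK_DEBUG', because DEFINES entries are
# always appended to LOCAL_CFLAGS rather than getting their own variable.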
AUTOGEN_WARNING = (
"""
###############################################################################
#
# THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
#
# For bugs, please contact [email protected] or [email protected]
#
###############################################################################
"""
)
DEBUGGING_HELP = (
"""
###############################################################################
#
# PROBLEMS WITH SKIA DEBUGGING?? READ THIS...
#
# The debug build results in changes to the Skia headers. This means that those
# using libskia must also be built with the debug version of the Skia headers.
# There are a few scenarios where this comes into play:
#
# (1) You're building debug code that depends on libskia.
# (a) If libskia is built in release, then define SK_RELEASE when building
# your sources.
# (b) If libskia is built with debugging (see step 2), then no changes are
# needed since your sources and libskia have been built with SK_DEBUG.
# (2) You're building libskia in debug mode.
# (a) RECOMMENDED: You can build the entire system in debug mode. Do this by
# updating your build/core/config.mk to include -DSK_DEBUG on the line
# that defines COMMON_GLOBAL_CFLAGS
# (b) You can update all the users of libskia to define SK_DEBUG when they are
# building their sources.
#
# NOTE: If neither SK_DEBUG nor SK_RELEASE is defined, then Skia checks NDEBUG to
# determine which build type to use.
###############################################################################
"""
)
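# Sketch of option (2)(a) from the help text above (the exact line depends on the
# Android tree being built): in build/core/config.mk the define would be added
# roughly as
#   COMMON_GLOBAL_CFLAGS += ... -DSK_DEBUG
# so that libskia and everything that includes the Skia headers agree on SK_DEBUG.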
SKIA_TOOLS = (
"""
#############################################################
# Build the skia tools
#
# benchmark (timings)
include $(BASE_PATH)/bench/Android.mk
# diamond-master (one test to rule them all)
include $(BASE_PATH)/dm/Android.mk
"""
)
class VarsDictData(object):
"""Helper class to keep a VarsDict along with a name and optional condition.
"""
def __init__(self, vars_dict, name, condition=None):
"""Create a new VarsDictData.
Args:
vars_dict: A VarsDict. Can be accessed via self.vars_dict.
name: Name associated with the VarsDict. Can be accessed via
self.name.
condition: Optional string representing a condition. If not None,
used to create a conditional inside the makefile.
"""
self.vars_dict = vars_dict
self.condition = condition
self.name = name
def write_local_path(f):
"""Add the LOCAL_PATH line to the makefile.
Args:
f: File open for writing.
"""
f.write('LOCAL_PATH:= $(call my-dir)\n')
def write_clear_vars(f):
"""Add the CLEAR_VARS line to the makefile.
Args:
f: File open for writing.
"""
f.write('include $(CLEAR_VARS)\n')
def write_android_mk(target_dir, common, deviations_from_common):
"""Given all the variables, write the final make file.
Args:
target_dir: The full path to the directory to write Android.mk, or None
to use the current working directory.
common: VarsDict holding variables definitions common to all
configurations.
deviations_from_common: List of VarsDictData, one for each possible
configuration. VarsDictData.name will be appended to each key before
writing it to the makefile. VarsDictData.condition, if not None, will be
written to the makefile as a condition to determine whether to include
VarsDictData.vars_dict.
"""
target_file = 'Android.mk'
if target_dir:
target_file = os.path.join(target_dir, target_file)
with open(target_file, 'w') as f:
f.write(AUTOGEN_WARNING)
f.write('BASE_PATH := $(call my-dir)\n')
write_local_path(f)
f.write(DEBUGGING_HELP)
write_clear_vars(f)
# need flags to enable feedback driven optimization (FDO) when requested
# by the build system.
f.write('LOCAL_FDO_SUPPORT := true\n')
f.write('ifneq ($(strip $(TARGET_FDO_CFLAGS)),)\n')
f.write('\t# This should be the last -Oxxx specified in LOCAL_CFLAGS\n')
f.write('\tLOCAL_CFLAGS += -O2\n')
f.write('endif\n\n')
f.write('LOCAL_ARM_MODE := thumb\n')
# need a flag to tell the C side when we're on devices with large memory
# budgets (i.e. larger than the low-end devices that initially shipped)
# On arm, only define the flag if it has VFP. For all other architectures,
# always define the flag.
f.write('ifeq ($(TARGET_ARCH),arm)\n')
f.write('\tifeq ($(ARCH_ARM_HAVE_VFP),true)\n')
f.write('\t\tLOCAL_CFLAGS += -DANDROID_LARGE_MEMORY_DEVICE\n')
f.write('\tendif\n')
f.write('else\n')
f.write('\tLOCAL_CFLAGS += -DANDROID_LARGE_MEMORY_DEVICE\n')
f.write('endif\n\n')
f.write('# used for testing\n')
f.write('#LOCAL_CFLAGS += -g -O0\n\n')
f.write('ifeq ($(NO_FALLBACK_FONT),true)\n')
f.write('\tLOCAL_CFLAGS += -DNO_FALLBACK_FONT\n')
f.write('endif\n\n')
write_local_vars(f, common, False, None)
for data in deviations_from_common:
if data.condition:
f.write('ifeq ($(%s), true)\n' % data.condition)
write_local_vars(f, data.vars_dict, True, data.name)
if data.condition:
f.write('endif\n\n')
f.write('include $(BUILD_SHARED_LIBRARY)\n')
f.write(SKIA_TOOLS)
| bsd-3-clause |
dmitriy0611/django | tests/urlpatterns_reverse/tests.py | 7 | 42428 | # -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import sys
import unittest
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import override_script_prefix
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from . import middleware, urlconf_outer, views
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', 'view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', 'inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', 'inc-view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', None, '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/12/mixed_args/42/37/', 'inc-mixed-args', None, '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views should have None as the url_name. Regression data for #21157.
('/unnamed/normal/42/37/', None, None, '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', None, None, '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/12/no_kwargs/42/37/', 'inc-no-kwargs', None, '', 'inc-no-kwargs', views.empty_view, ('12', '42', '37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1': 10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
self.assertRaisesMessage(
ImproperlyConfigured,
"The included urlconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import.",
getattr, resolver, 'url_patterns'
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango20Warning)
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
try:
# this url exists, but requires an argument
reverse("people", args=[])
except NoReverseMatch as e:
pattern_description = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
self.assertIn(pattern_description, str(e))
else:
# we can't use .assertRaises, since we want to inspect the
# exception
self.fail("Expected a NoReverseMatch, but none occurred.")
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_reverse_returns_unicode(self):
name, expected, args, kwargs = test_data[0]
self.assertIsInstance(
reverse(name, args=args, kwargs=kwargs),
six.text_type
)
class ResolverTests(unittest.TestCase):
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced urlconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
try:
resolver.resolve(proxy_url)
except TypeError:
self.fail('Failed to coerce lazy object to text')
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
User.objects.create_user('alfred', '[email protected]', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
if six.PY2:
self.assertEqual(
b'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
Test that reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings('settings.py', extra="""
from django.core.urlresolvers import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
# (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_reverse_by_path_nested(self):
# Views that are added to urlpatterns using include() should be
# reversible by dotted path.
self.assertEqual(reverse('urlpatterns_reverse.views.nested_view'), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37, 42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using an include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37, 42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
Test reversing an URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
Test reversing an URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
Test reversing an URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
Test reversing an URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve_error_handler(400), handler)
self.assertEqual(self.resolver.resolve_error_handler(404), handler)
self.assertEqual(self.resolver.resolve_error_handler(500), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-dotted/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-object/')
# Regression test for #21157
self.assertRaises(ImportError, self.client.get, '/erroneous_unqualified/')
def test_erroneous_reverse(self):
"""
Ensure that a useful exception is raised when a regex is invalid in the
URLConf (#6170).
"""
# The regex error will be hit before NoReverseMatch can be raised
self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
# passing a callable should return the callable
self.assertEqual(get_callable(empty_view), empty_view)
def test_exceptions(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
with six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*"):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
with self.assertRaises(AttributeError):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
def test_include_app_name_but_no_namespace(self):
msg = "Must specify a namespace if specifying app_name."
with self.assertRaisesMessage(ValueError, msg):
include('urls', app_name='bar')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
match = resolve(test_url)
self.assertEqual(match.kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
url = reverse('lookahead-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead+/a-city/')
url = reverse('lookahead-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead-/a-city/')
url = reverse('lookbehind-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind+/a-city/')
url = reverse('lookbehind-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind-/a-city/')
def test_invalid_reverse(self):
with self.assertRaises(NoReverseMatch):
reverse('lookahead-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookahead-negative', kwargs={'city': 'not-a-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
| bsd-3-clause |
pombredanne/dateparser-1 | dateparser/conf.py | 1 | 2222 | # -*- coding: utf-8 -*-
from pkgutil import get_data
from yaml import load as load_yaml
"""
:mod:`dateparser`'s parsing behavior can be configured like below
*``PREFER_DAY_OF_MONTH``* defaults to ``current`` and can have ``first`` and ``last`` as values::
>>> from dateparser.conf import settings
>>> from dateparser import parse
>>> parse(u'December 2015')
datetime.datetime(2015, 12, 16, 0, 0)
>>> settings.update('PREFER_DAY_OF_MONTH', 'last')
>>> parse(u'December 2015')
datetime.datetime(2015, 12, 31, 0, 0)
>>> settings.update('PREFER_DAY_OF_MONTH', 'first')
>>> parse(u'December 2015')
datetime.datetime(2015, 12, 1, 0, 0)
*``PREFER_DATES_FROM``* defaults to ``current_period`` and can have ``past`` and ``future`` as values.
Assuming current date is June 16, 2015::
>>> from dateparser.conf import settings
>>> from dateparser import parse
>>> parse(u'March')
datetime.datetime(2015, 3, 16, 0, 0)
>>> settings.update('PREFER_DATES_FROM', 'future')
>>> parse(u'March')
datetime.datetime(2016, 3, 16, 0, 0)
*``SKIP_TOKENS``* is a ``list`` of tokens to discard while detecting language. Defaults to ``['t']``, which skips the T in ISO format datetime strings, e.g. ``2015-05-02T10:20:19+0000``.
This only works with :mod:`DateDataParser` like below:
>>> settings.update('SKIP_TOKENS', ['de']) # Turkish word for 'at'
>>> from dateparser.date import DateDataParser
>>> DateDataParser().get_date_data(u'27 Haziran 1981 de') # Turkish (at 27 June 1981)
{'date_obj': datetime.datetime(1981, 6, 27, 0, 0), 'period': 'day'}
"""
class Settings(object):
def __init__(self, **kwargs):
"""
Settings are now loaded using the data/settings.yaml file.
"""
data = get_data('data', 'settings.yaml')
data = load_yaml(data)
settings_data = data.pop('settings', {})
for datum in settings_data:
setattr(self, datum, settings_data[datum])
for key in kwargs:
setattr(self, key, kwargs[key])
def update(self, key, value):
setattr(self, key, value)
def reload_settings():
global settings
settings = Settings()
settings = Settings()
| bsd-3-clause |
luiseduardohdbackup/odoo | addons/crm/crm_phonecall.py | 255 | 14844 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
""" Model for CRM phonecalls """
_name = "crm.phonecall"
_description = "Phonecall"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'create_date': fields.datetime('Creation Date' , readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.'),
'user_id': fields.many2one('res.users', 'Responsible'),
'partner_id': fields.many2one('res.partner', 'Contact'),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection(
[('open', 'Confirmed'),
('cancel', 'Cancelled'),
('pending', 'Pending'),
('done', 'Held')
], string='Status', readonly=True, track_visibility='onchange',
help='The status is set to Confirmed, when a case is created.\n'
'When the call is over, the status is set to Held.\n'
'If the call is not applicable anymore, the status can be set to Cancelled.'),
'email_from': fields.char('Email', size=128, help="These people will receive email."),
'date_open': fields.datetime('Opened', readonly=True),
# phonecall fields
'name': fields.char('Call Summary', required=True),
'active': fields.boolean('Active', required=False),
'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',section_id),('section_id','=',False),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_phone': fields.char('Phone'),
'partner_mobile': fields.char('Mobile'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Date'),
'opportunity_id': fields.many2one ('crm.lead', 'Lead/Opportunity'),
}
def _get_default_state(self, cr, uid, context=None):
if context and context.get('default_state'):
return context.get('default_state')
return 'open'
_defaults = {
'date': fields.datetime.now,
'priority': '1',
'state': _get_default_state,
'user_id': lambda self, cr, uid, ctx: uid,
'active': 1
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'partner_phone': partner.phone,
'partner_mobile': partner.mobile,
}
return {'value': values}
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') == 'done':
values['date_closed'] = fields.datetime.now()
self.compute_duration(cr, uid, ids, context=context)
elif values.get('state') == 'open':
values['date_open'] = fields.datetime.now()
values['duration'] = 0.0
return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)
def compute_duration(self, cr, uid, ids, context=None):
for phonecall in self.browse(cr, uid, ids, context=context):
if phonecall.duration <= 0:
duration = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
values = {'duration': duration.seconds/float(60)}
self.write(cr, uid, [phonecall.id], values, context=context)
return True
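# Worked example (illustrative numbers): a held call whose 'date' was 90 seconds
# ago and whose duration is still <= 0 gets duration = 90 / 60.0 = 1.5, i.e. the
# float field ends up holding fractional minutes.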
def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
action :('schedule','Schedule a call'), ('log','Log a call')
"""
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for call in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = call.section_id and call.section_id.id or False
if not user_id:
user_id = call.user_id and call.user_id.id or False
if not schedule_time:
schedule_time = call.date
vals = {
'name' : call_summary,
'user_id' : user_id or False,
'categ_id' : categ_id or False,
'description' : call.description or False,
'date' : schedule_time,
'section_id' : section_id or False,
'partner_id': call.partner_id and call.partner_id.id or False,
'partner_phone' : call.partner_phone,
'partner_mobile' : call.partner_mobile,
'priority': call.priority,
'opportunity_id': call.opportunity_id and call.opportunity_id.id or False,
}
new_id = self.create(cr, uid, vals, context=context)
if action == 'log':
self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
phonecall_dict[call.id] = new_id
return phonecall_dict
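# Hedged usage sketch (the id and datetime below are illustrative only):
#   self.schedule_another_phonecall(cr, uid, [call_id],
#                                   '2015-06-16 10:00:00', 'Follow-up call',
#                                   action='log')
# returns {call_id: new_call_id}; with action='log' the copy is immediately
# marked Held ('done'), while the default action='schedule' leaves it in the
# default Confirmed state.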
def _call_create_partner(self, cr, uid, phonecall, context=None):
partner = self.pool.get('res.partner')
partner_id = partner.create(cr, uid, {
'name': phonecall.name,
'user_id': phonecall.user_id.id,
'comment': phonecall.description,
'address': []
})
return partner_id
def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
values = {}
if opportunity_id:
opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
values = {
'section_id' : opportunity.section_id and opportunity.section_id.id or False,
'partner_phone' : opportunity.phone,
'partner_mobile' : opportunity.mobile,
'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
}
return {'value' : values}
def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
self._call_set_partner_send_note(cr, uid, ids, context)
return write_res
def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
address = self.pool.get('res.partner')
return address.create(cr, uid, {
'parent_id': partner_id,
'name': phonecall.name,
'phone': phonecall.partner_phone,
})
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
Handle partner assignation during a lead conversion.
If action is 'create', create a new partner with the contact and assign the lead to the new partner_id.
Otherwise assign the lead to the specified partner_id.
:param list ids: phonecalls ids to process
:param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
:param int partner_id: partner to assign if any
:return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
"""
#TODO this is a duplication of the handle_partner_assignation method of crm_lead
partner_ids = {}
# If a partner_id is given, force this partner for all elements
force_partner_id = partner_id
for call in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if action == 'create':
partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
self._call_create_partner_address(cr, uid, call, partner_id, context=context)
self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
partner_ids[call.id] = partner_id
return partner_ids
def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
model_data = self.pool.get('ir.model.data')
# Select the view
tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
value = {
'name': _('Phone Call'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'crm.phonecall',
'res_id' : int(phonecall_id),
'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False,
}
return value
def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
partner = self.pool.get('res.partner')
opportunity = self.pool.get('crm.lead')
opportunity_dict = {}
default_contact = False
for call in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = call.partner_id and call.partner_id.id or False
if partner_id:
address_id = partner.address_get(cr, uid, [partner_id])['default']
if address_id:
default_contact = partner.browse(cr, uid, address_id, context=context)
opportunity_id = opportunity.create(cr, uid, {
'name': opportunity_summary or call.name,
'planned_revenue': planned_revenue,
'probability': probability,
'partner_id': partner_id or False,
'mobile': default_contact and default_contact.mobile,
'section_id': call.section_id and call.section_id.id or False,
'description': call.description or False,
'priority': call.priority,
'type': 'opportunity',
'phone': call.partner_phone or False,
'email_from': default_contact and default_contact.email,
})
vals = {
'partner_id': partner_id,
'opportunity_id': opportunity_id,
'state': 'done',
}
self.write(cr, uid, [call.id], vals, context=context)
opportunity_dict[call.id] = opportunity_id
return opportunity_dict
def action_make_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule a meeting on current phonecall.
:return dict: dictionary value for created meeting view
"""
partner_ids = []
phonecall = self.browse(cr, uid, ids[0], context)
if phonecall.partner_id and phonecall.partner_id.email:
partner_ids.append(phonecall.partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'default_phonecall_id': phonecall.id,
'default_partner_ids': partner_ids,
'default_user_id': uid,
'default_email_from': phonecall.email_from,
'default_name': phonecall.name,
}
return res
def action_button_convert2opportunity(self, cr, uid, ids, context=None):
"""
Convert a phonecall into an opp and then redirect to the opp view.
:param list ids: list of calls ids to convert (typically contains a single id)
:return dict: containing view information
"""
if len(ids) != 1:
raise osv.except_osv(_('Warning!'),_('It\'s only possible to convert one phonecall at a time.'))
opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)
# ----------------------------------------
# OpenChatter
# ----------------------------------------
def _call_set_partner_send_note(self, cr, uid, ids, context=None):
return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ghxandsky/ceph-deploy | ceph_deploy/hosts/__init__.py | 2 | 5008 | """
We deal (mostly) with remote hosts. To avoid special casing each different
command (e.g. using `yum` as opposed to `apt`) we can make a one-time call to
that remote host and set all the special cases for running commands depending
on the type of distribution/version we are dealing with.
"""
import logging
from ceph_deploy import exc
from ceph_deploy.hosts import debian, centos, fedora, suse, remotes, rhel
from ceph_deploy.connection import get_connection
logger = logging.getLogger()
def get(hostname,
username=None,
fallback=None,
detect_sudo=True,
use_rhceph=False):
"""
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
"""
conn = get_connection(
hostname,
username=username,
logger=logging.getLogger(hostname),
detect_sudo=detect_sudo
)
try:
conn.import_module(remotes)
except IOError as error:
if 'already closed' in getattr(error, 'message', ''):
raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
distro_name, release, codename = conn.remote_module.platform_information()
if not codename or not _get_distro(distro_name):
raise exc.UnsupportedPlatform(
distro=distro_name,
codename=codename,
release=release)
machine_type = conn.remote_module.machine_type()
module = _get_distro(distro_name, use_rhceph=use_rhceph)
module.name = distro_name
module.normalized_name = _normalized_distro_name(distro_name)
module.normalized_release = _normalized_release(release)
module.distro = module.normalized_name
module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific']
module.is_rpm = module.normalized_name in ['redhat', 'centos',
'fedora', 'scientific', 'suse']
module.is_deb = not module.is_rpm
module.release = release
module.codename = codename
module.conn = conn
module.machine_type = machine_type
module.init = module.choose_init(module)
module.packager = module.get_packager(module)
return module
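# Usage sketch (the hostname is hypothetical; attribute values depend on the remote
# host, see the docstring of get() above):
#   distro = get('node1.example.com', username='ceph')
#   distro.name, distro.release, distro.codename   # e.g. 'ubuntu', '12.04', 'precise'
#   distro.is_deb, distro.packager                  # package-manager helper set from get_packager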
def _get_distro(distro, fallback=None, use_rhceph=False):
if not distro:
return
distro = _normalized_distro_name(distro)
distributions = {
'debian': debian,
'ubuntu': debian,
'centos': centos,
'scientific': centos,
'redhat': centos,
'fedora': fedora,
'suse': suse,
}
if distro == 'redhat' and use_rhceph:
return rhel
else:
return distributions.get(distro) or _get_distro(fallback)
def _normalized_distro_name(distro):
distro = distro.lower()
if distro.startswith(('redhat', 'red hat')):
return 'redhat'
elif distro.startswith(('scientific', 'scientific linux')):
return 'scientific'
elif distro.startswith(('suse', 'opensuse')):
return 'suse'
elif distro.startswith('centos'):
return 'centos'
elif distro.startswith('linuxmint'):
return 'ubuntu'
return distro
def _normalized_release(release):
"""
A normalizer function to make sense of distro
release versions.
Returns an object with: major, minor, patch, and garbage
These attributes can be accessed as ints with prefixed "int"
attribute names, for example:
normalized_version.int_major
"""
release = release.strip()
class NormalizedVersion(object):
pass
v = NormalizedVersion() # fake object to get nice dotted access
v.major, v.minor, v.patch, v.garbage = (release.split('.') + ["0"]*4)[:4]
release_map = dict(major=v.major, minor=v.minor, patch=v.patch, garbage=v.garbage)
# safe int versions that remove non-numerical chars
    # for example 'rc1' in a version like '1-rc1'
for name, value in release_map.items():
if '-' in value: # get rid of garbage like -dev1 or -rc1
value = value.split('-')[0]
value = float(''.join(c for c in value if c.isdigit()) or 0)
int_name = "int_%s" % name
setattr(v, int_name, value)
return v
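# Worked example (illustrative): _normalized_release('6.4-rc1') returns an object with
#   v.major == '6', v.minor == '4-rc1', v.patch == '0', v.garbage == '0'
#   v.int_major == 6.0, v.int_minor == 4.0  (the '-rc1' garbage is dropped before the cast)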
| mit |
simbs/edx-platform | lms/djangoapps/courseware/management/commands/tests/test_dump_course.py | 44 | 9075 | # coding=utf-8
"""Tests for Django management commands"""
import json
from nose.plugins.attrib import attr
from path import Path as path
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
import factory
from django.conf import settings
from django.core.management import call_command
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
DATA_DIR = settings.COMMON_TEST_DATA_ROOT
XML_COURSE_DIRS = ['toy', 'simple', 'open_ended']
MAPPINGS = {
'edX/toy/2012_Fall': 'xml',
'edX/simple/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
}
TEST_DATA_MIXED_XML_MODULESTORE = mixed_store_config(
DATA_DIR, MAPPINGS, include_xml=True, xml_source_dirs=XML_COURSE_DIRS,
)
@attr('shard_1')
class CommandsTestBase(ModuleStoreTestCase):
"""
Base class for testing different django commands.
Must be subclassed using override_settings set to the modulestore
to be tested.
"""
__test__ = False
url_name = '2012_Fall'
def setUp(self):
super(CommandsTestBase, self).setUp()
self.test_course_key = modulestore().make_course_key("edX", "simple", "2012_Fall")
self.loaded_courses = self.load_courses()
def load_courses(self):
"""Load test courses and return list of ids"""
store = modulestore()
# Add a course with a unicode name.
unique_org = factory.Sequence(lambda n: u'ëḋẌ.%d' % n)
CourseFactory.create(
org=unique_org,
course=u'śíḿṕĺé',
display_name=u'2012_Fáĺĺ',
modulestore=store
)
courses = store.get_courses()
# NOTE: if xml store owns these, it won't import them into mongo
if self.test_course_key not in [c.id for c in courses]:
import_course_from_xml(
store, ModuleStoreEnum.UserID.mgmt_command, DATA_DIR, XML_COURSE_DIRS, create_if_not_present=True
)
return [course.id for course in store.get_courses()]
def call_command(self, name, *args, **kwargs):
"""Call management command and return output"""
out = StringIO() # To Capture the output of the command
call_command(name, *args, stdout=out, **kwargs)
out.seek(0)
return out.read()
def test_dump_course_ids(self):
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_ids', **kwargs)
dumped_courses = output.decode('utf-8').strip().split('\n')
course_ids = {unicode(course_id) for course_id in self.loaded_courses}
dumped_ids = set(dumped_courses)
self.assertEqual(course_ids, dumped_ids)
def test_correct_course_structure_metadata(self):
course_id = unicode(modulestore().make_course_key('edX', 'open_ended', '2012_Fall'))
args = [course_id]
kwargs = {'modulestore': 'default'}
try:
output = self.call_command('dump_course_structure', *args, **kwargs)
except TypeError, exception:
self.fail(exception)
dump = json.loads(output)
self.assertGreater(len(dump.values()), 0)
def test_dump_course_structure(self):
args = [unicode(self.test_course_key)]
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have metadata,
# but not inherited metadata:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertNotIn('inherited_metadata', element)
# Check a few elements in the course dump
test_course_key = self.test_course_key
parent_id = unicode(test_course_key.make_usage_key('chapter', 'Overview'))
self.assertEqual(dump[parent_id]['category'], 'chapter')
self.assertEqual(len(dump[parent_id]['children']), 3)
child_id = dump[parent_id]['children'][1]
self.assertEqual(dump[child_id]['category'], 'videosequence')
self.assertEqual(len(dump[child_id]['children']), 2)
video_id = unicode(test_course_key.make_usage_key('video', 'Welcome'))
self.assertEqual(dump[video_id]['category'], 'video')
self.assertItemsEqual(
dump[video_id]['metadata'].keys(),
['download_video', 'youtube_id_0_75', 'youtube_id_1_0', 'youtube_id_1_25', 'youtube_id_1_5']
)
self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])
# Check if there are the right number of elements
self.assertEqual(len(dump), 16)
def test_dump_inherited_course_structure(self):
args = [unicode(self.test_course_key)]
kwargs = {'modulestore': 'default', 'inherited': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... but does not contain inherited metadata containing a default value:
self.assertNotIn('due', element['inherited_metadata'])
def test_dump_inherited_course_structure_with_defaults(self):
args = [unicode(self.test_course_key)]
kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element in dump.itervalues():
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... and contains inherited metadata containing a default value:
self.assertIsNone(element['inherited_metadata']['due'])
def test_export_course(self):
tmp_dir = path(mkdtemp())
self.addCleanup(shutil.rmtree, tmp_dir)
filename = tmp_dir / 'test.tar.gz'
self.run_export_course(filename)
with tarfile.open(filename) as tar_file:
self.check_export_file(tar_file)
def test_export_course_stdout(self):
output = self.run_export_course('-')
with tarfile.open(fileobj=StringIO(output)) as tar_file:
self.check_export_file(tar_file)
def run_export_course(self, filename): # pylint: disable=missing-docstring
args = [unicode(self.test_course_key), filename]
kwargs = {'modulestore': 'default'}
return self.call_command('export_course', *args, **kwargs)
def check_export_file(self, tar_file): # pylint: disable=missing-docstring
names = tar_file.getnames()
# Check if some of the files are present.
        # The rest of the code should be covered by the tests for
# xmodule.modulestore.xml_exporter, used by the dump_course command
assert_in = self.assertIn
assert_in('edX-simple-2012_Fall', names)
assert_in('edX-simple-2012_Fall/policies/{}/policy.json'.format(self.url_name), names)
assert_in('edX-simple-2012_Fall/html/toylab.html', names)
assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)
class CommandsXMLTestCase(CommandsTestBase):
"""
Test case for management commands with the xml modulestore present.
"""
MODULESTORE = TEST_DATA_MIXED_XML_MODULESTORE
__test__ = True
class CommandsMongoTestCase(CommandsTestBase):
"""
Test case for management commands using the mixed mongo modulestore with old mongo as the default.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
__test__ = True
class CommandSplitMongoTestCase(CommandsTestBase):
"""
Test case for management commands using the mixed mongo modulestore with split as the default.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
__test__ = True
url_name = 'course'
| agpl-3.0 |
phaustin/pythermo | code/thermlib/rootfinder.py | 1 | 1456 | #!/usr/bin/env python
import numpy
from scipy import optimize
def find_interval(f, x, *args):
x1 = x
x2 = x
if x == 0.:
dx = 1./50.
else:
dx = x/50.
maxiter = 40
twosqrt = numpy.sqrt(2)
a = x
fa = f(a, *args)
b = x
fb = f(b, *args)
for i in range(maxiter):
dx = dx*twosqrt
a = x - dx
fa = f(a, *args)
b = x + dx
fb = f(b, *args)
if (fa*fb < 0.): return (a, b)
raise "Couldn't find a suitable range."
# This function evaluates a new point, sets the y range,
# and tests for convergence
def get_y(x, f, eps, ymax, ymin, *args):
y = f(x, *args)
ymax = max(ymax, y)
ymin = min(ymin, y)
converged = (abs(y) < eps*(ymax-ymin))
return (y, ymax, ymin, converged)
def fzero(the_func, root_bracket, *args, **parms):
    # the_func is the function we wish to find the zeros of
    # root_bracket is an initial guess of the zero location.
    # Can be a float or a sequence of two floats specifying a range
    # *args contains any other parameters needed for f
    # **parms can be eps (allowable error) or maxiter (max number of iterations.)
    eps = parms.get('eps', 1.e-12)
    maxiter = int(parms.get('maxiter', 100))
    try:
        a, b = root_bracket
    except TypeError:
        # a single starting guess was given: bracket the root around it first
        a, b = find_interval(the_func, root_bracket, *args)
    return optimize.zeros.brenth(the_func, a, b, args=args, xtol=eps, maxiter=maxiter)
def testfunc(x):
return numpy.sin(x)
if __name__=="__main__":
f = testfunc
x = 1.
print fzero(f, x)
print fzero(f, x, eps=1e-300, maxiter = 80.)
| mit |
alexproca/askbot-devel | askbot/migrations/0095_postize_award_and_repute.py | 18 | 31809 | # encoding: utf-8
import sys
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.utils.console import ProgressBar
class Migration(DataMigration):
def forwards(self, orm):
# ContentType for Post model should be created no later than in migration 0092
ct_post = orm['contenttypes.ContentType'].objects.get(app_label='askbot', model='post')
message = "Connecting award objects to posts"
num_awards = orm.Award.objects.count()
for aw in ProgressBar(orm.Award.objects.iterator(), num_awards, message):
ct = aw.content_type
if ct.app_label == 'askbot' and ct.model in ('question', 'answer', 'comment'):
aw.content_type = ct_post
try:
aw.object_id = orm.Post.objects.get(**{'self_%s__id' % str(ct.model): aw.object_id}).id
except orm.Post.DoesNotExist:
continue
aw.save()
###
message = "Connecting repute objects to posts"
num_reputes = orm.Repute.objects.count()
for rp in ProgressBar(orm.Repute.objects.iterator(), num_reputes, message):
if rp.question:
rp.question_post = orm.Post.objects.get(self_question__id=rp.question.id)
rp.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_posts'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'self_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'self_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Comment']"}),
'self_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
# "Post-processing" - added manually to add support for URL mapping
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Answer']", 'null': 'True', 'blank': 'True'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
acarmel/CouchPotatoServer | libs/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname in 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
            # or if it is immediately followed by an optgroup
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
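# Usage sketch (hedged; exact module paths follow html5lib's public API and may differ
# slightly in the version vendored here):
#   walker = html5lib.getTreeWalker("etree")
#   for token in Filter(walker(parsed_document)):
#       ...  # tokens for omissible start/end tags have been filtered out
# The serializer exposes the same behaviour through HTMLSerializer(omit_optional_tags=True).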
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/decomposition/plot_sparse_coding.py | 1 | 4054 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import matplotlib.pylab as plt
import numpy as np
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
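# Each row of D is one atom (a Ricker wavelet centred on a different sample) normalised
# to unit l2 norm, so D has shape (n_components, resolution). D_multi below simply
# stacks five such dictionaries built with different widths.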
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| mit |
txominpelu/airflow | airflow/jobs.py | 11 | 24429 | from builtins import str
from past.builtins import basestring
from collections import defaultdict
from datetime import datetime
import getpass
import logging
import signal
import socket
import subprocess
import sys
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings, utils
from airflow.configuration import conf
from airflow.utils import AirflowException, State
Base = models.Base
ID_LEN = models.ID_LEN
# Setting up a statsd client if needed
statsd = None
if conf.get('scheduler', 'statsd_on'):
from statsd import StatsClient
statsd = StatsClient(
host=conf.get('scheduler', 'statsd_host'),
port=conf.getint('scheduler', 'statsd_port'),
prefix=conf.get('scheduler', 'statsd_prefix'))
class BaseJob(Base):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getint('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.gethostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
logging.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allow for the job to be killed
externally. This makes it possible to monitor at the system level
what is actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
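# Illustrative timing only (hypothetical numbers): with heartrate=60 and
# a latest_heartbeat recorded 10 seconds ago, sleep_for below works out
# to 60 - 10 = 50, so this call sleeps 50 seconds; if more than 60
# seconds had already elapsed, sleep_for is negative and no sleep happens.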
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
session.close()
self.heartbeat_callback()
logging.debug('[heart] Boom.')
def run(self):
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
*args, **kwargs):
self.dag_id = dag_id
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
@utils.provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We're assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm += dag.schedule_interval
while dttm < datetime.now():
if dttm + task.sla + dag.schedule_interval < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm += dag.schedule_interval
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
from airflow import ascii
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n{ascii.bug}</code></pre>
""".format(**locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
utils.send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
for sla in slas:
sla.email_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
def process_dag(self, dag, executor):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
db_dag = session.query(
DagModel).filter(DagModel.dag_id == dag.dag_id).first()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (
datetime.now() - last_scheduler_run).total_seconds()
# if db_dag.scheduler_lock or
if secs_since_last < self.heartrate:
session.commit()
session.close()
return None
else:
# Taking a lock
db_dag.scheduler_lock = True
db_dag.last_scheduler_run = datetime.now()
session.commit()
TI = models.TaskInstance
logging.info(
"Getting latest instance "
"for all task in dag " + dag.dag_id)
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.group_by(TI.task_id).subquery('sq')
)
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
)
logging.debug("Querying max dates for each task")
latest_ti = qry.all()
ti_dict = {ti.task_id: ti for ti in latest_ti}
session.expunge_all()
session.commit()
logging.debug("{} rows returned".format(len(latest_ti)))
for task in dag.tasks:
if task.adhoc:
continue
if task.task_id not in ti_dict:
# Brand new task, let's get started
ti = TI(task, task.start_date)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.info(
'First run for {ti}'.format(**locals()))
executor.queue_task_instance(ti)
else:
ti = ti_dict[task.task_id]
ti.task = task # Hacky but worky
if ti.state == State.RUNNING:
continue # Only one task at a time
elif ti.state == State.UP_FOR_RETRY:
# If the task instance is up for retry, make sure
# the retry delay is met
if ti.is_runnable():
logging.debug('Triggering retry: ' + str(ti))
executor.queue_task_instance(ti)
elif ti.state == State.QUEUED:
# If it was queued we skip it so that it gets prioritized
# in self.prioritize_queued
continue
else:
# Trying to run the next schedule
next_schedule = (
ti.execution_date + task.schedule_interval)
if (
ti.task.end_date and
next_schedule > ti.task.end_date):
continue
ti = TI(
task=task,
execution_date=next_schedule,
)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.debug('Queuing next run: ' + str(ti))
executor.queue_task_instance(ti)
# Releasing the lock
logging.debug("Unlocking DAG (scheduler_lock)")
db_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag.dag_id)
.first()
)
db_dag.scheduler_lock = False
session.merge(db_dag)
session.commit()
session.close()
@utils.provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
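# Illustrative behaviour (hypothetical numbers): if a pool has 2 open
# slots and 5 queued task instances, only the 2 with the highest
# priority_weight (earliest start_date breaking ties) are re-queued on
# this pass; see the sort and the tis[:open_slots] slice below.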
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if (
ti.dag_id not in dagbag.dags or not
dagbag.dags[ti.dag_id].has_task(ti.task_id)):
# Deleting queued jobs that don't exist anymore
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
for pool, tis in list(d.items()):
open_slots = pools[pool].open_slots(session=session)
if open_slots > 0:
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis[:open_slots]:
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
logging.error("Queued task {} seems gone".format(ti))
session.delete(ti)
if task:
ti.task = task
if ti.are_dependencies_met():
executor.queue_task_instance(ti, force=True)
else:
session.delete(ti)
session.commit()
def _execute(self):
dag_id = self.dag_id
def signal_handler(signum, frame):
logging.error("SIGINT (ctrl-c) received")
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
utils.pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = dagbag.executor
executor.start()
i = 0
while not self.num_runs or self.num_runs > i:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
logging.exception(e)
i += 1
try:
if i % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except:
logging.error("Failed at reloading the dagbag")
if statsd:
statsd.incr('dag_refresh_error', 1, 1)
sleep(5)
if dag_id:
dags = [dagbag.dags[dag_id]]
else:
dags = [
dag for dag in dagbag.dags.values() if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
for dag in dags:
logging.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag or (dag.dag_id in paused_dag_ids):
continue
try:
self.process_dag(dag, executor)
self.manage_slas(dag)
except Exception as e:
logging.exception(e)
logging.info(
"Done queuing tasks, calling the executor's heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
logging.info("Loop took: {} seconds".format(duration_sec))
try:
self.import_errors(dagbag)
except Exception as e:
logging.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
logging.exception(e)
logging.error("Tachycardia!")
def heartbeat_callback(self):
if statsd:
statsd.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
*args, **kwargs):
self.dag = dag
dag.override_start_date(start_date)
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
# Build a list of all instances to run
tasks_to_run = {}
failed = []
succeeded = []
started = []
wont_run = []
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in utils.date_range(
start_date, end_date, task.dag.schedule_interval):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
# Triggering what is ready to get triggered
while tasks_to_run:
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS and key in tasks_to_run:
succeeded.append(key)
del tasks_to_run[key]
elif ti.is_runnable():
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
task_start_date=self.bf_start_date,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies)
ti.state = State.RUNNING
if key not in started:
started.append(key)
self.heartbeat()
executor.heartbeat()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
if ti.state == State.FAILED:
failed.append(key)
logging.error("Task instance " + str(key) + " failed")
del tasks_to_run[key]
# Removing downstream tasks from the one that has failed
for t in self.dag.get_task(task_id).get_flat_relatives(
upstream=False):
key = (ti.dag_id, t.task_id, execution_date)
if key in tasks_to_run:
wont_run.append(key)
del tasks_to_run[key]
elif ti.state == State.SUCCESS:
succeeded.append(key)
del tasks_to_run[key]
msg = (
"[backfill progress] "
"waiting: {0} | "
"succeeded: {1} | "
"kicked_off: {2} | "
"failed: {3} | "
"skipped: {4} ").format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(wont_run))
logging.info(msg)
executor.end()
session.close()
if failed:
raise AirflowException(
"Some tasks instances failed, here's the list:\n"+str(failed))
logging.info("All done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
force=False,
mark_success=False,
pickle_id=None,
task_start_date=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.force = force
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_start_date = task_start_date
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
task_start_date=self.task_start_date,
job_id=self.id,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
| apache-2.0 |
agiliq/django-graphos | graphos/renderers/flot.py | 1 | 3255 | import json
from .base import BaseChart
from ..utils import get_default_options, JSONEncoderForHTML
class BaseFlotChart(BaseChart):
""" LineChart """
def get_serieses(self):
# Assuming self.data_source.data is:
# [['Year', 'Sales', 'Expenses'], [2004, 100, 200], [2005, 300, 250]]
data_only = self.get_data()[1:]
# first_column = [2004, 2005]
first_column = [el[0] for el in data_only]
serieses = []
for i in range(1, len(self.header)):
current_column = [el[i] for el in data_only]
current_series = self.zip_list(first_column, current_column)
serieses.append(current_series)
# serieses = [[(2004, 100), (2005, 300)], [(2004, 200), (2005, 250)]]
return serieses
def get_series_objects(self):
series_objects = []
serieses = self.get_serieses()
for i in range(1, len(self.header)):
series_object = {}
series_object['label'] = self.header[i]
series_object['data'] = serieses[i - 1]
series_objects.append(series_object)
# series_objects = [{'label': 'Sales', 'data': [(2004, 100), (2005, 300)]}, {'label': 'Expenses', 'data': [(2004, 200), (2005, 250)]}]
return series_objects
def get_series_pie_objects(self):
series_objects = []
serieses = self.get_data()[1:]
try:
for i in serieses:
series_object = {}
series_object['label'] = i[0]
series_object['data'] = i[1]
series_objects.append(series_object)
except IndexError:
print("Input Data Format is [['Year', 'Sales'], [2004, 100], [2005, 300]]")
# series_objects = [{'label': '2004', 'data': 100}, {'label': '2005', 'data': 300}]
return json.dumps(series_objects, cls=JSONEncoderForHTML)
def get_series_objects_json(self):
return json.dumps(self.get_series_objects(), cls=JSONEncoderForHTML)
def get_options(self):
options = get_default_options()
options.update(self.options)
return options
def get_html_template(self):
return 'graphos/flot/html.html'
def get_js_template(self):
return 'graphos/flot/js.html'
class PointChart(BaseFlotChart):
def get_options(self):
options = get_default_options("points")
options.update(self.options)
return options
class LineChart(BaseFlotChart):
""" LineChart """
def get_options(self):
options = get_default_options("lines")
options.update(self.options)
return options
class BarChart(BaseFlotChart):
def get_options(self):
options = get_default_options("bars")
options.update(self.options)
return options
class ColumnChart(BaseFlotChart):
def get_options(self):
options = get_default_options("bars")
options.update(self.options)
options["horizontal"] = True
return options
class PieChart(BaseFlotChart):
def get_options(self):
options = get_default_options("pie")
options.update(self.options)
return options
def get_js_template(self):
return 'graphos/flot/pie_chart.html'
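# Usage sketch (illustrative only, not part of the original module; the
# SimpleDataSource import and the as_html() call are assumptions about the
# surrounding django-graphos API and may differ):
#
#   from graphos.sources.simple import SimpleDataSource
#   data = [['Year', 'Sales', 'Expenses'], [2004, 100, 200], [2005, 300, 250]]
#   chart = LineChart(SimpleDataSource(data=data))
#   html = chart.as_html()  # rendered via graphos/flot/html.html and js.html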
| bsd-2-clause |
nammaste6/kafka | system_test/utils/setup_utils.py | 117 | 1848 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# =================================================================
# setup_utils.py
# - This module provides some basic helper functions.
# =================================================================
import logging
import kafka_system_test_utils
import sys
class SetupUtils(object):
# dict to pass user-defined attributes to logger argument: "extra"
# to use: just update "thisClassName" to the appropriate value
thisClassName = '(ReplicaBasicTest)'
d = {'name_of_class': thisClassName}
logger = logging.getLogger("namedLogger")
anonLogger = logging.getLogger("anonymousLogger")
def __init__(self):
d = {'name_of_class': self.__class__.__name__}
self.logger.debug("#### constructor inside SetupUtils", extra=self.d)
def log_message(self, message):
print
self.anonLogger.info("======================================================")
self.anonLogger.info(message)
self.anonLogger.info("======================================================")
| apache-2.0 |
bertucho/moviestalk2 | venv/Lib/encodings/mac_greek.py | 593 | 13977 | """ Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-greek',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xb9' # 0x81 -> SUPERSCRIPT ONE
u'\xb2' # 0x82 -> SUPERSCRIPT TWO
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xb3' # 0x84 -> SUPERSCRIPT THREE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0384' # 0x8B -> GREEK TONOS
u'\xa8' # 0x8C -> DIAERESIS
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xa3' # 0x92 -> POUND SIGN
u'\u2122' # 0x93 -> TRADE MARK SIGN
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u2022' # 0x96 -> BULLET
u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
u'\u2030' # 0x98 -> PER MILLE SIGN
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xa6' # 0x9B -> BROKEN BAR
u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\xa7' # 0xAC -> SECTION SIGN
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xb0' # 0xAE -> DEGREE SIGN
u'\xb7' # 0xAF -> MIDDLE DOT
u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
u'\xac' # 0xC2 -> NOT SIGN
u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2015' # 0xD1 -> HORIZONTAL BAR
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
vbshah1992/microblog | flask/lib/python2.7/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py | 17 | 13150 | # sqlite/pysqlite.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database via pysqlite.
Note that pysqlite is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Full documentation on pysqlite is available at:
`<http://www.initd.org/pub/software/pysqlite/doc/usage-guide.html>`_
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database" portion of
the URL. Note that the format of a url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to the
**right** of the third slash. So connecting to a relative filepath looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you need **four**
slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be used.
Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify
``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME
or TIME types...confused yet ?) will not perform any bind parameter or result
processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older versions
of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that pysqlite
connections are still not safe to use concurrently in multiple threads.
In particular, any statement execution calls would need to be externally
mutexed, as Pysqlite does not provide for thread-safe propagation of error
messages among other things. So while even ``:memory:`` databases can be
shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default will use
:class:`.SingletonThreadPool`. This pool maintains a single connection per
thread, so that all access to the engine within the current thread use the
same ``:memory:`` database - other threads would access a different
``:memory:`` database.
* When a file-based database is specified, the dialect will use :class:`.NullPool`
as the source of connections. This pool closes and discards connections
which are returned to the pool immediately. SQLite file-based connections
have extremely low overhead, so pooling is not necessary. The scheme also
prevents a connection from being used again in a different thread and works
best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same connection
object must be shared among threads, since the database exists
only within the scope of that connection. The :class:`.StaticPool` implementation
will maintain a single connection globally, and the ``check_same_thread`` flag
can be passed to Pysqlite as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a temporary table
in a file-based SQLite database across multiple checkouts from the connection pool, such
as when using an ORM :class:`.Session` where the temporary table should continue to remain
after :meth:`.commit` or :meth:`.rollback` is called,
a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool`
if the scope is only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number of threads
that are to be used; beyond that number, connections will be closed out in a non-deterministic
way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets, never
plain strings, and accommodates ``unicode`` objects within bound parameter
values in all cases. Regardless of the SQLAlchemy string type in use,
string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
.. _pysqlite_serializable:
Serializable Transaction Isolation
----------------------------------
The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
To work around this issue, the ``BEGIN`` keyword can be emitted
at the start of each transaction. The following recipe establishes
a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN")
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date:_SQLite_pysqliteDate,
sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp,
}
)
# Py3K
#description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError, e:
try:
from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| bsd-3-clause |
NL66278/odoo | addons/google_account/controllers/main.py | 350 | 1270 | import simplejson
import urllib
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.web.controllers.main as webmain
from openerp.addons.web.http import SessionExpiredException
from werkzeug.exceptions import BadRequest
import werkzeug.utils
class google_auth(http.Controller):
@http.route('/google_account/authentication', type='http', auth="none")
def oauth2callback(self, **kw):
""" This route/function is called by Google when user Accept/Refuse the consent of Google """
state = simplejson.loads(kw['state'])
dbname = state.get('d')
service = state.get('s')
url_return = state.get('f')
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
if kw.get('code',False):
registry.get('google.%s' % service).set_all_tokens(cr,request.session.uid,kw['code'])
return werkzeug.utils.redirect(url_return)
elif kw.get('error'):
return werkzeug.utils.redirect("%s%s%s" % (url_return ,"?error=" , kw.get('error')))
else:
return werkzeug.utils.redirect("%s%s" % (url_return ,"?error=Unknown_error"))
| agpl-3.0 |
repotvsupertuga/repo | plugin.video.TVsupertuga/resources/lib/zsources/xmovies.py | 4 | 4807 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,time
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import cache
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['xmovies8.tv', 'xmovies8.ru']
self.base_link = 'https://xmovies8.ru'
self.moviesearch_link = '/movies/search?s=%s'
def movie(self, imdb, title, localtitle, year):
try:
url = self.searchMovie(title, year)
if url == None:
t = cache.get(self.getImdbTitle, 900, imdb)
if t != title:
url = self.searchMovie(t, year)
return url
except:
return
def getImdbTitle(self, imdb):
try:
t = 'http://www.omdbapi.com/?i=%s' % imdb
t = client.request(t)
t = json.loads(t)
t = cleantitle.normalize(t['Title'])
return t
except:
return
def searchMovie(self, title, year):
try:
title = cleantitle.normalize(title)
url = urlparse.urljoin(self.base_link, self.moviesearch_link % (cleantitle.geturl(title.replace('\'', '-'))))
r = client.request(url)
t = cleantitle.get(title)
r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
return url.encode('utf-8')
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
url = path = re.sub('/watching.html$', '', url.strip('/'))
url = referer = url + '/watching.html'
p = client.request(url)
p = re.findall('load_player\(.+?(\d+)', p)
p = urllib.urlencode({'id': p[0]})
headers = {
'Accept-Formating': 'application/json, text/javascript',
'Server': 'cloudflare-nginx',
'Referer': referer}
r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
r = client.request(r, post=p, headers=headers, XHR=True)
url = json.loads(r)['value']
url = client.request(url, headers=headers, XHR=True, output='geturl')
if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,'debridonly': False})
raise Exception()
r = client.request(url, headers=headers, XHR=True)
try:
src = json.loads(r)['playlist'][0]['sources']
links = [i['file'] for i in src if 'file' in i]
for i in links:
try:
sources.append(
{'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
'url': i, 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
for i in range(3):
u = directstream.googlepass(url)
if not u == None: break
return u
except:
return | gpl-2.0 |
njase/numpy | numpy/distutils/command/build.py | 187 | 1618 | from __future__ import division, absolute_import, print_function
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
sub_commands = [('config_cc', lambda *args: True),
('config_fc', lambda *args: True),
('build_src', old_build.has_ext_modules),
] + old_build.sub_commands
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
]
help_options = old_build.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
self.parallel = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
def run(self):
old_build.run(self)
| bsd-3-clause |
georgyberdyshev/ascend | pygtk/canvas/asclibrary.py | 1 | 2535 | '''Import the SWIG wrapper'''
import os
DEFAULT_CANVAS_MODEL_LIBRARY_FOLDER = os.path.join('..','..','models','test','canvas')
try:
import ascpy
except ImportError as e:
print "Error: Could not load ASCEND Library. Please check the paths \
ASCENDLIBRARY and LD_LIBRARY_PATH\n",e
from blocktype import BlockType
from blockstream import BlockStream
class ascPy(object):
'''
The ASCEND Library class. Everything that talks to ASCEND should be here.
'''
def __init__(self):
self.library = None
self.annodb = None
self.modules = None
self.types = None
self.canvas_blocks = []
self.streams = []
self.reporter = ascpy.getReporter()
self.defaultlibraryfolder = DEFAULT_CANVAS_MODEL_LIBRARY_FOLDER
def load_library(self,lib_name = None):
if lib_name == None:
return
lib_path = os.path.join('test','canvas',lib_name)
try:
self.library.clear()
self.library.load(lib_path)
except Exception as e:
self.library = ascpy.Library()
self.library.load(lib_path)
self.annodb = self.library.getAnnotationDatabase()
self.modules = self.library.getModules()
try:
self.blocktypes = set()
self.streamtypes = set()
for m in self.modules:
self.types = self.library.getModuleTypes(m)
for t in self.types:
#if t.hasParameters():
# continue
self.parse_types(t)
self.parse_streams(t)
except Exception as e:
print 'Error: ASCEND Blocks Could not be loaded \n',e
exit()
try:
del self.canvas_blocks[:]
for t in self.blocktypes:
b = BlockType(t,self.annodb)
self.canvas_blocks +=[b]
except Exception as e:
print 'Error: Could not load blocktypes \n',e
exit()
try:
for stream in self.streamtypes:
s = BlockStream(stream,self.annodb)
self.streams +=[s]
except Exception as e:
print 'Error: Could not load streams \n',e
exit()
'''
try:
for stream in streamtypes:
notes = self.annodb.getTypeRefinedNotesLang(stream,
ascpy.SymChar("inline"))
for n in notes:
types = str(n.getText()).split(',')
self.streams.append((str(n.getId()),types))
except Exception as e:
print 'Error: Could not load streamtypes \n',e
exit()
'''
def parse_types(self,t):
x = self.annodb.getNotes(t,ascpy.SymChar("block"),ascpy.SymChar("SELF"))
if x:
self.blocktypes.add(t)
def parse_streams(self,t):
x = self.annodb.getNotes(t,ascpy.SymChar("stream"),ascpy.SymChar("SELF"))
if x:
self.streamtypes.add(t)
# vim: set ts=4 noet:
| gpl-2.0 |
angelman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py | 118 | 11747 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
class TestRunResults(object):
def __init__(self, expectations, num_tests):
self.total = num_tests
self.remaining = self.total
self.expectations = expectations
self.expected = 0
self.unexpected = 0
self.unexpected_failures = 0
self.unexpected_crashes = 0
self.unexpected_timeouts = 0
self.tests_by_expectation = {}
self.tests_by_timeline = {}
self.results_by_name = {} # Map of test name to the last result for the test.
self.all_results = [] # All results from a run, including every iteration of every test.
self.unexpected_results_by_name = {}
self.failures_by_name = {}
self.total_failures = 0
self.expected_skips = 0
for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
self.tests_by_expectation[expectation] = set()
for timeline in test_expectations.TestExpectations.TIMELINES.values():
self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
self.slow_tests = set()
self.interrupted = False
def add(self, test_result, expected, test_is_slow):
self.tests_by_expectation[test_result.type].add(test_result.test_name)
self.results_by_name[test_result.test_name] = test_result
if test_result.type != test_expectations.SKIP:
self.all_results.append(test_result)
self.remaining -= 1
if len(test_result.failures):
self.total_failures += 1
self.failures_by_name[test_result.test_name] = test_result.failures
if expected:
self.expected += 1
if test_result.type == test_expectations.SKIP:
self.expected_skips += 1
else:
self.unexpected_results_by_name[test_result.test_name] = test_result
self.unexpected += 1
if len(test_result.failures):
self.unexpected_failures += 1
if test_result.type == test_expectations.CRASH:
self.unexpected_crashes += 1
elif test_result.type == test_expectations.TIMEOUT:
self.unexpected_timeouts += 1
if test_is_slow:
self.slow_tests.add(test_result.test_name)
class RunDetails(object):
def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
self.exit_code = exit_code
self.summarized_results = summarized_results
self.initial_results = initial_results
self.retry_results = retry_results
self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
def _interpret_test_failures(failures):
test_dict = {}
failure_types = [type(failure) for failure in failures]
# FIXME: get rid of all these is_* values once there is a 1:1 map between
# TestFailure type and test_expectations.EXPECTATION.
if test_failures.FailureMissingAudio in failure_types:
test_dict['is_missing_audio'] = True
if test_failures.FailureMissingResult in failure_types:
test_dict['is_missing_text'] = True
if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
test_dict['is_missing_image'] = True
if 'image_diff_percent' not in test_dict:
for failure in failures:
if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
test_dict['image_diff_percent'] = failure.diff_percent
return test_dict
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
"""Returns a dictionary containing a summary of the test runs, with the following fields:
'version': a version indicator
'fixable': The number of fixable tests (NOW - PASS)
'skipped': The number of skipped tests (NOW & SKIPPED)
'num_regressions': The number of non-flaky failures
'num_flaky': The number of flaky failures
'num_missing': The number of tests with missing results
'num_passes': The number of unexpected passes
'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
"""
results = {}
results['version'] = 3
tbe = initial_results.tests_by_expectation
tbt = initial_results.tests_by_timeline
results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
num_passes = 0
num_flaky = 0
num_missing = 0
num_regressions = 0
keywords = {}
for expecation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
keywords[expectation_enum] = expecation_string.upper()
for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
keywords[modifier_enum] = modifier_string.upper()
tests = {}
for test_name, result in initial_results.results_by_name.iteritems():
# Note that if a test crashed in the original run, we ignore
# whether or not it crashed when we retried it (if we retried it),
# and always consider the result not flaky.
expected = expectations.get_expectations_string(test_name)
result_type = result.type
actual = [keywords[result_type]]
if result_type == test_expectations.SKIP:
continue
test_dict = {}
if result.has_stderr:
test_dict['has_stderr'] = True
if result.reftest_type:
test_dict.update(reftest_type=list(result.reftest_type))
if expectations.has_modifier(test_name, test_expectations.WONTFIX):
test_dict['wontfix'] = True
if result_type == test_expectations.PASS:
num_passes += 1
# FIXME: include passing tests that have stderr output.
if expected == 'PASS':
continue
elif result_type == test_expectations.CRASH:
if test_name in initial_results.unexpected_results_by_name:
num_regressions += 1
elif result_type == test_expectations.MISSING:
if test_name in initial_results.unexpected_results_by_name:
num_missing += 1
elif test_name in initial_results.unexpected_results_by_name:
if retry_results and test_name not in retry_results.unexpected_results_by_name:
actual.extend(expectations.get_expectations_string(test_name).split(" "))
num_flaky += 1
elif retry_results:
retry_result_type = retry_results.unexpected_results_by_name[test_name].type
if result_type != retry_result_type:
if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
num_regressions += 1
else:
num_flaky += 1
actual.append(keywords[retry_result_type])
else:
num_regressions += 1
else:
num_regressions += 1
test_dict['expected'] = expected
test_dict['actual'] = " ".join(actual)
test_dict.update(_interpret_test_failures(result.failures))
if retry_results:
retry_result = retry_results.unexpected_results_by_name.get(test_name)
if retry_result:
test_dict.update(_interpret_test_failures(retry_result.failures))
# Store test hierarchically by directory. e.g.
# foo/bar/baz.html: test_dict
# foo/bar/baz1.html: test_dict
#
# becomes
# foo: {
# bar: {
# baz.html: test_dict,
# baz1.html: test_dict
# }
# }
parts = test_name.split('/')
current_map = tests
for i, part in enumerate(parts):
if i == (len(parts) - 1):
current_map[part] = test_dict
break
if part not in current_map:
current_map[part] = {}
current_map = current_map[part]
results['tests'] = tests
results['num_passes'] = num_passes
results['num_flaky'] = num_flaky
results['num_missing'] = num_missing
results['num_regressions'] = num_regressions
results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
results['interrupted'] = initial_results.interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
results['layout_tests_dir'] = port_obj.layout_tests_dir()
results['has_wdiff'] = port_obj.wdiff_available()
results['has_pretty_patch'] = port_obj.pretty_patch_available()
results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
try:
# We only use the svn revision for using trac links in the results.html file,
# Don't do this by default since it takes >100ms.
# FIXME: Do we really need to populate this both here and in the json_results_generator?
if port_obj.get_option("builder_name"):
port_obj.host.initialize_scm()
results['revision'] = port_obj.host.scm().head_svn_revision()
except Exception, e:
_log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
# Handle cases where we're running outside of version control.
import traceback
_log.debug('Failed to learn head svn revision:')
_log.debug(traceback.format_exc())
results['revision'] = ""
return results
| bsd-3-clause |
crazy-cat/incubator-mxnet | example/speech-demo/train_lstm_proj.py | 25 | 13880 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
sys.path.insert(0, "../../python")
import time
import logging
import os.path
import mxnet as mx
import numpy as np
from speechSGD import speechSGD
from lstm_proj import lstm_unroll
from io_util import BucketSentenceIter, TruncatedSentenceIter, DataReadStream
from config_util import parse_args, get_checkpoint_path, parse_contexts
# some constants
METHOD_BUCKETING = 'bucketing'
METHOD_TBPTT = 'truncated-bptt'
def prepare_data(args):
batch_size = args.config.getint('train', 'batch_size')
num_hidden = args.config.getint('arch', 'num_hidden')
num_hidden_proj = args.config.getint('arch', 'num_hidden_proj')
num_lstm_layer = args.config.getint('arch', 'num_lstm_layer')
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
if num_hidden_proj > 0:
init_h = [('l%d_init_h'%l, (batch_size, num_hidden_proj)) for l in range(num_lstm_layer)]
else:
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
file_train = args.config.get('data', 'train')
file_dev = args.config.get('data', 'dev')
file_format = args.config.get('data', 'format')
feat_dim = args.config.getint('data', 'xdim')
train_data_args = {
"gpu_chunk": 32768,
"lst_file": file_train,
"file_format": file_format,
"separate_lines": True
}
dev_data_args = {
"gpu_chunk": 32768,
"lst_file": file_dev,
"file_format": file_format,
"separate_lines": True
}
train_sets = DataReadStream(train_data_args, feat_dim)
dev_sets = DataReadStream(dev_data_args, feat_dim)
return (init_states, train_sets, dev_sets)
def CrossEntropy(labels, preds):
labels = labels.reshape((-1,))
preds = preds.reshape((-1, preds.shape[1]))
loss = 0.
num_inst = 0
for i in range(preds.shape[0]):
label = labels[i]
if label > 0:
loss += -np.log(max(1e-10, preds[i][int(label)]))
num_inst += 1
    return loss, num_inst
def Acc_exclude_padding(labels, preds):
labels = labels.reshape((-1,))
preds = preds.reshape((-1, preds.shape[1]))
sum_metric = 0
num_inst = 0
for i in range(preds.shape[0]):
pred_label = np.argmax(preds[i], axis=0)
label = labels[i]
ind = np.nonzero(label.flat)
pred_label_real = pred_label.flat[ind]
label_real = label.flat[ind]
sum_metric += (pred_label_real == label_real).sum()
num_inst += len(pred_label_real)
return sum_metric, num_inst
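# Illustrative sketch: both metrics above work on flattened label/prediction
# arrays and treat label 0 as padding. The toy batch below (1 utterance,
# 3 frames, 3 classes) is made up purely for demonstration.
def _example_frame_metrics():
    labels = np.array([[1.0, 2.0, 0.0]])               # last frame is padding
    preds = np.array([[[0.1, 0.8, 0.1],
                       [0.2, 0.2, 0.6],
                       [0.9, 0.05, 0.05]]])
    loss, n = CrossEntropy(labels, preds)               # -log p(correct) summed over non-padding frames
    correct, m = Acc_exclude_padding(labels, preds)     # frame accuracy over the same frames
    return loss / n, correct / float(m)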
class SimpleLRScheduler(mx.lr_scheduler.LRScheduler):
"""A simple lr schedule that simply return `dynamic_lr`. We will set `dynamic_lr`
dynamically based on performance on the validation set.
"""
def __init__(self, dynamic_lr, effective_sample_count=1, momentum=0.9, optimizer="sgd"):
super(SimpleLRScheduler, self).__init__()
self.dynamic_lr = dynamic_lr
self.effective_sample_count = effective_sample_count
self.momentum = momentum
self.optimizer = optimizer
def __call__(self, num_update):
if self.optimizer == "speechSGD":
return self.dynamic_lr / self.effective_sample_count, self.momentum
else:
return self.dynamic_lr / self.effective_sample_count
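# Illustrative sketch: the scheduler just divides a manually adjusted learning
# rate by the effective sample count; the training loop mutates `dynamic_lr`
# directly when the dev set regresses. The numbers here are hypothetical.
def _example_lr_scheduler():
    scheduler = SimpleLRScheduler(dynamic_lr=0.1, effective_sample_count=4)
    lr_before = scheduler(num_update=1)    # 0.1 / 4 with the default "sgd" path
    scheduler.dynamic_lr /= 2.0            # emulate one learning-rate decay step
    return lr_before, scheduler(num_update=2)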
def score_with_state_forwarding(module, eval_data, eval_metric):
eval_data.reset()
eval_metric.reset()
for eval_batch in eval_data:
module.forward(eval_batch, is_train=False)
module.update_metric(eval_metric, eval_batch.label)
# copy over states
outputs = module.get_outputs()
# outputs[0] is softmax, 1:end are states
for i in range(1, len(outputs)):
outputs[i].copyto(eval_data.init_state_arrays[i-1])
def get_initializer(args):
init_type = getattr(mx.initializer, args.config.get('train', 'initializer'))
init_scale = args.config.getfloat('train', 'init_scale')
if init_type is mx.initializer.Xavier:
return mx.initializer.Xavier(magnitude=init_scale)
return init_type(init_scale)
def do_training(training_method, args, module, data_train, data_val):
from distutils.dir_util import mkpath
mkpath(os.path.dirname(get_checkpoint_path(args)))
batch_size = data_train.batch_size
batch_end_callbacks = [mx.callback.Speedometer(batch_size,
args.config.getint('train', 'show_every'))]
eval_allow_extra = True if training_method == METHOD_TBPTT else False
eval_metric = [mx.metric.np(CrossEntropy, allow_extra_outputs=eval_allow_extra),
mx.metric.np(Acc_exclude_padding, allow_extra_outputs=eval_allow_extra)]
eval_metric = mx.metric.create(eval_metric)
optimizer = args.config.get('train', 'optimizer')
momentum = args.config.getfloat('train', 'momentum')
learning_rate = args.config.getfloat('train', 'learning_rate')
lr_scheduler = SimpleLRScheduler(learning_rate, momentum=momentum, optimizer=optimizer)
if training_method == METHOD_TBPTT:
lr_scheduler.seq_len = data_train.truncate_len
n_epoch = 0
num_epoch = args.config.getint('train', 'num_epoch')
learning_rate = args.config.getfloat('train', 'learning_rate')
decay_factor = args.config.getfloat('train', 'decay_factor')
decay_bound = args.config.getfloat('train', 'decay_lower_bound')
clip_gradient = args.config.getfloat('train', 'clip_gradient')
weight_decay = args.config.getfloat('train', 'weight_decay')
if clip_gradient == 0:
clip_gradient = None
last_acc = -float("Inf")
last_params = None
module.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
module.init_params(initializer=get_initializer(args))
def reset_optimizer():
if optimizer == "sgd" or optimizer == "speechSGD":
module.init_optimizer(kvstore='device',
optimizer=args.config.get('train', 'optimizer'),
optimizer_params={'lr_scheduler': lr_scheduler,
'momentum': momentum,
'rescale_grad': 1.0,
'clip_gradient': clip_gradient,
'wd': weight_decay},
force_init=True)
else:
module.init_optimizer(kvstore='device',
optimizer=args.config.get('train', 'optimizer'),
optimizer_params={'lr_scheduler': lr_scheduler,
'rescale_grad': 1.0,
'clip_gradient': clip_gradient,
'wd': weight_decay},
force_init=True)
reset_optimizer()
while True:
tic = time.time()
eval_metric.reset()
for nbatch, data_batch in enumerate(data_train):
if training_method == METHOD_TBPTT:
lr_scheduler.effective_sample_count = data_train.batch_size * truncate_len
lr_scheduler.momentum = np.power(np.power(momentum, 1.0/(data_train.batch_size * truncate_len)), data_batch.effective_sample_count)
else:
if data_batch.effective_sample_count is not None:
                    lr_scheduler.effective_sample_count = 1  # data_batch.effective_sample_count
module.forward_backward(data_batch)
module.update()
module.update_metric(eval_metric, data_batch.label)
batch_end_params = mx.model.BatchEndParam(epoch=n_epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=None)
for callback in batch_end_callbacks:
callback(batch_end_params)
if training_method == METHOD_TBPTT:
# copy over states
outputs = module.get_outputs()
# outputs[0] is softmax, 1:end are states
for i in range(1, len(outputs)):
outputs[i].copyto(data_train.init_state_arrays[i-1])
for name, val in eval_metric.get_name_value():
logging.info('Epoch[%d] Train-%s=%f', n_epoch, name, val)
toc = time.time()
logging.info('Epoch[%d] Time cost=%.3f', n_epoch, toc-tic)
data_train.reset()
# test on eval data
score_with_state_forwarding(module, data_val, eval_metric)
# test whether we should decay learning rate
curr_acc = None
for name, val in eval_metric.get_name_value():
logging.info("Epoch[%d] Dev-%s=%f", n_epoch, name, val)
if name == 'CrossEntropy':
curr_acc = val
        assert curr_acc is not None, 'cannot find CrossEntropy in eval metric'
if n_epoch > 0 and lr_scheduler.dynamic_lr > decay_bound and curr_acc > last_acc:
logging.info('Epoch[%d] !!! Dev set performance drops, reverting this epoch',
n_epoch)
logging.info('Epoch[%d] !!! LR decay: %g => %g', n_epoch,
lr_scheduler.dynamic_lr, lr_scheduler.dynamic_lr / float(decay_factor))
lr_scheduler.dynamic_lr /= decay_factor
# we reset the optimizer because the internal states (e.g. momentum)
# might already be exploded, so we want to start from fresh
reset_optimizer()
module.set_params(*last_params)
else:
last_params = module.get_params()
last_acc = curr_acc
n_epoch += 1
# save checkpoints
mx.model.save_checkpoint(get_checkpoint_path(args), n_epoch,
module.symbol, *last_params)
if n_epoch == num_epoch:
break
if __name__ == '__main__':
args = parse_args()
args.config.write(sys.stdout)
training_method = args.config.get('train', 'method')
contexts = parse_contexts(args)
init_states, train_sets, dev_sets = prepare_data(args)
state_names = [x[0] for x in init_states]
batch_size = args.config.getint('train', 'batch_size')
num_hidden = args.config.getint('arch', 'num_hidden')
num_hidden_proj = args.config.getint('arch', 'num_hidden_proj')
num_lstm_layer = args.config.getint('arch', 'num_lstm_layer')
feat_dim = args.config.getint('data', 'xdim')
label_dim = args.config.getint('data', 'ydim')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
if training_method == METHOD_BUCKETING:
buckets = args.config.get('train', 'buckets')
buckets = list(map(int, re.split(r'\W+', buckets)))
data_train = BucketSentenceIter(train_sets, buckets, batch_size, init_states, feat_dim=feat_dim)
data_val = BucketSentenceIter(dev_sets, buckets, batch_size, init_states, feat_dim=feat_dim)
def sym_gen(seq_len):
sym = lstm_unroll(num_lstm_layer, seq_len, feat_dim, num_hidden=num_hidden,
num_label=label_dim, num_hidden_proj=num_hidden_proj)
data_names = ['data'] + state_names
label_names = ['softmax_label']
return (sym, data_names, label_names)
module = mx.mod.BucketingModule(sym_gen,
default_bucket_key=data_train.default_bucket_key,
context=contexts)
do_training(training_method, args, module, data_train, data_val)
elif training_method == METHOD_TBPTT:
truncate_len = args.config.getint('train', 'truncate_len')
data_train = TruncatedSentenceIter(train_sets, batch_size, init_states,
truncate_len=truncate_len, feat_dim=feat_dim)
data_val = TruncatedSentenceIter(dev_sets, batch_size, init_states,
truncate_len=truncate_len, feat_dim=feat_dim,
do_shuffling=False, pad_zeros=True)
sym = lstm_unroll(num_lstm_layer, truncate_len, feat_dim, num_hidden=num_hidden,
num_label=label_dim, output_states=True, num_hidden_proj=num_hidden_proj)
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
module = mx.mod.Module(sym, context=contexts, data_names=data_names,
label_names=label_names)
do_training(training_method, args, module, data_train, data_val)
else:
raise RuntimeError('Unknown training method: %s' % training_method)
print("="*80)
print("Finished Training")
print("="*80)
args.config.write(sys.stdout)
| apache-2.0 |
tectronics/open-ihm | src/openihm/gui/interface/frmproject_configure_wildfoodincome.py | 3 | 6232 | #!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from control.controller import Controller
from mixins import TableViewMixin
class WildfoodIncomeManager(TableViewMixin):
def getProjectWildfoods(self):
incomes = []
row = 0
while (self.tblSelectedWildfoods.model().item(row,0)):
val = self.tblSelectedWildfoods.model().item(row,0).text()
incomes.append(val)
row = row + 1
return incomes
def displayAvailableWildfoods(self):
''' Retrieve and display available wildfood '''
incomes = self.project.getFoodIncomes("wildfoods")
model = QStandardItemModel(1,1)
# set model headers
model.setHorizontalHeaderItem(0,QStandardItem('Income Source'))
# add data rows
num = 0
for income in incomes:
qtIncome = QStandardItem( income)
model.setItem( num, 0, qtIncome )
num = num + 1
self.tblAvailableWildfoods.setModel(model)
self.tblAvailableWildfoods.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.tblAvailableWildfoods.resizeColumnsToContents()
def displaySelectedWildfoods(self):
''' Retrieve and display Project Wildfood Incomes'''
incomes = self.project.getIncomeSources("wildfoods")
model = QStandardItemModel(1,1)
# set model headers
model.setHorizontalHeaderItem(0,QStandardItem('Income Source'))
# add data rows
num = 0
for income in incomes:
qtIncome = QStandardItem( income.name )
model.setItem( num, 0, qtIncome )
num = num + 1
self.tblSelectedWildfoods.setModel(model)
self.tblSelectedWildfoods.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.tblSelectedWildfoods.resizeColumnsToContents()
def moveAllWildfoods(self):
''' Add all available wildfoods to Project'''
row = 0
while( self.tblAvailableWildfoods.model().item(row,0)):
income = self.tblAvailableWildfoods.model().item(row,0).text()
currentProjectWildfoods = self.getProjectWildfoods()
if income not in currentProjectWildfoods:
self.project.addIncomeSource(income, "wildfoods")
else:
msg = "The income source labelled, %s, has already been added to project" % (income)
QMessageBox.information(self,"Project Configuration",msg)
row = row + 1
self.displaySelectedWildfoods()
def removeAllWildfoods(self):
''' remove all listed household or person characteristics from Project'''
msg = "Are you sure you want to remove all selected wildfoods from this project?"
ret = QMessageBox.question(self,"Confirm Deletion", msg, QMessageBox.Yes|QMessageBox.No)
# if deletion is rejected return without deleting
if ret == QMessageBox.No:
return
incomes = self.getProjectWildfoods()
self.project.deleteIncomeSources( incomes )
self.displaySelectedWildfoods()
def moveSelectedWildfoods(self):
''' Add selected available wildfoods to Project'''
numSelected = self.countRowsSelected(self.tblAvailableWildfoods)
if numSelected != 0:
selectedRows = self.getSelectedRows(self.tblAvailableWildfoods)
for row in selectedRows:
income = self.tblAvailableWildfoods.model().item(row,0).text()
currentProjectWildfoods = self.getProjectWildfoods()
if income not in currentProjectWildfoods:
self.project.addIncomeSource(income, "wildfoods")
else:
msg = "The income source labelled, %s, has already been added to project" % (income)
QMessageBox.information(self,"Project Configuration",msg)
self.displaySelectedWildfoods()
else:
msg = "Please select the wildfoods you want to add."
QMessageBox.information(self,"Project Configuration",msg)
def removeSelectedWildfoods(self):
''' remove selected wildfoods from Project'''
numSelected = self.countRowsSelected(self.tblSelectedWildfoods)
if numSelected != 0:
msg = "Are you sure you want to remove the selected wildfood(s) from this project?"
ret = QMessageBox.question(self,"Confirm Deletion", msg, QMessageBox.Yes|QMessageBox.No)
# if deletion is rejected return without deleting
if ret == QMessageBox.No:
return
selectedRows = self.getSelectedRows(self.tblSelectedWildfoods)
incomes = []
for row in selectedRows:
income = self.tblSelectedWildfoods.model().item(row,0).text()
incomes.append(income)
self.project.deleteIncomeSources( incomes )
self.displaySelectedWildfoods()
else:
msg = "Please select the wildfoods you want to remove."
QMessageBox.information(self,"Project Configuration",msg)
| lgpl-3.0 |
c2theg/DDoS_Information_Sharing | libraries/suds-jurko-0.6/suds/serviceproxy.py | 18 | 2838 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The service proxy provides access to web services.
Replaced by: L{client.Client}
"""
from suds import *
from suds.client import Client
class ServiceProxy(UnicodeMixin):
"""
A lightweight soap based web service proxy.
@ivar __client__: A client.
Everything is delegated to the 2nd generation API.
@type __client__: L{Client}
@note: Deprecated, replaced by L{Client}.
"""
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@keyword faults: Raise faults raised by server (default:True),
else return tuple from service method invocation as (http code, object).
@type faults: boolean
@keyword proxy: An http proxy to be specified on requests (default:{}).
The proxy is defined as {protocol:proxy,}
@type proxy: dict
"""
client = Client(url, **kwargs)
self.__client__ = client
def get_instance(self, name):
"""
Get an instance of a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def get_enum(self, name):
"""
Get an instance of an enumeration defined in the WSDL by name.
@param name: The name of a enumeration defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def __unicode__(self):
return unicode(self.__client__)
def __getattr__(self, name):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return getattr(self.__client__.service, name)
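# Illustrative sketch: ServiceProxy delegates everything to Client, so a call
# looks like the 2nd-generation API. The WSDL URL, type name and operation name
# below are hypothetical and only resolve against a real service.
def _example_service_proxy():
    service = ServiceProxy('http://example.com/service?wsdl', faults=True)
    person = service.get_instance('Person')      # build an instance of a WSDL-declared type
    return service.getPersonName(person)          # unknown attributes resolve to service operations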
| mit |
grupoprog3/proyecto_final | proyecto/flask/Lib/shutil.py | 1 | 41006 | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
import lzma
del lzma
_LZMA_SUPPORTED = True
except ImportError:
_LZMA_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which", "get_terminal_size",
"SameFileError"]
# disk_usage is added later, if available on the platform
class Error(OSError):
pass
class SameFileError(Error):
"""Raised when source and destination are the same file."""
class SpecialFileError(OSError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(OSError):
"""Raised when a command could not be executed"""
class ReadError(OSError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
    and unpacking registries fails"""
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise SameFileError("{!r} and {!r} are the same file".format(src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchown() is unavailable, and
# * either
# * fchownat() is unavailable or
# * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOSUP.)
# therefore we're out of options--we simply cannot chown the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
If source and destination are the same file, a SameFileError will be
raised.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
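# Illustrative sketch: the factory above is meant to be passed as copytree()'s
# ignore= argument. Calling it directly shows which names from a directory
# listing would be skipped; the paths and patterns are hypothetical.
def _example_ignore_patterns():
    ignore = ignore_patterns('*.pyc', 'tmp*')
    # copytree('src_project', 'dst_project', ignore=ignore) would skip these:
    return ignore('some/dir', ['a.py', 'a.pyc', 'tmpfile', 'data.txt'])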
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
if os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore,
copy_function)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
# Copying file access times may fail on Windows
if getattr(why, 'winerror', None) is None:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except OSError:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except OSError:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
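# Illustrative sketch: a common onerror handler makes read-only entries writable
# and retries, which helps when deleting trees produced by version-control
# tools on Windows. The directory name is hypothetical.
def _example_rmtree_onerror():
    def _retry_writable(func, path, exc_info):
        os.chmod(path, stat.S_IWRITE)
        func(path)
    rmtree('build_tmp', onerror=_retry_writable)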
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
sep = os.path.sep + (os.path.altsep or '')
return os.path.basename(path.rstrip(sep))
def move(src, dst, copy_function=copy2):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
The optional `copy_function` argument is a callable that will be used
to copy the source or it will be delegated to `copytree`.
By default, copy2() is used, but any function that supports the same
signature (like copy()) can be used.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself"
" '%s'." % (src, dst))
copytree(src, real_dst, copy_function=copy_function,
symlinks=True)
rmtree(src)
else:
copy_function(src, real_dst)
os.unlink(src)
return real_dst
def _destinsrc(src, dst):
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", "xz", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", ".bz2", or ".xz").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
if _LZMA_SUPPORTED:
tar_compression['xz'] = 'xz'
compress_ext['xz'] = '.xz'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
import zipfile
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
with zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED) as zf:
path = os.path.normpath(base_dir)
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
if _LZMA_SUPPORTED:
_ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')],
"xz'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
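# Illustrative sketch: registering a custom "listing" archiver (a made-up
# format). The callable receives base_name and base_dir plus the registered
# extra_args and the keyword options forwarded by make_archive().
def _example_register_listing_format():
    def _make_listing(base_name, base_dir, suffix='.lst', **kwargs):
        archive_name = base_name + suffix
        with open(archive_name, 'w') as f:
            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    f.write(os.path.join(dirpath, name) + '\n')
        return archive_name
    register_archive_format('listing', _make_listing, [('suffix', '.lst')],
                            "flat file listing")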
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
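# Illustrative sketch: archive a whole tree into /tmp/project_backup.tar.gz.
# The directory names are hypothetical.
def _example_make_archive():
    return make_archive('/tmp/project_backup', 'gztar', root_dir='/tmp/project')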
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
if _LZMA_SUPPORTED:
_UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [],
"xz'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
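# Illustrative sketch: unpack by extension, or force a format when the file
# name is not informative. The file names are hypothetical.
def _example_unpack_archive():
    unpack_archive('project_backup.tar.gz', extract_dir='restored')          # format inferred from extension
    unpack_archive('backup.data', extract_dir='restored', format='zip')      # extension ignored when format is given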
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned values is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (AttributeError, ValueError, OSError):
# stdout is None, closed, detached, or not a terminal, or
# os.get_terminal_size() is unsupported
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
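# Illustrative sketch: the result is an os.terminal_size named tuple, so both
# attribute and tuple access work; the fallback applies when no terminal is
# attached.
def _example_terminal_size():
    size = get_terminal_size(fallback=(100, 40))
    return size.columns, size.lines, tuple(size)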
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
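# Illustrative sketch: locate an executable on PATH or in an explicit search
# path. "python" is only an example command, and the directory is hypothetical.
def _example_which():
    interpreter = which('python')                          # None if not found
    local_tool = which('mytool', path='/opt/tools/bin')    # restrict the search path
    return interpreter, local_tool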
| apache-2.0 |
ChrisBeaumont/brut | bubbly/hyperopt.py | 2 | 2563 | """
A simple interface for random exploration of hyperparameter space
"""
import random
import numpy as np
from scipy import stats
from sklearn.metrics import auc
from sklearn import metrics as met
class Choice(object):
"""Randomly select from a list"""
def __init__(self, *choices):
self._choices = choices
def rvs(self):
return random.choice(self._choices)
class Space(object):
"""
Spaces gather and randomly sample
collections of hyperparameters
Any class with an rvs method is a valid hyperparameter
(e.g., anything in scipy.stats is a hyperparameter)
"""
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
def __iter__(self):
while True:
yield {k: v.rvs() for k, v in self._hyperparams.items()}
def auc_below_fpos(y, yp, fpos):
"""
Variant on the area under the ROC curve score
Only integrate the portion of the curve
to the left of a threshold in fpos
"""
fp, tp, th = met.roc_curve(y, yp)
good = (fp <= fpos)
return auc(fp[good], tp[good])
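

# A minimal sketch of auc_below_fpos on made-up labels and scores; it
# integrates the ROC curve only up to a false-positive rate of 0.5.
def _example_partial_auc():
    y = [0, 0, 1, 1]
    yp = [0.1, 0.4, 0.35, 0.8]
    return auc_below_fpos(y, yp, 0.5)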
def fmin(objective, space, threshold=np.inf):
"""
Generator that randomly samples a space,
and yields whenever a new minimum is encountered
Parameters
----------
objective : A function which takes hyperparameters
        as input, and computes an objective function value and a
        classifier as output
space : the Space to sample
threshold : A threshold in the objective function values.
If provided, will not yield anything until
the objective function falls below threshold
Yields
------
Tuples of (objective function, parameter dict, classifier)
"""
best = threshold
try:
for p in space:
f, clf = objective(**p)
if f < best:
best = f
yield best, p, clf
except KeyboardInterrupt:
pass
#default space for Gradient Boosted Decision trees
gb_space = Space(learning_rate = stats.uniform(1e-3, 1 - 1.01e-3),
n_estimators = Choice(50, 100, 200),
max_depth = Choice(1, 2, 3),
subsample = stats.uniform(1e-3, 1 - 1.01e-3))
#default space for WiseRF random forests
rf_space = Space(n_estimators = Choice(200, 400, 800, 1600),
min_samples_split = Choice(1, 2, 4),
criterion = Choice('gini', 'gainratio', 'infogain'),
max_features = Choice('auto'),
n_jobs = Choice(2))
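

# A minimal usage sketch of Space and fmin with a toy objective; the
# parameter names and the quadratic "score" below are made up purely for
# illustration and stand in for a real training-and-scoring function.
def _demo_random_search():
    demo_space = Space(x=stats.uniform(0, 1), n=Choice(1, 2, 3))

    def objective(x, n):
        # Return (objective value, "classifier"); here the classifier is
        # just the parameter dict, since no model is actually trained.
        return (x - 0.5) ** 2 + 0.01 * n, {'x': x, 'n': n}

    # Collect the first few improvements, then stop sampling.
    results = []
    for best, params, clf in fmin(objective, demo_space, threshold=1.0):
        results.append((best, params))
        if len(results) >= 3:
            break
    return results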
| mit |
Zhongqilong/mykbengineer | kbe/src/lib/python/Tools/scripts/fixdiv.py | 94 | 13938 | #! /usr/bin/env python3
"""fixdiv - tool to fix division operators.
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
This runs the script `yourscript.py' while writing warning messages
about all uses of the classic division operator to the file
`warnings'. The warnings look like this:
<file>:<line>: DeprecationWarning: classic <type> division
The warnings are written to stderr, so you must use `2>' for the I/O
redirect. I know of no way to redirect stderr on Windows in a DOS
box, so you will have to modify the script to set sys.stderr to some
kind of log file if you want to do this on Windows.
The warnings are not limited to the script; modules imported by the
script may also trigger warnings. In fact a useful technique is to
write a test script specifically intended to exercise all code in a
particular module or set of modules.
Then run `python fixdiv.py warnings'. This first reads the warnings,
looking for classic division warnings, and sorts them by file name and
line number. Then, for each file that received at least one warning,
it parses the file and tries to match the warnings up to the division
operators found in the source code. If it is successful, it writes
its findings to stdout, preceded by a line of dashes and a line of the
form:
Index: <file>
If the only findings found are suggestions to change a / operator into
a // operator, the output is acceptable input for the Unix 'patch'
program.
Here are the possible messages on stdout (N stands for a line number):
- A plain-diff-style change ('NcN', a line marked by '<', a line
containing '---', and a line marked by '>'):
A / operator was found that should be changed to //. This is the
recommendation when only int and/or long arguments were seen.
- 'True division / operator at line N' and a line marked by '=':
A / operator was found that can remain unchanged. This is the
recommendation when only float and/or complex arguments were seen.
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
A / operator was found for which int or long as well as float or
complex arguments were seen. This is highly unlikely; if it occurs,
you may have to restructure the code to keep the classic semantics,
or maybe you don't care about the classic semantics.
- 'No conclusive evidence on line N', line marked by '*':
A / operator was found for which no warnings were seen. This could
be code that was never executed, or code that was only executed
with user-defined objects as arguments. You will have to
investigate further. Note that // can be overloaded separately from
/, using __floordiv__. True division can also be separately
overloaded, using __truediv__. Classic division should be the same
as either of those. (XXX should I add a warning for division on
user-defined objects, to disambiguate this case from code that was
never executed?)
- 'Phantom ... warnings for line N', line marked by '*':
A warning was seen for a line not containing a / operator. The most
likely cause is a warning about code executed by 'exec' or eval()
(see note below), or an indirect invocation of the / operator, for
example via the div() function in the operator module. It could
also be caused by a change to the file between the time the test
script was run to collect warnings and the time fixdiv was run.
- 'More than one / operator in line N'; or
'More than one / operator per statement in lines N-N':
The scanner found more than one / operator on a single line, or in a
statement split across multiple lines. Because the warnings
framework doesn't (and can't) show the offset within the line, and
the code generator doesn't always give the correct line number for
operations in a multi-line statement, we can't be sure whether all
operators in the statement were executed. To be on the safe side,
by default a warning is issued about this case. In practice, these
  cases are usually safe, and the -m option suppresses these warnings.
- 'Can't find the / operator in line N', line marked by '*':
This really shouldn't happen. It means that the tokenize module
reported a '/' operator but the line it returns didn't contain a '/'
character at the indicated position.
- 'Bad warning for line N: XYZ', line marked by '*':
This really shouldn't happen. It means that a 'classic XYZ
division' warning was read with XYZ being something other than
'int', 'long', 'float', or 'complex'.
Notes:
- The augmented assignment operator /= is handled the same way as the
/ operator.
- This tool never looks at the // operator; no warnings are ever
generated for use of this operator.
- This tool never looks at the / operator when a future division
statement is in effect; no warnings are generated in this case, and
because the tool only looks at files for which at least one classic
division warning was seen, it will never look at files containing a
future division statement.
- Warnings may be issued for code not read from a file, but executed
using the exec() or eval() functions. These may have
<string> in the filename position, in which case the fixdiv script
will attempt and fail to open a file named '<string>' and issue a
warning about this failure; or these may be reported as 'Phantom'
warnings (see above). You're on your own to deal with these. You
could make all recommended changes and add a future division
statement to all affected files, and then re-run the test script; it
should not issue any warnings. If there are any, and you have a
hard time tracking down where they are generated, you can use the
-Werror option to force an error instead of a first warning,
generating a traceback.
- The tool should be run from the same directory as that from which
the original script was run, otherwise it won't be able to open
files given by relative pathnames.
"""
import sys
import getopt
import re
import tokenize
multi_ok = 0
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hm")
except getopt.error as msg:
usage(msg)
return 2
for o, a in opts:
if o == "-h":
print(__doc__)
return
if o == "-m":
global multi_ok
multi_ok = 1
if not args:
usage("at least one file argument is required")
return 2
if args[1:]:
sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0])
warnings = readwarnings(args[0])
if warnings is None:
return 1
files = list(warnings.keys())
if not files:
print("No classic division warnings read from", args[0])
return
files.sort()
exit = None
for filename in files:
x = process(filename, warnings[filename])
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
PATTERN = ("^(.+?):(\d+): DeprecationWarning: "
"classic (int|long|float|complex) division$")
def readwarnings(warningsfile):
prog = re.compile(PATTERN)
try:
f = open(warningsfile)
except IOError as msg:
sys.stderr.write("can't open: %s\n" % msg)
return
warnings = {}
while 1:
line = f.readline()
if not line:
break
m = prog.match(line)
if not m:
if line.find("division") >= 0:
sys.stderr.write("Warning: ignored input " + line)
continue
filename, lineno, what = m.groups()
list = warnings.get(filename)
if list is None:
warnings[filename] = list = []
list.append((int(lineno), sys.intern(what)))
f.close()
return warnings
def process(filename, list):
print("-"*70)
assert list # if this fails, readwarnings() is broken
try:
fp = open(filename)
except IOError as msg:
sys.stderr.write("can't open: %s\n" % msg)
return 1
print("Index:", filename)
f = FileContext(fp)
list.sort()
index = 0 # list[:index] has been processed, list[index:] is still to do
g = tokenize.generate_tokens(f.readline)
while 1:
startlineno, endlineno, slashes = lineinfo = scanline(g)
if startlineno is None:
break
assert startlineno <= endlineno is not None
orphans = []
while index < len(list) and list[index][0] < startlineno:
orphans.append(list[index])
index += 1
if orphans:
reportphantomwarnings(orphans, f)
warnings = []
while index < len(list) and list[index][0] <= endlineno:
warnings.append(list[index])
index += 1
if not slashes and not warnings:
pass
elif slashes and not warnings:
report(slashes, "No conclusive evidence")
elif warnings and not slashes:
reportphantomwarnings(warnings, f)
else:
if len(slashes) > 1:
if not multi_ok:
rows = []
lastrow = None
for (row, col), line in slashes:
if row == lastrow:
continue
rows.append(row)
lastrow = row
assert rows
if len(rows) == 1:
print("*** More than one / operator in line", rows[0])
else:
print("*** More than one / operator per statement", end=' ')
print("in lines %d-%d" % (rows[0], rows[-1]))
intlong = []
floatcomplex = []
bad = []
for lineno, what in warnings:
if what in ("int", "long"):
intlong.append(what)
elif what in ("float", "complex"):
floatcomplex.append(what)
else:
bad.append(what)
lastrow = None
for (row, col), line in slashes:
if row == lastrow:
continue
lastrow = row
line = chop(line)
if line[col:col+1] != "/":
print("*** Can't find the / operator in line %d:" % row)
print("*", line)
continue
if bad:
print("*** Bad warning for line %d:" % row, bad)
print("*", line)
elif intlong and not floatcomplex:
print("%dc%d" % (row, row))
print("<", line)
print("---")
print(">", line[:col] + "/" + line[col:])
elif floatcomplex and not intlong:
print("True division / operator at line %d:" % row)
print("=", line)
elif intlong and floatcomplex:
print("*** Ambiguous / operator (%s, %s) at line %d:" % (
"|".join(intlong), "|".join(floatcomplex), row))
print("?", line)
fp.close()
def reportphantomwarnings(warnings, f):
blocks = []
lastrow = None
lastblock = None
for row, what in warnings:
if row != lastrow:
lastblock = [row]
blocks.append(lastblock)
lastblock.append(what)
for block in blocks:
row = block[0]
whats = "/".join(block[1:])
print("*** Phantom %s warnings for line %d:" % (whats, row))
f.report(row, mark="*")
def report(slashes, message):
lastrow = None
for (row, col), line in slashes:
if row != lastrow:
print("*** %s on line %d:" % (message, row))
print("*", chop(line))
lastrow = row
class FileContext:
def __init__(self, fp, window=5, lineno=1):
self.fp = fp
        self.window = window
        self.lineno = lineno
self.eoflookahead = 0
self.lookahead = []
self.buffer = []
def fill(self):
while len(self.lookahead) < self.window and not self.eoflookahead:
line = self.fp.readline()
if not line:
self.eoflookahead = 1
break
self.lookahead.append(line)
def readline(self):
self.fill()
if not self.lookahead:
return ""
line = self.lookahead.pop(0)
self.buffer.append(line)
self.lineno += 1
return line
def truncate(self):
        del self.buffer[-self.window:]
def __getitem__(self, index):
self.fill()
bufstart = self.lineno - len(self.buffer)
lookend = self.lineno + len(self.lookahead)
if bufstart <= index < self.lineno:
return self.buffer[index - bufstart]
if self.lineno <= index < lookend:
return self.lookahead[index - self.lineno]
raise KeyError
def report(self, first, last=None, mark="*"):
if last is None:
last = first
for i in range(first, last+1):
try:
                line = self[i]
except KeyError:
line = "<missing line>"
print(mark, chop(line))
def scanline(g):
slashes = []
startlineno = None
endlineno = None
for type, token, start, end, line in g:
endlineno = end[0]
if startlineno is None:
startlineno = endlineno
if token in ("/", "/="):
slashes.append((start, line))
if type == tokenize.NEWLINE:
break
return startlineno, endlineno, slashes
def chop(line):
if line.endswith("\n"):
return line[:-1]
else:
return line
if __name__ == "__main__":
sys.exit(main())
| lgpl-3.0 |
wbbeyourself/cn-deep-learning | ipnd-neural-network/NN.py | 6 | 2597 | import numpy as np
class NeuralNetwork(object):
def sigmoid(self, x):
return 1/(1 + np.exp(-x))
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
# Activation function is the sigmoid function
self.activation_function = self.sigmoid
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array, column vector
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
#Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
#Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# 1 is the gradient of f'(x) where f(x) = x
output_delta = (targets - final_outputs) * 1
hidden_delta = np.dot(self.weights_hidden_to_output.T, output_delta) * hidden_outputs * (1-hidden_outputs)
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_delta, hidden_outputs.T)
self.weights_input_to_hidden += self.lr * np.dot(hidden_delta, inputs.T)
    # predict with an inputs_list
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
#Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
#Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
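

# A minimal usage sketch with made-up dimensions and data: a 3-input,
# 2-hidden, 1-output network trained on a single toy sample.
if __name__ == '__main__':
    net = NeuralNetwork(input_nodes=3, hidden_nodes=2, output_nodes=1,
                        learning_rate=0.1)
    net.train([0.5, -0.2, 0.1], [0.4])
    print(net.run([0.5, -0.2, 0.1]))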
| mit |
EvilKanoa/ardupilot | libraries/AP_OpticalFlow/examples/ADNS3080ImageGrabber/ADNS3080ImageGrabber.py | 53 | 6246 | # File: ADNS3080ImageGrabber.py
import serial
import string
import math
import time
from Tkinter import *
from threading import Timer
comPort = 'COM8' #default com port
comPortBaud = 115200
class App:
grid_size = 15
num_pixels = 30
image_started = FALSE
    image_current_row = 0
ser = serial.Serial()
pixel_dictionary = {}
def __init__(self, master):
# set main window's title
master.title("ADNS3080ImageGrabber")
frame = Frame(master)
frame.grid(row=0,column=0)
self.comPortStr = StringVar()
self.comPort = Entry(frame,textvariable=self.comPortStr)
self.comPort.grid(row=0,column=0)
self.comPort.delete(0, END)
self.comPort.insert(0,comPort)
self.button = Button(frame, text="Open", fg="red", command=self.open_serial)
self.button.grid(row=0,column=1)
self.entryStr = StringVar()
self.entry = Entry(frame,textvariable=self.entryStr)
self.entry.grid(row=0,column=2)
self.entry.delete(0, END)
self.entry.insert(0,"I")
self.send_button = Button(frame, text="Send", command=self.send_to_serial)
self.send_button.grid(row=0,column=3)
self.canvas = Canvas(master, width=self.grid_size*self.num_pixels, height=self.grid_size*self.num_pixels)
self.canvas.grid(row=1)
## start attempts to read from serial port
self.read_loop()
def __del__(self):
self.stop_read_loop()
def open_serial(self):
# close the serial port
if( self.ser.isOpen() ):
try:
self.ser.close()
except:
                pass  # do nothing
# open the serial port
try:
self.ser = serial.Serial(port=self.comPortStr.get(),baudrate=comPortBaud, timeout=1)
print("serial port '" + self.comPortStr.get() + "' opened!")
except:
print("failed to open serial port '" + self.comPortStr.get() + "'")
def send_to_serial(self):
if self.ser.isOpen():
self.ser.write(self.entryStr.get())
print "sent '" + self.entryStr.get() + "' to " + self.ser.portstr
else:
print "Serial port not open!"
def read_loop(self):
try:
self.t.cancel()
except:
            pass  # do nothing
#print("reading")
if( self.ser.isOpen() ) :
self.read_from_serial();
self.t = Timer(0.0,self.read_loop)
self.t.start()
def stop_read_loop(self):
try:
self.t.cancel()
except:
print("failed to cancel timer")
# do nothing
def read_from_serial(self):
if( self.ser.isOpen() ):
while( self.ser.inWaiting() > 0 ):
self.line_processed = FALSE
line = self.ser.readline()
# process the line read
if( line.find("-------------------------") == 0 ):
self.line_processed = TRUE
self.image_started = FALSE
self.image_current_row = 0
if( self.image_started == TRUE ):
if( self.image_current_row >= self.num_pixels ):
                        self.image_started = FALSE
else:
words = string.split(line,",")
if len(words) >= 30:
self.line_processed = TRUE
x = 0
for v in words:
try:
colour = int(v)
except:
colour = 0;
#self.display_pixel(x,self.image_current_row,colour)
self.display_pixel(self.num_pixels-1-self.image_current_row,self.num_pixels-1-x,colour)
x += 1
self.image_current_row += 1
else:
print("line " + str(self.image_current_row) + "incomplete (" + str(len(words)) + " of " + str(self.num_pixels) + "), ignoring")
#print("bad line: " + line);
if( line.find("image data") >= 0 ):
self.line_processed = TRUE
self.image_started = TRUE
self.image_current_row = 0
# clear canvas
#self.canvas.delete(ALL) # remove all items
#display the line if we couldn't understand it
if( self.line_processed == FALSE ):
print( line )
def display_default_image(self):
# display the grid
for x in range(0, self.num_pixels-1):
for y in range(0, self.num_pixels-1):
colour = x * y / 3.53
self.display_pixel(x,y,colour)
def display_pixel(self, x, y, colour):
if( x >= 0 and x < self.num_pixels and y >= 0 and y < self.num_pixels ) :
#find the old pixel if it exists and delete it
if self.pixel_dictionary.has_key(x+y*self.num_pixels) :
self.old_pixel = self.pixel_dictionary[x+y*self.num_pixels]
self.canvas.delete(self.old_pixel)
del(self.old_pixel)
fillColour = "#%02x%02x%02x" % (colour, colour, colour)
#draw a new pixel and add to pixel_array
self.new_pixel = self.canvas.create_rectangle(x*self.grid_size, y*self.grid_size, (x+1)*self.grid_size, (y+1)*self.grid_size, fill=fillColour)
self.pixel_dictionary[x+y*self.num_pixels] = self.new_pixel
## main loop ##
root = Tk()
#root.withdraw()
#serPort = SerialHandler(comPort,comPortBaud)
# create main display
app = App(root)
app.display_default_image()
print("entering main loop!")
root.mainloop()
app.stop_read_loop()
print("exiting")
| gpl-3.0 |
CentOS-PaaS-SIG/linchpin | linchpin/provision/action_plugins/gcp_compute_network.py | 3 | 1255 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
import linchpin.MockUtils.MockUtils as mock_utils
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
"""
Simple action plugin that returns the mocked output
when linchpin_mock is True
"""
super(ActionModule, self).run(tmp, task_vars)
# contains all the module arguments
module_args = self._task.args.copy()
# task vars.keys() contains all the variable required
# when passed a extra_var as key value pair task_vars
# would return mocked output of the named module.
# print(task_vars['vars'].keys())
# print(task_vars['vars'].get('linchpin_mock', False))
linchpin_mock = task_vars['vars'].get('linchpin_mock',
False)
if linchpin_mock:
return mock_utils.get_mock_data(module_args,
"gcp_compute_network")
module_return = self._execute_module(module_args=module_args,
task_vars=task_vars, tmp=tmp)
return module_return
| gpl-3.0 |
ubc/edx-ora2 | openassessment/assessment/worker/training.py | 10 | 12547 | """
Asynchronous tasks for training classifiers from examples.
"""
import datetime
from collections import defaultdict
from celery import task
from celery.utils.log import get_task_logger
from dogapi import dog_stats_api
from django.conf import settings
from django.db import DatabaseError
from openassessment.assessment.api import ai_worker as ai_worker_api
from openassessment.assessment.errors import AIError, ANTICIPATED_CELERY_ERRORS
from .algorithm import AIAlgorithm, AIAlgorithmError
from .grading import reschedule_grading_tasks
from openassessment.assessment.errors.ai import AIGradingInternalError
from openassessment.assessment.models.ai import AITrainingWorkflow
MAX_RETRIES = 2
logger = get_task_logger(__name__)
# If the Django settings define a low-priority queue, use that.
# Otherwise, use the default queue.
TRAINING_TASK_QUEUE = getattr(settings, 'LOW_PRIORITY_QUEUE', None)
RESCHEDULE_TASK_QUEUE = getattr(settings, 'LOW_PRIORITY_QUEUE', None)
class InvalidExample(Exception):
"""
The example retrieved from the AI API had an invalid format.
"""
def __init__(self, example_dict, msg):
err_msg = u"Training example \"{example}\" is not valid: {msg}".format(
example=example_dict,
msg=msg
)
super(InvalidExample, self).__init__(err_msg)
@task(queue=TRAINING_TASK_QUEUE, max_retries=MAX_RETRIES) # pylint: disable=E1102
@dog_stats_api.timed('openassessment.assessment.ai.train_classifiers.time')
def train_classifiers(workflow_uuid):
"""
Asynchronous task to train classifiers for AI grading.
This task uses the AI API to retrieve task parameters
(algorithm ID and training examples) and upload
the trained classifiers.
If the task could not be completed successfully,
it is retried a few times. If it continues to fail,
it is left incomplete. Since the AI API tracks all
training tasks in the database, incomplete tasks
can always be rescheduled manually later.
Args:
workflow_uuid (str): The UUID of the workflow associated
with this training task.
Returns:
None
Raises:
AIError: An error occurred during a request to the AI API.
AIAlgorithmError: An error occurred while training the AI classifiers.
InvalidExample: The training examples provided by the AI API were not valid.
"""
# Short-circuit if the workflow is already marked complete
# This is an optimization, but training tasks could still
# execute multiple times depending on when they get picked
# up by workers and marked complete.
try:
if ai_worker_api.is_training_workflow_complete(workflow_uuid):
return
except AIError:
msg = (
u"An unexpected error occurred while checking the "
u"completion of training workflow with UUID {uuid}"
).format(uuid=workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Retrieve task parameters
try:
params = ai_worker_api.get_training_task_params(workflow_uuid)
examples = params['training_examples']
algorithm_id = params['algorithm_id']
course_id = params['course_id']
item_id = params['item_id']
except (AIError, KeyError):
msg = (
u"An error occurred while retrieving AI training "
u"task parameters for the workflow with UUID {}"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Retrieve the ML algorithm to use for training
# (based on task params and worker configuration)
try:
algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
except AIAlgorithmError:
msg = (
u"An error occurred while loading the "
u"AI algorithm (training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
except AIError:
msg = (
u"An error occurred while retrieving "
u"the algorithm ID (training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Train a classifier for each criterion
# The AIAlgorithm subclass is responsible for ensuring that
# the trained classifiers are JSON-serializable.
try:
classifier_set = {
criterion_name: algorithm.train_classifier(examples_dict)
for criterion_name, examples_dict
in _examples_by_criterion(examples).iteritems()
}
except InvalidExample:
msg = (
u"Training example format was not valid "
u"(training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
except AIAlgorithmError:
msg = (
u"An error occurred while training AI classifiers "
u"(training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Upload the classifiers
# (implicitly marks the workflow complete)
try:
ai_worker_api.create_classifiers(workflow_uuid, classifier_set)
except AIError:
msg = (
u"An error occurred while uploading trained classifiers "
u"(training workflow UUID {})"
).format(workflow_uuid)
logger.exception(msg)
raise train_classifiers.retry()
# Upon successful completion of the creation of classifiers, we will try to automatically schedule any
# grading tasks for the same item.
try:
reschedule_grading_tasks.apply_async(args=[course_id, item_id])
except AIGradingInternalError as ex:
msg = (
u"An error occured while trying to regrade all ungraded assignments"
u"after classifiers were trained successfully: {}"
).format(ex)
logger.exception(msg)
# Here we don't retry, because they will already retry once in the grading task.
raise
@task(queue=RESCHEDULE_TASK_QUEUE, max_retries=MAX_RETRIES) #pylint: disable=E1102
@dog_stats_api.timed('openassessment.assessment.ai.reschedule_training_tasks.time')
def reschedule_training_tasks(course_id, item_id):
"""
Reschedules all incomplete training tasks
Args:
course_id (unicode): The course that we are going to search for unfinished training workflows
item_id (unicode): The specific item within that course that we will reschedule unfinished workflows for
Raises:
AIReschedulingInternalError
DatabaseError
"""
# Starts logging the details of the rescheduling
_log_start_reschedule_training(course_id=course_id, item_id=item_id)
start_time = datetime.datetime.now()
# Run a query to find the incomplete training workflows
try:
training_workflows = AITrainingWorkflow.get_incomplete_workflows(course_id, item_id)
except (DatabaseError, AITrainingWorkflow.DoesNotExist) as ex:
msg = (
u"An unexpected error occurred while retrieving all incomplete "
u"training tasks for course_id: {cid} and item_id: {iid}: {ex}"
).format(cid=course_id, iid=item_id, ex=ex)
logger.exception(msg)
raise reschedule_training_tasks.retry()
# Tries to train every workflow that has not completed.
for target_workflow in training_workflows:
try:
train_classifiers.apply_async(args=[target_workflow.uuid])
logger.info(
u"Rescheduling of training was successful for workflow with uuid{}".format(target_workflow.uuid)
)
except ANTICIPATED_CELERY_ERRORS as ex:
msg = (
u"An unexpected error occurred while scheduling the task for training workflow with UUID {id}: {ex}"
).format(id=target_workflow.uuid, ex=ex)
logger.exception(msg)
time_delta = datetime.datetime.now() - start_time
_log_complete_reschedule_training(
course_id=course_id, item_id=item_id, seconds=time_delta.total_seconds(), success=False
)
raise reschedule_training_tasks.retry()
# Logs the total time to reschedule all training of classifiers if not logged beforehand by exception.
time_delta = datetime.datetime.now() - start_time
_log_complete_reschedule_training(
course_id=course_id, item_id=item_id, seconds=time_delta.total_seconds(), success=True
)
def _examples_by_criterion(examples):
"""
Transform the examples returned by the AI API into our internal format.
Args:
examples (list): Training examples of the form returned by the AI API.
Each element of the list should be a dictionary with keys
'text' (the essay text) and 'scores' (a dictionary mapping
criterion names to numeric scores).
Returns:
dict: keys are the criteria names, and each value is list of `AIAlgorithm.ExampleEssay`s
Raises:
InvalidExample: The provided training examples are not in a valid format.
"""
internal_examples = defaultdict(list)
prev_criteria = None
for example_dict in examples:
# Check that the example contains the expected keys
try:
scores_dict = example_dict['scores']
text = unicode(example_dict['text'])
except KeyError:
raise InvalidExample(example_dict, u'Example dict must have keys "scores" and "text"')
# Check that the criteria names are consistent across examples
if prev_criteria is None:
prev_criteria = set(scores_dict.keys())
else:
if prev_criteria != set(scores_dict.keys()):
msg = (
u"Example criteria do not match "
u"the previous example: {criteria}"
).format(criteria=prev_criteria)
raise InvalidExample(example_dict, msg)
for criterion_name, score in scores_dict.iteritems():
try:
score = int(score)
except ValueError:
raise InvalidExample(example_dict, u"Example score is not an integer")
else:
internal_ex = AIAlgorithm.ExampleEssay(text, score)
internal_examples[criterion_name].append(internal_ex)
return internal_examples
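

# An illustrative sketch (made-up essays, criteria, and scores) of the
# grouping performed by _examples_by_criterion; not real training data.
def _example_criterion_grouping():
    examples = [
        {'text': u'Essay one', 'scores': {'ideas': 1, 'grammar': 2}},
        {'text': u'Essay two', 'scores': {'ideas': 0, 'grammar': 1}},
    ]
    # Returns a dict mapping each criterion name to a list of ExampleEssays.
    return _examples_by_criterion(examples)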
def _log_start_reschedule_training(course_id=None, item_id=None):
"""
Sends data about the rescheduling_training task to datadog
Args:
course_id (unicode): the course id to associate with the log start
item_id (unicode): the item id to tag with the log start
"""
tags = [
u"course_id:{}".format(course_id),
u"item_id:{}".format(item_id),
]
dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.scheduled_count', tags)
msg = u"Rescheduling of incomplete training tasks began for course_id={cid} and item_id={iid}"
logger.info(msg.format(cid=course_id, iid=item_id))
def _log_complete_reschedule_training(course_id=None, item_id=None, seconds=-1, success=False):
"""
Sends the total time the rescheduling of training tasks took to datadog
Note that this function may be invoked multiple times per call to reschedule_training_tasks,
because the time for EACH ATTEMPT is taken (i.e. if we fail (by error) to schedule training once,
we log the time elapsed before trying again.)
Args:
course_id (unicode): the course_id to tag the task with
item_id (unicode): the item_id to tag the task with
seconds (int): the number of seconds that elapsed during the rescheduling task.
success (bool): indicates whether or not all attempts to reschedule were successful
"""
tags = [
u"course_id:{}".format(course_id),
u"item_id:{}".format(item_id),
u"success:{}".format(success)
]
dog_stats_api.histogram('openassessment.assessment.ai_task.AIRescheduleTraining.turnaround_time', seconds,tags)
dog_stats_api.increment('openassessment.assessment.ai_task.AIRescheduleTraining.completed_count', tags)
msg = u"Rescheduling of incomplete training tasks for course_id={cid} and item_id={iid} completed in {s} seconds."
if not success:
msg += u" At least one rescheduling task failed due to internal error."
    msg = msg.format(cid=course_id, iid=item_id, s=seconds)
logger.info(msg)
| agpl-3.0 |
westinedu/sovleit | django/test/simple.py | 150 | 15012 | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest
try:
all
except NameError:
from django.utils.itercompat import all
__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner', 'run_tests')
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn(
"DjangoTestRunner is deprecated; it's functionality is indistinguishable from TextTestRunner",
PendingDeprecationWarning
)
super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
try:
app_path = app_module.__name__.split('.')[:-1]
test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE)
except ImportError, e:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
import os.path
from imp import find_module
try:
mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)])
except ImportError:
# 'tests' module doesn't exist. Move on.
test_module = None
else:
# The module exists, so there must be an import error in the
# test module itself. We don't need the module; so if the
# module was a single file module (i.e., tests.py), close the file
# handle returned by find_module. Otherwise, the test module
# is a directory, and there is nothing to close.
if mod[0]:
mod[0].close()
raise
return test_module
def build_suite(app_module):
"Create a complete Django test suite for the provided application module"
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
try:
suite.addTest(doctest.DocTestSuite(test_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(TestClass)
except TypeError:
raise ValueError("Test label '%s' does not refer to a test class" % label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are place in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
classes is a sequence of types
    All tests of type classes[0] are placed first, then tests of type classes[1], etc.
Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
def dependency_ordered(test_databases, dependencies):
"""Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
while test_databases:
changed = False
deferred = []
while test_databases:
signature, (db_name, aliases) = test_databases.pop()
dependencies_satisfied = True
for alias in aliases:
if alias in dependencies:
if all(a in resolved_databases for a in dependencies[alias]):
# all dependencies for this alias are satisfied
dependencies.pop(alias)
resolved_databases.add(alias)
else:
dependencies_satisfied = False
else:
resolved_databases.add(alias)
if dependencies_satisfied:
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
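

# An illustrative sketch (made-up aliases and signatures) of how
# dependency_ordered honors TEST_DEPENDENCIES: 'other' depends on
# 'default', so the default database is ordered first.
def _example_dependency_ordering():
    test_databases = [
        ('sig-other', ('other_db', ['other'])),
        ('sig-default', ('default_db', ['default'])),
    ]
    dependencies = {'other': ['default']}
    return dependency_ordered(test_databases, dependencies)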
class DjangoTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], [])
)
item[1].append(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS:
dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
# Actually create the database for the first connection
connection = connections[aliases[0]]
old_names.append((connection, db_name, True))
test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
for alias in aliases[1:]:
connection = connections[alias]
if db_name:
old_names.append((connection, db_name, False))
connection.settings_dict['NAME'] = test_db_name
else:
# If settings_dict['NAME'] isn't defined, we have a backend where
# the name isn't important -- e.g., SQLite, which uses :memory:.
# Force create the database instead of assuming it's a duplicate.
old_names.append((connection, db_name, True))
connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
from django.db import connections
old_names, mirrors = old_config
# Point all the mirrors back to the originals
for alias, old_name in mirrors:
connections[alias].settings_dict['NAME'] = old_name
# Destroy all the non-mirror databases
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
else:
connection.settings_dict['NAME'] = old_name
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def run_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
import warnings
warnings.warn(
'The run_tests() test runner has been deprecated in favor of DjangoTestSuiteRunner.',
DeprecationWarning
)
test_runner = DjangoTestSuiteRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
return test_runner.run_tests(test_labels, extra_tests=extra_tests)
| bsd-3-clause |
jrabbit/compose | tests/unit/cli/command_test.py | 9 | 3080 | # ~*~ encoding: utf-8 ~*~
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import pytest
import six
from compose.cli.command import get_config_path_from_options
from compose.config.environment import Environment
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
class TestGetConfigPathFromOptions(object):
def test_path_from_options(self):
paths = ['one.yml', 'two.yml']
opts = {'--file': paths}
environment = Environment.from_env_file('.')
assert get_config_path_from_options('.', opts, environment) == paths
def test_single_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = 'one.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options('.', {}, environment) == ['one.yml']
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix separator')
def test_multiple_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = 'one.yml:two.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['one.yml', 'two.yml']
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator')
def test_multiple_path_from_env_windows(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = 'one.yml;two.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['one.yml', 'two.yml']
def test_multiple_path_from_env_custom_separator(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['c:\\one.yml', '.\\semi;colon.yml']
def test_no_path(self):
environment = Environment.from_env_file('.')
assert not get_config_path_from_options('.', {}, environment)
def test_unicode_path_from_options(self):
paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
opts = {'--file': paths}
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', opts, environment
) == ['就吃饭/docker-compose.yml']
@pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
def test_unicode_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['就吃饭/docker-compose.yml']
| apache-2.0 |
rhyolight/nupic | src/nupic/data/dict_utils.py | 49 | 5295 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
# TODO: Note the functions 'rUpdate' are duplicated in
# the swarming.hypersearch.utils.py module
class DictObj(dict):
"""Dictionary that allows attribute-like access to its elements.
Attributes are read-only."""
def __getattr__(self, name):
if name == '__deepcopy__':
return super(DictObj, self).__getattribute__("__deepcopy__")
return self[name]
def __setstate__(self, state):
for k, v in state.items():
self[k] = v
def rUpdate(original, updates):
"""Recursively updates the values in original with the values from updates."""
# Keep a list of the sub-dictionaries that need to be updated to avoid having
  # to use recursion (which could fail for dictionaries with a lot of nesting).
dictPairs = [(original, updates)]
while len(dictPairs) > 0:
original, updates = dictPairs.pop()
for k, v in updates.iteritems():
if k in original and isinstance(original[k], dict) and isinstance(v, dict):
dictPairs.append((original[k], v))
else:
original[k] = v
def rApply(d, f):
"""Recursively applies f to the values in dict d.
Args:
d: The dict to recurse over.
f: A function to apply to values in d that takes the value and a list of
keys from the root of the dict to the value.
"""
remainingDicts = [(d, ())]
while len(remainingDicts) > 0:
current, prevKeys = remainingDicts.pop()
for k, v in current.iteritems():
keys = prevKeys + (k,)
if isinstance(v, dict):
remainingDicts.insert(0, (v, keys))
else:
f(v, keys)
def find(d, target):
remainingDicts = [d]
while len(remainingDicts) > 0:
current = remainingDicts.pop()
for k, v in current.iteritems():
if k == target:
return v
if isinstance(v, dict):
remainingDicts.insert(0, v)
return None
def get(d, keys):
for key in keys:
d = d[key]
return d
def set(d, keys, value):
for key in keys[:-1]:
d = d[key]
d[keys[-1]] = value
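

# An illustrative sketch (made-up data) exercising DictObj, rUpdate, find
# and get as defined above.
def _exampleUsage():
  original = DictObj(a=DictObj(b=1), c=2)
  # Attribute-style read access provided by DictObj
  assert original.c == 2
  # Recursively merge a nested update into the original dict
  rUpdate(original, {'a': {'d': 3}})
  assert find(original, 'd') == 3
  assert get(original, ('a', 'b')) == 1
  return original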
def dictDiffAndReport(da, db):
""" Compares two python dictionaries at the top level and report differences,
if any, to stdout
da: first dictionary
db: second dictionary
Returns: The same value as returned by dictDiff() for the given args
"""
differences = dictDiff(da, db)
if not differences:
return differences
if differences['inAButNotInB']:
print ">>> inAButNotInB: %s" % differences['inAButNotInB']
if differences['inBButNotInA']:
print ">>> inBButNotInA: %s" % differences['inBButNotInA']
for key in differences['differentValues']:
print ">>> da[%s] != db[%s]" % (key, key)
print "da[%s] = %r" % (key, da[key])
print "db[%s] = %r" % (key, db[key])
return differences
def dictDiff(da, db):
""" Compares two python dictionaries at the top level and return differences
da: first dictionary
db: second dictionary
Returns: None if dictionaries test equal; otherwise returns a
dictionary as follows:
{
'inAButNotInB':
<sequence of keys that are in da but not in db>
'inBButNotInA':
<sequence of keys that are in db but not in da>
'differentValues':
<sequence of keys whose corresponding values differ
between da and db>
}
"""
different = False
resultDict = dict()
resultDict['inAButNotInB'] = set(da) - set(db)
if resultDict['inAButNotInB']:
different = True
resultDict['inBButNotInA'] = set(db) - set(da)
if resultDict['inBButNotInA']:
different = True
resultDict['differentValues'] = []
for key in (set(da) - resultDict['inAButNotInB']):
comparisonResult = da[key] == db[key]
if isinstance(comparisonResult, bool):
isEqual = comparisonResult
else:
# This handles numpy arrays (but only at the top level)
isEqual = comparisonResult.all()
if not isEqual:
resultDict['differentValues'].append(key)
different = True
assert (((resultDict['inAButNotInB'] or resultDict['inBButNotInA'] or
resultDict['differentValues']) and different) or not different)
return resultDict if different else None
| agpl-3.0 |
thingsinjars/electron | script/dump-symbols.py | 144 | 1962 | #!/usr/bin/env python
import os
import sys
from lib.config import PLATFORM
from lib.util import atom_gyp, execute, rm_rf
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'download', 'libchromiumcontent', 'static_library')
def main(destination):
# if PLATFORM == 'win32':
# register_required_dll()
rm_rf(destination)
(project_name, product_name) = get_names_from_gyp()
if PLATFORM in ['darwin', 'linux']:
generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'posix',
'generate_breakpad_symbols.py')
if PLATFORM == 'darwin':
start = os.path.join(OUT_DIR, '{0}.app'.format(product_name), 'Contents',
'MacOS', product_name)
else:
start = os.path.join(OUT_DIR, project_name)
args = [
'--build-dir={0}'.format(OUT_DIR),
'--binary={0}'.format(start),
'--symbols-dir={0}'.format(destination),
'--libchromiumcontent-dir={0}'.format(CHROMIUM_DIR),
'--clear',
'--jobs=16',
]
else:
generate_breakpad_symbols = os.path.join(SOURCE_ROOT, 'tools', 'win',
'generate_breakpad_symbols.py')
args = [
'--symbols-dir={0}'.format(destination),
'--jobs=16',
os.path.relpath(OUT_DIR),
]
execute([sys.executable, generate_breakpad_symbols] + args)
def register_required_dll():
register = os.path.join(SOURCE_ROOT, 'tools', 'win',
'register_msdia80_dll.js')
execute(['node.exe', os.path.relpath(register)]);
def get_names_from_gyp():
variables = atom_gyp()
return (variables['project_name%'], variables['product_name%'])
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
| mit |
tima/ansible | contrib/inventory/vmware.py | 92 | 18476 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMware Inventory Script
=======================
Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `group_by=false` (in the INI file), host systems
are also returned in addition to VMs.
This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:
* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
`vmware_other.ini` exists)
The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.
Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
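
# An illustrative vmware.ini sketch; every value below is a placeholder,
# not a default, and only options actually read by this script are shown:
#
#   [auth]
#   host = vcenter.example.com
#   user = inventory-user
#   password = secret
#   sslcheck = no
#
#   [defaults]
#   guests_only = yes
#   clusters = cluster1, cluster2
#   cache_dir = /tmp/ansible-vmware-cache
#   cache_max_age = 300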
from __future__ import print_function
import collections
import json
import logging
import optparse
import os
import ssl
import sys
import time
from six import integer_types, text_type, string_types
from six.moves import configparser
# Disable logging messages triggered by pSphere/suds.
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource
from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
def __init__(self, guests_only=None):
self.config = configparser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
break
# Retrieve only guest VMs, or include host systems?
if guests_only is not None:
self.guests_only = guests_only
elif self.config.has_option('defaults', 'guests_only'):
self.guests_only = self.config.getboolean('defaults', 'guests_only')
else:
self.guests_only = True
# Read authentication information from VMware environment variables
# (if set), otherwise from INI file.
auth_host = os.environ.get('VMWARE_HOST')
if not auth_host and self.config.has_option('auth', 'host'):
auth_host = self.config.get('auth', 'host')
auth_user = os.environ.get('VMWARE_USER')
if not auth_user and self.config.has_option('auth', 'user'):
auth_user = self.config.get('auth', 'user')
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
sslcheck = os.environ.get('VMWARE_SSLCHECK')
if not sslcheck and self.config.has_option('auth', 'sslcheck'):
sslcheck = self.config.get('auth', 'sslcheck')
if not sslcheck:
sslcheck = True
else:
if sslcheck.lower() in ['no', 'false']:
sslcheck = False
else:
sslcheck = True
# Limit the clusters being scanned
self.filter_clusters = os.environ.get('VMWARE_CLUSTERS')
if not self.filter_clusters and self.config.has_option('defaults', 'clusters'):
self.filter_clusters = self.config.get('defaults', 'clusters')
if self.filter_clusters:
self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()]
# Override certificate checks
if not sslcheck:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
def _put_cache(self, name, value):
'''
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
with open(cache_file, 'w') as cache:
json.dump(value, cache)
def _get_cache(self, name, default=None):
'''
Retrieves the value from cache for the given name.
'''
if self.config.has_option('defaults', 'cache_dir'):
            cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
if self.config.has_option('defaults', 'cache_max_age'):
cache_max_age = self.config.getint('defaults', 'cache_max_age')
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
def _flatten_dict(self, d, parent_key='', sep='_'):
'''
Flatten nested dicts by combining keys with a separator. Lists with
only string items are included as is; any other lists are discarded.
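        For example (illustrative input), {'config': {'name': 'vm1'}, 'tags': ['a', 'b'], 'uptime': 42}
        would become {'config_name': 'vm1', 'tags': ['a', 'b'], 'uptime': 42}.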
'''
items = []
for k, v in d.items():
if k.startswith('_'):
continue
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
            elif isinstance(v, (list, tuple)):
                if all([isinstance(x, string_types) for x in v]):
                    items.append((new_key, v))
            else:
                items.append((new_key, v))
return dict(items)
def _get_obj_info(self, obj, depth=99, seen=None):
'''
Recursively build a data structure for the given pSphere object (depth
only applies to ManagedObject instances).
'''
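        # An empty tuple marks values that cannot be serialized; the recursive
        # calls filter these out with the "!= ()" checks below.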
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
obj_unicode = text_type(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
return obj_unicode
seen.add(obj)
if depth <= 0:
return obj_unicode
d = {}
for attr in dir(obj):
if attr.startswith('_'):
continue
try:
val = getattr(obj, attr)
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
except Exception as e:
pass
return d
elif isinstance(obj, SudsObject):
d = {}
for key, val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
d[key] = obj_info
return d
elif isinstance(obj, (list, tuple)):
l = []
for val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, float) + string_types + integer_types):
return obj
else:
return ()
def _get_host_info(self, host, prefix='vmware'):
'''
Return a flattened dict with info about the given host system.
'''
host_info = {
'name': host.name,
}
for attr in ('datastore', 'network', 'vm'):
try:
value = getattr(host, attr)
host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
host_info['%ss' % attr] = []
for k, v in self._get_obj_info(host.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
host_info[k2] = v2
elif k != 'host':
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
except Exception as e:
print(e, file=sys.stderr)
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
return host_info
def _get_vm_info(self, vm, prefix='vmware'):
'''
Return a flattened dict with info about the given virtual machine.
'''
vm_info = {
'name': vm.name,
}
for attr in ('datastore', 'network'):
try:
value = getattr(vm, attr)
vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
vm_info['%ss' % attr] = []
try:
vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
except AttributeError:
vm_info['resourcePool'] = ''
try:
vm_info['guestState'] = vm.guest.guestState
except AttributeError:
vm_info['guestState'] = ''
for k, v in self._get_obj_info(vm.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
if k2 == 'host':
k2 = 'hostSystem'
vm_info[k2] = v2
elif k != 'vm':
vm_info[k] = v
vm_info = self._flatten_dict(vm_info, prefix)
if ('%s_ipAddress' % prefix) in vm_info:
vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
return vm_info
def _add_host(self, inv, parent_group, host_name):
'''
Add the host to the parent group in the given inventory.
'''
p_group = inv.setdefault(parent_group, [])
if isinstance(p_group, dict):
group_hosts = p_group.setdefault('hosts', [])
else:
group_hosts = p_group
if host_name not in group_hosts:
group_hosts.append(host_name)
def _add_child(self, inv, parent_group, child_group):
'''
Add a child group to a parent group in the given inventory.
'''
if parent_group != 'all':
p_group = inv.setdefault(parent_group, {})
if not isinstance(p_group, dict):
inv[parent_group] = {'hosts': p_group}
p_group = inv[parent_group]
group_children = p_group.setdefault('children', [])
if child_group not in group_children:
group_children.append(child_group)
inv.setdefault(child_group, [])
def get_inventory(self, meta_hostvars=True):
'''
Reads the inventory from cache or VMware API via pSphere.
'''
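        # For illustration, the returned structure roughly looks like
        # (group and host names are made-up examples):
        #   {'all': {'hosts': ['vm01', 'vm02']},
        #    'vmware_vm': {'hosts': ['vm01', 'vm02'], 'children': ['guests']},
        #    'guests': {'children': ['ubuntu64Guest']},
        #    'ubuntu64Guest': ['vm01', 'vm02'],
        #    '_meta': {'hostvars': {'vm01': {'vmware_name': 'vm01', '...': '...'}}}}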
# Use different cache names for guests only vs. all hosts.
if self.guests_only:
cache_name = '__inventory_guests__'
else:
cache_name = '__inventory_all__'
inv = self._get_cache(cache_name, None)
if inv is not None:
return inv
inv = {'all': {'hosts': []}}
if meta_hostvars:
inv['_meta'] = {'hostvars': {}}
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
if not self.guests_only:
if self.config.has_option('defaults', 'hw_group'):
hw_group = self.config.get('defaults', 'hw_group')
else:
hw_group = default_group + '_hw'
if self.config.has_option('defaults', 'vm_group'):
vm_group = self.config.get('defaults', 'vm_group')
else:
vm_group = default_group + '_vm'
if self.config.has_option('defaults', 'prefix_filter'):
prefix_filter = self.config.get('defaults', 'prefix_filter')
else:
prefix_filter = None
if self.filter_clusters:
# Loop through clusters and find hosts:
hosts = []
for cluster in ClusterComputeResource.all(self.client):
if cluster.name in self.filter_clusters:
for host in cluster.host:
hosts.append(host)
else:
# Get list of all physical hosts
hosts = HostSystem.all(self.client)
# Loop through physical hosts:
for host in hosts:
if not self.guests_only:
self._add_host(inv, 'all', host.name)
self._add_host(inv, hw_group, host.name)
host_info = self._get_host_info(host)
if meta_hostvars:
inv['_meta']['hostvars'][host.name] = host_info
self._put_cache(host.name, host_info)
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
if vm.name.startswith(prefix_filter):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
if meta_hostvars:
inv['_meta']['hostvars'][vm.name] = vm_info
self._put_cache(vm.name, vm_info)
# Group by resource pool.
vm_resourcePool = vm_info.get('vmware_resourcePool', None)
if vm_resourcePool:
self._add_child(inv, vm_group, 'resource_pools')
self._add_child(inv, 'resource_pools', vm_resourcePool)
self._add_host(inv, vm_resourcePool, vm.name)
# Group by datastore.
for vm_datastore in vm_info.get('vmware_datastores', []):
self._add_child(inv, vm_group, 'datastores')
self._add_child(inv, 'datastores', vm_datastore)
self._add_host(inv, vm_datastore, vm.name)
# Group by network.
for vm_network in vm_info.get('vmware_networks', []):
self._add_child(inv, vm_group, 'networks')
self._add_child(inv, 'networks', vm_network)
self._add_host(inv, vm_network, vm.name)
# Group by guest OS.
vm_guestId = vm_info.get('vmware_guestId', None)
if vm_guestId:
self._add_child(inv, vm_group, 'guests')
self._add_child(inv, 'guests', vm_guestId)
self._add_host(inv, vm_guestId, vm.name)
# Group all VM templates.
vm_template = vm_info.get('vmware_template', False)
if vm_template:
self._add_child(inv, vm_group, 'templates')
self._add_host(inv, 'templates', vm.name)
self._put_cache(cache_name, inv)
return inv
def get_host(self, hostname):
'''
Read info about a specific host or VM from cache or VMware API.
'''
inv = self._get_cache(hostname, None)
if inv is not None:
return inv
if not self.guests_only:
try:
host = HostSystem.get(self.client, name=hostname)
inv = self._get_host_info(host)
except ObjectNotFoundError:
pass
if inv is None:
try:
vm = VirtualMachine.get(self.client, name=hostname)
inv = self._get_vm_info(vm)
except ObjectNotFoundError:
pass
if inv is not None:
self._put_cache(hostname, inv)
return inv or {}
def main():
parser = optparse.OptionParser()
parser.add_option('--list', action='store_true', dest='list',
default=False, help='Output inventory groups and hosts')
parser.add_option('--host', dest='host', default=None, metavar='HOST',
help='Output variables only for the given hostname')
# Additional options for use when running the script standalone, but never
# used by Ansible.
parser.add_option('--pretty', action='store_true', dest='pretty',
default=False, help='Output nicely-formatted JSON')
parser.add_option('--include-host-systems', action='store_true',
dest='include_host_systems', default=False,
help='Include host systems in addition to VMs')
parser.add_option('--no-meta-hostvars', action='store_false',
dest='meta_hostvars', default=True,
help='Exclude [\'_meta\'][\'hostvars\'] with --list')
options, args = parser.parse_args()
if options.include_host_systems:
vmware_inventory = VMwareInventory(guests_only=False)
else:
vmware_inventory = VMwareInventory()
if options.host is not None:
inventory = vmware_inventory.get_host(options.host)
else:
inventory = vmware_inventory.get_inventory(options.meta_hostvars)
json_kwargs = {}
if options.pretty:
json_kwargs.update({'indent': 4, 'sort_keys': True})
json.dump(inventory, sys.stdout, **json_kwargs)
if __name__ == '__main__':
main()
| gpl-3.0 |
Nachtfeuer/concept-py | tests/test_vector_2d.py | 1 | 5600 | """
=======
License
=======
Copyright (c) 2017 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=R0201
import math
import unittest
from hamcrest import assert_that, equal_to
from concept.math.vector import Vector2d
class TestVector2d(unittest.TestCase):
""" Testing math 2d vector. """
def test_init(self):
"""Testing of method Vector2d.__init__."""
assert_that(Vector2d(1.0, 2.0).x, equal_to(1.0))
assert_that(Vector2d(1.0, 2.0).y, equal_to(2.0))
assert_that(Vector2d(), equal_to(Vector2d(0.0, 0.0)))
def test_repr(self):
"""Testing of method Vector2d.__repr__."""
assert_that(str(Vector2d(1.2, 3.4)), equal_to("Vector2d(x=1.2, y=3.4)"))
def test_add(self):
"""Testing of method Vector2d.__add__."""
assert_that(Vector2d(1.0, 2.0) + Vector2d(3.0, 4.0), equal_to(Vector2d(4.0, 6.0)))
def test_sub(self):
"""Testing of method Vector2d.__sub__."""
assert_that(Vector2d(1.0, 5.0) - Vector2d(3.0, 4.0), equal_to(Vector2d(-2.0, 1.0)))
def test_scalar_product(self):
"""Testing of method Vector2d.scalar_product."""
assert_that(Vector2d(2.0, 5.0).scalar_product(Vector2d(3.0, 4.0)), equal_to(26))
def test_length(self):
"""Testing of method Vector2d.length."""
assert_that(Vector2d(3.0, 4.0).length(), equal_to(5.0))
def test_scaled(self):
"""Testing of method Vector2d.scaled."""
vec = Vector2d(3.0, 4.0)
assert_that(vec.scaled(2), equal_to(Vector2d(6.0, 8.0)))
assert_that(vec, equal_to(Vector2d(3.0, 4.0)))
def test_scale(self):
"""Testing of method Vector2d.scale."""
vec = Vector2d(3.0, 4.0)
vec.scale(2.0)
assert_that(vec, equal_to(Vector2d(6.0, 8.0)))
def test_rotated(self):
"""Testing of method Vector2d.rotated."""
vec_a = Vector2d(1.0, 0.0)
vec_b = vec_a.rotated(math.pi / 180.0 * 90)
assert_that(abs(vec_b.x) < 1e-10, equal_to(True))
assert_that(abs(vec_b.y - 1.0) < 1e-10, equal_to(True))
def test_turned_left(self):
"""Testing of method Vector2d.turned_left."""
assert_that(Vector2d(1.0, 0.0).turned_left(), equal_to(Vector2d(0.0, 1.0)))
assert_that(Vector2d(0.0, 1.0).turned_left(), equal_to(Vector2d(-1.0, 0.0)))
assert_that(Vector2d(-1.0, 0.0).turned_left(), equal_to(Vector2d(0.0, -1.0)))
assert_that(Vector2d(0.0, -1.0).turned_left(), equal_to(Vector2d(1.0, 0.0)))
def test_turned_right(self):
"""Testing of method Vector2d.turned_right."""
assert_that(Vector2d(1.0, 0.0).turned_right(), equal_to(Vector2d(0.0, -1.0)))
assert_that(Vector2d(0.0, -1.0).turned_right(), equal_to(Vector2d(-1.0, 0.0)))
assert_that(Vector2d(-1.0, 0.0).turned_right(), equal_to(Vector2d(0.0, 1.0)))
assert_that(Vector2d(0.0, 1.0).turned_right(), equal_to(Vector2d(1.0, 0.0)))
def test_angle(self):
"""Testing of method Vector2d.angle."""
angle_a = Vector2d(0.0, 1.0).angle(Vector2d(1.0, 0.0)) * 180.0 / math.pi
angle_b = Vector2d(1.0, 0.0).angle(Vector2d(0.0, 1.0)) * 180.0 / math.pi
assert_that(abs(angle_a - 90.0) <= 1e-10, equal_to(True))
assert_that(abs(angle_b + 90.0) <= 1e-10, equal_to(True))
def test_normalized(self):
"""Testing of method Vector2d.normalized."""
normalized_vec_a = Vector2d(10.0, 0).normalized()
normalized_vec_b = Vector2d(0.0, 10.0).normalized()
assert_that(normalized_vec_a, equal_to(Vector2d(1.0, 0.0)))
assert_that(normalized_vec_b, equal_to(Vector2d(0.0, 1.0)))
def test_cross_product(self):
"""Testing of method Vector2d.cross_product."""
assert_that(Vector2d(2.0, 5.0).cross_product(Vector2d(3.0, 4.0)), equal_to(-7.0))
def test_eq(self):
"""Testing of method Vector2d.__eq__."""
assert_that(Vector2d(1.2, 3.4), equal_to(Vector2d(1.2, 3.4)))
assert_that(Vector2d(1.2, 3.4).__eq__(1234), equal_to(False))
def test_neg(self):
"""Testing negating a vector."""
assert_that(-Vector2d(1.0, 2.0), equal_to(Vector2d(-1.0, -2.0)))
def test_is_perpendicular(self):
"""Testing method Vector2d.is_perpendicular."""
assert_that(Vector2d(0.0, 1.0).is_perpendicular(Vector2d(1.0, 0.0)), equal_to(True))
assert_that(Vector2d(1.0, 1.0).is_perpendicular(Vector2d(1.0, 0.0)), equal_to(False))
assert_that(Vector2d(1.0, 1.0).is_perpendicular("hello world"), equal_to(False))
| mit |
kickstandproject/python-ripcordclient | ripcordclient/tests/v1/test_subscriber.py | 1 | 2502 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 PolyBeacon, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ripcordclient.tests import utils
from ripcordclient.v1 import subscriber
SUBSCRIBER = {
'username': 'alice',
'uuid': 'b5142338-d88a-403e-bb14-e1fba0a318d2',
}
CREATE_SUBSCRIBER = {
'username': 'alice',
}
FIXTURES = {
'/v1/subscribers': {
'GET': (
{},
[SUBSCRIBER],
),
'POST': (
{},
SUBSCRIBER,
),
},
'/v1/subscribers/%s' % SUBSCRIBER['uuid']: {
'GET': (
{},
SUBSCRIBER,
),
'DELETE': (
{},
None,
),
},
}
class SubscriberManagerTest(testtools.TestCase):
def setUp(self):
super(SubscriberManagerTest, self).setUp()
self.api = utils.FakeAPI(FIXTURES)
self.manager = subscriber.SubscriberManager(self.api)
def test_create(self):
res = self.manager.create(**CREATE_SUBSCRIBER)
expect = [
('POST', '/v1/subscribers', {}, CREATE_SUBSCRIBER),
]
self.assertEqual(self.api.calls, expect)
self.assertTrue(res)
def test_delete(self):
res = self.manager.delete(uuid=SUBSCRIBER['uuid'])
expect = [
('DELETE', '/v1/subscribers/%s' % SUBSCRIBER['uuid'], {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(res, None)
def test_list(self):
res = self.manager.list()
expect = [
('GET', '/v1/subscribers', {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(len(res), 1)
def test_show(self):
res = self.manager.get(uuid=SUBSCRIBER['uuid'])
expect = [
('GET', '/v1/subscribers/%s' % SUBSCRIBER['uuid'], {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(res.uuid, SUBSCRIBER['uuid'])
| apache-2.0 |
OpenDataNode/ckanext-odn-ic2pc-sync | ckanext/commands/publishing_cmd.py | 1 | 5849 | '''
Created on 30.10.2014
@author: mvi
'''
from ckan.lib.cli import CkanCommand
import sys
import logging
from ckanext.model.external_catalog import external_catalog_table,\
migrate_to_v0_3, migrate_to_v0_4, migrate_to_v0_6
log = logging.getLogger('ckanext')
class PublishingCmd(CkanCommand):
'''Pushes datasets from one ckan to another
needs set properties in provided config file:
odn.ic2pc.src.ckan.url - source ckan from which we are harvesting datasets
odn.ic2pc.dst.ckan.url - destination ckan to which we are pushing the datasets
odn.ic2pc.dst.ckan.api.key - destination ckan api key needed for authentication
odn.ic2pc.package.extras.whitelist - package extras allowed to be synchronized
odn.ic2pc.resource.extras.whitelist - resource extras allowed to be synchronized
The whitelist properties have a blank space as delimiter
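    For illustration, the properties could look like this in the config file
    (URLs, API key and extras names below are placeholders, not defaults):
        odn.ic2pc.src.ckan.url = http://source-ckan.example.com
        odn.ic2pc.dst.ckan.url = http://destination-ckan.example.com
        odn.ic2pc.dst.ckan.api.key = 00000000-0000-0000-0000-000000000000
        odn.ic2pc.package.extras.whitelist = spatial theme
        odn.ic2pc.resource.extras.whitelist = format_note validity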
Usage:
publishing_cmd test
            - starts a test that writes the source and destination ckan urls
            that are set in the provided config file
publishing_cmd run
- starts pushing datasets
publishing_cmd initdb
- initializes DB tables needed for THIS extension
publishing_cmd migrate_to_v0.3.0
- updates db model from v0.2.x to v0.3.0
publishing_cmd migrate_to_v0.4.0
            - updates db model from v0.3.x to v0.4.0
        publishing_cmd migrate_to_v0.6.0
            - updates db model to v0.6.0
publishing_cmd uninstall
- drops tables in DB needed for THIS extension
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 5
min_args = 0
def __init__(self, name):
super(PublishingCmd, self).__init__(name)
def command(self):
self._load_config()
if len(self.args) == 0:
self.parser.print_usage()
sys.exit(1)
cmd = self.args[0]
if cmd == 'test':
log.info('Starting [PublishingCmd test]')
conf = self._get_config()
src_ckan_url = conf.get('odn.ic2pc.src.ckan.url')
dst_ckan_url = conf.get('odn.ic2pc.dst.ckan.url')
dst_ckan_api_key = conf.get('odn.ic2pc.dst.ckan.api.key')
package_extras_whitelist = conf.get('odn.ic2pc.package.extras.whitelist')
resource_extras_whitelist = conf.get('odn.ic2pc.resource.extras.whitelist')
log.info('source ckan url: %s' % (src_ckan_url,))
log.info('destination ckan url: %s' % (dst_ckan_url,))
log.info('destination api key: %s' % (dst_ckan_api_key,))
log.info('package extras whitelist: {0}'.format(package_extras_whitelist))
log.info('resource extras whitelist: {0}'.format(resource_extras_whitelist))
elif cmd == 'run':
log.info('Starting [PublishingCmd run]')
from ckanext.publishing.ckan_sync import CkanSync
from odn_ckancommons.ckan_helper import CkanAPIWrapper
conf = self._get_config()
src_ckan_url = conf.get('odn.ic2pc.src.ckan.url')
dst_ckan_url = conf.get('odn.ic2pc.dst.ckan.url')
dst_ckan_api_key = conf.get('odn.ic2pc.dst.ckan.api.key')
package_extras_whitelist = conf.get('odn.ic2pc.package.extras.whitelist', "")
resource_extras_whitelist = conf.get('odn.ic2pc.resource.extras.whitelist', "")
package_extras_whitelist = package_extras_whitelist.split(' ')
resource_extras_whitelist = resource_extras_whitelist.split(' ')
assert src_ckan_url
assert dst_ckan_url
assert dst_ckan_api_key
src_ckan = CkanAPIWrapper(src_ckan_url, None)
dst_ckan = CkanAPIWrapper(dst_ckan_url, dst_ckan_api_key)
pusher = CkanSync()
pusher.push(src_ckan, dst_ckan, whitelist_package_extras=package_extras_whitelist,
whitelist_resource_extras=resource_extras_whitelist)
log.info('End of [PublishingCmd run]')
elif cmd == 'initdb':
log.info('Starting db initialization')
if not external_catalog_table.exists():
log.info("creating external_catalog table")
external_catalog_table.create()
log.info("external_catalog table created successfully")
else:
log.info("external_catalog table already exists")
log.info('End of db initialization')
elif cmd == 'migrate_to_v0.3.0':
log.info('Starting migration of DB to v0.3.0')
migrate_to_v0_3()
log.info('End of migration of DB to v0.3.0')
elif cmd == 'migrate_to_v0.4.0':
log.info('Starting migration of DB to v0.4.0')
migrate_to_v0_4()
log.info('End of migration of DB to v0.4.0')
elif cmd == 'migrate_to_v0.6.0':
log.info('Starting migration of DB to v0.6.0')
migrate_to_v0_6()
log.info('End of migration of DB to v0.6.0')
elif cmd == 'uninstall':
log.info('Starting uninstall command')
if external_catalog_table.exists():
log.info("dropping external_catalog table")
external_catalog_table.drop()
log.info("dropped external_catalog table successfully")
else:
log.info("Table external_catalog doesn't exist")
log.info('End of uninstall command')
else:
log.info('No command with name \'{0}\''.format(cmd))
def _load_config(self):
super(PublishingCmd, self)._load_config() | agpl-3.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/tool_shed/model/migrate/versions/0020_add_repository_type_column.py | 1 | 1608 | """Migration script to add the type column to the repository table."""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def upgrade( migrate_engine ):
print __doc__
metadata.bind = migrate_engine
metadata.reflect()
Repository_table = Table( "repository", metadata, autoload=True )
c = Column( "type", TrimmedString( 255 ), index=True )
try:
# Create
c.create( Repository_table, index_name="ix_repository_type" )
assert c is Repository_table.c.type
except Exception, e:
print "Adding type column to the repository table failed: %s" % str( e )
# Update the type column to have the default unrestricted value.
cmd = "UPDATE repository SET type = 'unrestricted'"
migrate_engine.execute( cmd )
def downgrade( migrate_engine ):
metadata.bind = migrate_engine
metadata.reflect()
# Drop type column from repository table.
Repository_table = Table( "repository", metadata, autoload=True )
try:
Repository_table.c.type.drop()
except Exception, e:
print "Dropping column type from the repository table failed: %s" % str( e )
| gpl-3.0 |
jdugge/QGIS | tests/src/python/test_qgsserver_wfs.py | 7 | 34015 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer WFS.
From build dir, run: ctest -R PyQgsServerWFS -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'René-Luc Dhont'
__date__ = '19/09/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import os
# Needed on Qt 5 so that the serialization of XML is consistent among all executions
os.environ['QT_HASH_SEED'] = '1'
import re
import urllib.request
import urllib.parse
import urllib.error
from qgis.server import QgsServerRequest
from qgis.testing import unittest
from qgis.PyQt.QtCore import QSize
from qgis.core import (
QgsVectorLayer,
QgsFeatureRequest,
QgsExpression,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsCoordinateTransformContext,
QgsGeometry,
)
import osgeo.gdal # NOQA
from test_qgsserver import QgsServerTestBase
# Strip path and content length because path may vary
RE_STRIP_UNCHECKABLE = br'MAP=[^"]+|Content-Length: \d+|timeStamp="[^"]+"'
RE_ATTRIBUTES = br'[^>\s]+=[^>\s]+'
class TestQgsServerWFS(QgsServerTestBase):
"""QGIS Server WFS Tests"""
# Set to True in child classes to re-generate reference files for this class
regenerate_reference = False
def wfs_request_compare(self,
request, version='',
extra_query_string='',
reference_base_name=None,
project_file="test_project_wfs.qgs",
requestMethod=QgsServerRequest.GetMethod,
data=None):
project = self.testdata_path + project_file
assert os.path.exists(project), "Project file not found: " + project
query_string = '?MAP=%s&SERVICE=WFS&REQUEST=%s' % (
urllib.parse.quote(project), request)
if version:
query_string += '&VERSION=%s' % version
if extra_query_string:
query_string += '&%s' % extra_query_string
header, body = self._execute_request(
query_string, requestMethod=requestMethod, data=data)
self.assert_headers(header, body)
response = header + body
if reference_base_name is not None:
reference_name = reference_base_name
else:
reference_name = 'wfs_' + request.lower()
if version == '1.0.0':
reference_name += '_1_0_0'
reference_name += '.txt'
reference_path = self.testdata_path + reference_name
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'', expected)
self.assertXMLEqual(response, expected, msg="request %s failed.\n Query: %s" % (
query_string, request))
return header, body
def test_operation_not_supported(self):
qs = '?MAP=%s&SERVICE=WFS&VERSION=1.1.0&REQUEST=NotAValidRequest' % urllib.parse.quote(self.projectPath)
self._assert_status_code(501, qs)
def test_project_wfs(self):
"""Test some WFS request"""
for request in ('GetCapabilities', 'DescribeFeatureType'):
self.wfs_request_compare(request)
self.wfs_request_compare(request, '1.0.0')
def wfs_getfeature_compare(self, requestid, request):
project = self.testdata_path + "test_project_wfs.qgs"
assert os.path.exists(project), "Project file not found: " + project
query_string = '?MAP=%s&SERVICE=WFS&VERSION=1.0.0&REQUEST=%s' % (
urllib.parse.quote(project), request)
header, body = self._execute_request(query_string)
if requestid == 'hits':
body = re.sub(br'timeStamp="\d+-\d+-\d+T\d+:\d+:\d+"',
b'timeStamp="****-**-**T**:**:**"', body)
self.result_compare(
'wfs_getfeature_' + requestid + '.txt',
"request %s failed.\n Query: %s" % (
query_string,
request,
),
header, body
)
def test_getfeature(self):
tests = []
tests.append(('nobbox', 'GetFeature&TYPENAME=testlayer'))
tests.append(
('startindex2', 'GetFeature&TYPENAME=testlayer&STARTINDEX=2'))
tests.append(('limit2', 'GetFeature&TYPENAME=testlayer&MAXFEATURES=2'))
tests.append(
('start1_limit1', 'GetFeature&TYPENAME=testlayer&MAXFEATURES=1&STARTINDEX=1'))
tests.append(
('srsname', 'GetFeature&TYPENAME=testlayer&SRSNAME=EPSG:3857'))
tests.append(('sortby', 'GetFeature&TYPENAME=testlayer&SORTBY=id D'))
tests.append(('hits', 'GetFeature&TYPENAME=testlayer&RESULTTYPE=hits'))
for id, req in tests:
self.wfs_getfeature_compare(id, req)
def test_wfs_getcapabilities_100_url(self):
"""Check that URL in GetCapabilities response is complete"""
# empty url in project
project = os.path.join(
self.testdata_path, "test_project_without_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
r, h = self._result(self._execute_request(qs))
for item in str(r).split("\\n"):
if "onlineResource" in item:
self.assertEqual("onlineResource=\"?" in item, True)
# url well defined in query string
project = os.path.join(
self.testdata_path, "test_project_without_urls.qgs")
qs = "https://www.qgis-server.org?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
r, h = self._result(self._execute_request(qs))
for item in str(r).split("\\n"):
if "onlineResource" in item:
self.assertTrue(
"onlineResource=\"https://www.qgis-server.org?" in item, True)
# url well defined in project
project = os.path.join(
self.testdata_path, "test_project_with_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
r, h = self._result(self._execute_request(qs))
for item in str(r).split("\\n"):
if "onlineResource" in item:
self.assertEqual(
"onlineResource=\"my_wfs_advertised_url\"" in item, True)
def result_compare(self, file_name, error_msg_header, header, body):
self.assert_headers(header, body)
response = header + body
reference_path = self.testdata_path + file_name
self.store_reference(reference_path, response)
f = open(reference_path, 'rb')
expected = f.read()
f.close()
response = re.sub(RE_STRIP_UNCHECKABLE, b'', response)
expected = re.sub(RE_STRIP_UNCHECKABLE, b'', expected)
self.assertXMLEqual(response, expected, msg="%s\n" %
(error_msg_header))
def wfs_getfeature_post_compare(self, requestid, request):
project = self.testdata_path + "test_project_wfs.qgs"
assert os.path.exists(project), "Project file not found: " + project
query_string = '?MAP={}'.format(urllib.parse.quote(project))
header, body = self._execute_request(
query_string, requestMethod=QgsServerRequest.PostMethod, data=request.encode('utf-8'))
self.result_compare(
'wfs_getfeature_{}.txt'.format(requestid),
"GetFeature in POST for '{}' failed.".format(requestid),
header, body,
)
def test_getfeature_post(self):
tests = []
template = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>8 44</gml:lowerCorner>
<gml:upperCorner>9 45</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('nobbox_post', template.format("")))
tests.append(('startindex2_post', template.format('startIndex="2"')))
tests.append(('limit2_post', template.format('maxFeatures="2"')))
tests.append(('start1_limit1_post', template.format(
'startIndex="1" maxFeatures="1"')))
srsTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>890555.92634619 5465442.18332275</gml:lowerCorner>
<gml:upperCorner>1001875.41713946 5621521.48619207</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('srsname_post', srsTemplate.format("")))
# Issue https://github.com/qgis/QGIS/issues/36398
# Check get feature within polygon having srsName=EPSG:4326 (same as the project/layer)
within4326FilterTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" srsName="EPSG:4326" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<Within>
<PropertyName>geometry</PropertyName>
<Polygon xmlns="http://www.opengis.net/gml" srsName="EPSG:4326">
<exterior>
<LinearRing>
<posList srsDimension="2">
8.20344131 44.90137909
8.20347748 44.90137909
8.20347748 44.90141005
8.20344131 44.90141005
8.20344131 44.90137909
</posList>
</LinearRing>
</exterior>
</Polygon>
</Within>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('within4326FilterTemplate_post', within4326FilterTemplate.format("")))
# Check get feature within polygon having srsName=EPSG:3857 (different from the project/layer)
# The coordinates are converted from the one in 4326
within3857FilterTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<Within>
<PropertyName>geometry</PropertyName>
<Polygon xmlns="http://www.opengis.net/gml" srsName="EPSG:3857">
<exterior>
<LinearRing>
<posList srsDimension="2">
913202.90938171 5606008.98136456
913206.93580769 5606008.98136456
913206.93580769 5606013.84701639
913202.90938171 5606013.84701639
913202.90938171 5606008.98136456
</posList>
</LinearRing>
</exterior>
</Polygon>
</Within>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('within3857FilterTemplate_post', within3857FilterTemplate.format("")))
srsTwoLayersTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>890555.92634619 5465442.18332275</gml:lowerCorner>
<gml:upperCorner>1001875.41713946 5621521.48619207</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
</wfs:Query>
<wfs:Query typeName="testlayer" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>890555.92634619 5465442.18332275</gml:lowerCorner>
<gml:upperCorner>1001875.41713946 5621521.48619207</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('srs_two_layers_post', srsTwoLayersTemplate.format("")))
sortTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>8 44</gml:lowerCorner>
<gml:upperCorner>9 45</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
<ogc:SortBy>
<ogc:SortProperty>
<ogc:PropertyName>id</ogc:PropertyName>
<ogc:SortOrder>DESC</ogc:SortOrder>
</ogc:SortProperty>
</ogc:SortBy>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('sortby_post', sortTemplate.format("")))
andTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>id</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThan>
<ogc:PropertyName>id</ogc:PropertyName>
<ogc:Literal>3</ogc:Literal>
</ogc:PropertyIsLessThan>
</ogc:And>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('and_post', andTemplate.format("")))
andBboxTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>890555.92634619 5465442.18332275</gml:lowerCorner>
<gml:upperCorner>1001875.41713946 5621521.48619207</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>id</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThan>
<ogc:PropertyName>id</ogc:PropertyName>
<ogc:Literal>3</ogc:Literal>
</ogc:PropertyIsLessThan>
</ogc:And>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('bbox_inside_and_post', andBboxTemplate.format("")))
# With namespace
template = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="feature:testlayer" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>8 44</gml:lowerCorner>
<gml:upperCorner>9 45</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('nobbox_post', template.format("")))
template = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature service="WFS" version="1.0.0" {} xmlns:wfs="http://www.opengis.net/wfs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<wfs:Query typeName="testlayer" xmlns="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:BBOX>
<ogc:PropertyName>geometry</ogc:PropertyName>
<gml:Envelope xmlns:gml="http://www.opengis.net/gml">
<gml:lowerCorner>8 44</gml:lowerCorner>
<gml:upperCorner>9 45</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
tests.append(('nobbox_post', template.format("")))
for id, req in tests:
self.wfs_getfeature_post_compare(id, req)
def test_getFeatureBBOX(self):
"""Test with (1.1.0) and without (1.0.0) CRS"""
# Tests without CRS
self.wfs_request_compare(
"GetFeature", '1.0.0', "TYPENAME=testlayer&RESULTTYPE=hits&BBOX=8.20347,44.901471,8.2035354,44.901493", 'wfs_getFeature_1_0_0_bbox_1_feature')
self.wfs_request_compare(
"GetFeature", '1.0.0', "TYPENAME=testlayer&RESULTTYPE=hits&BBOX=8.203127,44.9012765,8.204138,44.901632", 'wfs_getFeature_1_0_0_bbox_3_feature')
# Tests with CRS
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=8.20347,44.901471,8.2035354,44.901493,EPSG:4326", 'wfs_getFeature_1_0_0_epsgbbox_1_feature')
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=8.203127,44.9012765,8.204138,44.901632,EPSG:4326", 'wfs_getFeature_1_0_0_epsgbbox_3_feature')
self.wfs_request_compare(
"GetFeature", '1.1.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=8.20347,44.901471,8.2035354,44.901493,EPSG:4326", 'wfs_getFeature_1_1_0_epsgbbox_1_feature')
self.wfs_request_compare(
"GetFeature", '1.1.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=8.203127,44.9012765,8.204138,44.901632,EPSG:4326", 'wfs_getFeature_1_1_0_epsgbbox_3_feature')
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913144,5605992,913303,5606048,EPSG:3857",
'wfs_getFeature_1_0_0_epsgbbox_3_feature_3857')
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913206,5606024,913213,5606026,EPSG:3857",
'wfs_getFeature_1_0_0_epsgbbox_1_feature_3857')
self.wfs_request_compare("GetFeature", '1.1.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913144,5605992,913303,5606048,EPSG:3857",
'wfs_getFeature_1_1_0_epsgbbox_3_feature_3857')
self.wfs_request_compare("GetFeature", '1.1.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913206,5606024,913213,5606026,EPSG:3857",
'wfs_getFeature_1_1_0_epsgbbox_1_feature_3857')
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:3857&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913144,5605992,913303,5606048,EPSG:3857",
'wfs_getFeature_1_0_0_epsgbbox_3_feature_3857')
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:3857&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913206,5606024,913213,5606026,EPSG:3857",
'wfs_getFeature_1_0_0_epsgbbox_1_feature_3857')
self.wfs_request_compare("GetFeature", '1.1.0', "SRSNAME=EPSG:3857&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913144,5605992,913303,5606048,EPSG:3857",
'wfs_getFeature_1_1_0_epsgbbox_3_feature_3857')
self.wfs_request_compare("GetFeature", '1.1.0', "SRSNAME=EPSG:3857&TYPENAME=testlayer&RESULTTYPE=hits&BBOX=913206,5606024,913213,5606026,EPSG:3857",
'wfs_getFeature_1_1_0_epsgbbox_1_feature_3857')
def test_getFeatureFeatureId(self):
"""Test GetFeature with featureid"""
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&FEATUREID=testlayer.0", 'wfs_getFeature_1_0_0_featureid_0')
def test_getFeature_EXP_FILTER_regression_20927(self):
"""Test expressions with EXP_FILTER"""
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&FEATUREID=testlayer.0&EXP_FILTER=\"name\"='one'", 'wfs_getFeature_1_0_0_EXP_FILTER_FID_one')
# Note that FEATUREID takes precedence over EXP_FILTER and the filter is completely ignored when FEATUREID is set
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&FEATUREID=testlayer.0&EXP_FILTER=\"name\"='two'", 'wfs_getFeature_1_0_0_EXP_FILTER_FID_one')
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&EXP_FILTER=\"name\"='two'", 'wfs_getFeature_1_0_0_EXP_FILTER_two')
self.wfs_request_compare(
"GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&EXP_FILTER=\"name\"=concat('tw', 'o')", 'wfs_getFeature_1_0_0_EXP_FILTER_two')
# Syntax ok but function does not exist
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&EXP_FILTER=\"name\"=invalid_expression('tw', 'o')",
'wfs_getFeature_1_0_0_EXP_FILTER_invalid_expression')
# Syntax error in exp
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&EXP_FILTER=\"name\"=concat('tw, 'o')",
'wfs_getFeature_1_0_0_EXP_FILTER_syntax_error')
# BBOX gml expressions
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&EXP_FILTER=intersects($geometry, geom_from_gml('<gml:Box> <gml:coordinates cs=\",\" ts=\" \">8.20344750430995617,44.9013881888184514 8.20347909100379269,44.90140004005827024</gml:coordinates></gml:Box>'))", 'wfs_getFeature_1_0_0_EXP_FILTER_gml_bbox_three')
self.wfs_request_compare("GetFeature", '1.0.0', "SRSNAME=EPSG:4326&TYPENAME=testlayer&EXP_FILTER=intersects($geometry, geom_from_gml('<gml:Box> <gml:coordinates cs=\",\" ts=\" \">8.20348458304175665,44.90147459621791626 8.20351616973559317,44.9014864474577351</gml:coordinates></gml:Box>'))", 'wfs_getFeature_1_0_0_EXP_FILTER_gml_bbox_one')
def test_describeFeatureType(self):
"""Test DescribeFeatureType with TYPENAME filters"""
project_file = "test_project_wms_grouped_layers.qgs"
self.wfs_request_compare("DescribeFeatureType", '1.0.0', "TYPENAME=as_areas&",
'wfs_describeFeatureType_1_0_0_typename_as_areas', project_file=project_file)
self.wfs_request_compare("DescribeFeatureType", '1.1.0', "TYPENAME=as_areas&",
'wfs_describeFeatureType_1_1_0_typename_as_areas', project_file=project_file)
self.wfs_request_compare("DescribeFeatureType", '1.0.0', "",
'wfs_describeFeatureType_1_0_0_typename_empty', project_file=project_file)
self.wfs_request_compare("DescribeFeatureType", '1.1.0', "",
'wfs_describeFeatureType_1_1_0_typename_empty', project_file=project_file)
self.wfs_request_compare("DescribeFeatureType", '1.0.0', "TYPENAME=does_not_exist&",
'wfs_describeFeatureType_1_0_0_typename_wrong', project_file=project_file)
self.wfs_request_compare("DescribeFeatureType", '1.1.0', "TYPENAME=does_not_exist&",
'wfs_describeFeatureType_1_1_0_typename_wrong', project_file=project_file)
def test_describeFeatureTypeVirtualFields(self):
"""Test DescribeFeatureType with virtual fields: bug GH-29767"""
project_file = "bug_gh29767_double_vfield.qgs"
self.wfs_request_compare("DescribeFeatureType", '1.1.0', "",
'wfs_describeFeatureType_1_1_0_virtual_fields', project_file=project_file)
def test_getFeatureFeature_0_nulls(self):
"""Test that 0 and null in integer columns are reported correctly"""
# Test transactions with 0 and nulls
post_data = """<?xml version="1.0" ?>
<wfs:Transaction service="WFS" version="{version}"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:gml="http://www.opengis.net/gml">
<wfs:Update typeName="cdb_lines">
<wfs:Property>
<wfs:Name>{field}</wfs:Name>
<wfs:Value>{value}</wfs:Value>
</wfs:Property>
<fes:Filter>
<fes:FeatureId fid="cdb_lines.22"/>
</fes:Filter>
</wfs:Update>
</wfs:Transaction>
"""
def _round_trip(value, field, version='1.1.0'):
"""Set a value on fid 22 and field and check it back"""
encoded_data = post_data.format(field=field, value=value, version=version).encode('utf8')
# Strip the field if NULL
if value is None:
encoded_data = encoded_data.replace(b'<wfs:Value>None</wfs:Value>', b'')
header, body = self._execute_request("?MAP=%s&SERVICE=WFS&VERSION=%s" % (
self.testdata_path + 'test_project_wms_grouped_layers.qgs', version), QgsServerRequest.PostMethod, encoded_data)
if version == '1.0.0':
self.assertTrue(b'<SUCCESS/>' in body, body)
else:
self.assertTrue(b'<totalUpdated>1</totalUpdated>' in body, body)
header, body = self._execute_request("?MAP=%s&SERVICE=WFS&REQUEST=GetFeature&TYPENAME=cdb_lines&FEATUREID=cdb_lines.22" % (
self.testdata_path + 'test_project_wms_grouped_layers.qgs'))
if value is not None:
xml_value = '<qgs:{0}>{1}</qgs:{0}>'.format(field, value).encode('utf8')
self.assertTrue(xml_value in body, "%s not found in body" % xml_value)
else:
xml_value = '<qgs:{0}>'.format(field).encode('utf8')
self.assertFalse(xml_value in body)
# Check the backend
vl = QgsVectorLayer(
self.testdata_path + 'test_project_wms_grouped_layers.gpkg|layername=cdb_lines', 'vl', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(
str(vl.getFeature(22)[field]), value if value is not None else 'NULL')
for version in ('1.0.0', '1.1.0'):
_round_trip('0', 'id_long', version)
_round_trip('12345', 'id_long', version)
_round_trip('0', 'id', version)
_round_trip('12345', 'id', version)
_round_trip(None, 'id', version)
_round_trip(None, 'id_long', version)
# "name" is NOT NULL: try to set it to empty string
_round_trip('', 'name', version)
# Then NULL
data = post_data.format(field='name', value='', version=version).encode('utf8')
encoded_data = data.replace(b'<wfs:Value></wfs:Value>', b'')
header, body = self._execute_request("?MAP=%s&SERVICE=WFS" % (
self.testdata_path + 'test_project_wms_grouped_layers.qgs'), QgsServerRequest.PostMethod, encoded_data)
if version == '1.0.0':
self.assertTrue(b'<ERROR/>' in body, body)
else:
self.assertTrue(b'<totalUpdated>0</totalUpdated>' in body)
self.assertTrue(b'<Message>NOT NULL constraint error on layer \'cdb_lines\', field \'name\'</Message>' in body, body)
def test_describeFeatureTypeGeometryless(self):
"""Test DescribeFeatureType with geometryless tables - bug GH-30381"""
project_file = "test_project_geometryless_gh30381.qgs"
self.wfs_request_compare("DescribeFeatureType", '1.1.0',
reference_base_name='wfs_describeFeatureType_1_1_0_geometryless',
project_file=project_file)
def test_getFeatureFeatureIdJson(self):
"""Test GetFeature with featureid JSON format and various content types"""
for ct in ('GeoJSON', 'application/vnd.geo+json', 'application/json', 'application/geo+json'):
self.wfs_request_compare(
"GetFeature",
'1.0.0',
("OUTPUTFORMAT=%s" % ct)
+ "&SRSNAME=EPSG:4326&TYPENAME=testlayer&FEATUREID=testlayer.0",
'wfs_getFeature_1_0_0_featureid_0_json')
    def test_insert_srsName(self):
        """Test srsName is respected when inserting"""
post_data = """
<Transaction xmlns="http://www.opengis.net/wfs" xsi:schemaLocation="http://www.qgis.org/gml http://localhost:8000/?SERVICE=WFS&REQUEST=DescribeFeatureType&VERSION=1.0.0&TYPENAME=as_symbols" service="WFS" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="{version}" xmlns:gml="http://www.opengis.net/gml">
<Insert xmlns="http://www.opengis.net/wfs">
<as_symbols xmlns="http://www.qgis.org/gml">
<name xmlns="http://www.qgis.org/gml">{name}</name>
<geometry xmlns="http://www.qgis.org/gml">
<gml:Point srsName="{srsName}">
<gml:coordinates cs="," ts=" ">{coordinates}</gml:coordinates>
</gml:Point>
</geometry>
</as_symbols>
</Insert>
</Transaction>
"""
project = self.testdata_path + \
"test_project_wms_grouped_layers.qgs"
assert os.path.exists(project), "Project file not found: " + project
query_string = '?SERVICE=WFS&MAP={}'.format(
urllib.parse.quote(project))
request = post_data.format(
name='4326-test1',
version='1.1.0',
srsName='EPSG:4326',
coordinates='10.67,52.48'
)
header, body = self._execute_request(
query_string, requestMethod=QgsServerRequest.PostMethod, data=request.encode('utf-8'))
# Verify
vl = QgsVectorLayer(self.testdata_path + 'test_project_wms_grouped_layers.gpkg|layername=as_symbols', 'as_symbols')
self.assertTrue(vl.isValid())
feature = next(vl.getFeatures(QgsFeatureRequest(QgsExpression('"name" = \'4326-test1\''))))
geom = feature.geometry()
tr = QgsCoordinateTransform(QgsCoordinateReferenceSystem.fromEpsgId(4326), vl.crs(), QgsCoordinateTransformContext())
geom_4326 = QgsGeometry.fromWkt('point( 10.67 52.48)')
geom_4326.transform(tr)
self.assertEqual(geom.asWkt(0), geom_4326.asWkt(0))
# Now: insert a feature in layer's CRS
request = post_data.format(
name='25832-test1',
version='1.1.0',
srsName='EPSG:25832',
coordinates='613412,5815738'
)
header, body = self._execute_request(
query_string, requestMethod=QgsServerRequest.PostMethod, data=request.encode('utf-8'))
feature = next(vl.getFeatures(QgsFeatureRequest(QgsExpression('"name" = \'25832-test1\''))))
geom = feature.geometry()
self.assertEqual(geom.asWkt(0), geom_4326.asWkt(0))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
Mauricio3000/fk_ik_sine_rig | tests/test_rig/test_sine_rig.py | 1 | 1187 | import unittest
import pymel.core as pm
from tool.errors import errors
from tool.rig import sine_rig
class Test_sine_rig(unittest.TestCase):
def test_sine_rig_build_errors(self):
self.assertRaises(errors.InputError, sine_rig.build)
self.assertRaises(errors.InputError, sine_rig.build,
'temp', 'plane', 'reg_node')
def test_sine_rig_build(self):
name = 'temp'
crv = pm.circle()[0]
reg_node = pm.nt.Transform()
cnt = pm.circle()[0]
reg_node.addAttr('temp1_ik_cnt', at='message')
reg_node.addAttr('version', at='message')
reg_node.addAttr('reg_node', at='message')
cnt.message >> reg_node.temp1_ik_cnt
reg_node = sine_rig.build(name, crv, reg_node)
for a in ['wavelength', 'amplitude', 'sineOffOn',
'offset', 'direction']:
self.assertTrue(hasattr(cnt, a))
self.assertTrue(hasattr(reg_node, 'sine_handle'))
self.assertTrue(pm.PyNode('%s_sineDeformer' % name).objExists())
def suite():
return unittest.TestLoader().loadTestsFromTestCase(Test_sine_rig)
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
AltSchool/django | tests/gis_tests/layermap/models.py | 235 | 2523 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=25)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class State(NamedModel):
pass
class County(NamedModel):
state = models.ForeignKey(State, models.CASCADE)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
class CountyFeat(NamedModel):
poly = models.PolygonField(srid=4269)
class City(NamedModel):
name_txt = models.TextField(default='')
name_short = models.CharField(max_length=5)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
class Meta:
app_label = 'layermap'
required_db_features = ['gis_enabled']
class Interstate(NamedModel):
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
class Meta:
app_label = 'layermap'
required_db_features = ['gis_enabled']
# Same as `City` above, but for testing model inheritance.
class CityBase(NamedModel):
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
class ICity1(CityBase):
dt = models.DateField()
class Meta(CityBase.Meta):
pass
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
class Meta(ICity1.Meta):
pass
class Invalid(models.Model):
point = models.PointField()
class Meta:
required_db_features = ['gis_enabled']
# Mapping dictionaries for the models above.
co_mapping = {
'name': 'Name',
    # ForeignKeys use another mapping dictionary for the _related_ Model (State in this case).
'state': {'name': 'State'},
'mpoly': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name': 'Name',
'poly': 'POLYGON',
}
city_mapping = {'name': 'Name',
'population': 'Population',
'density': 'Density',
'dt': 'Created',
'point': 'POINT',
}
inter_mapping = {'name': 'Name',
'length': 'Length',
'path': 'LINESTRING',
}
| bsd-3-clause |
collective/cyn.in | products/WebServerAuth/tests/test_extraction.py | 4 | 2764 | """Unit tests for extraction plugin"""
from Products.PloneTestCase import PloneTestCase
from Products.CMFCore.utils import getToolByName
from Products.WebServerAuth.utils import firstInstanceOfClass
from Products.WebServerAuth.plugin import usernameKey, defaultUsernameHeader, stripDomainNamesKey, usernameHeaderKey
from Products.WebServerAuth.tests.base import WebServerAuthTestCase
PloneTestCase.installProduct('WebServerAuth')
PloneTestCase.setupPloneSite(products=['WebServerAuth'])
_username = 'someUsername'
_domain = 'example.com'
_userAtDomain = '%s@%s' % (_username, _domain)
class _MockRequest(object):
def __init__(self, environ=None):
self.environ = environ or {}
class TestExtraction(WebServerAuthTestCase):
def afterSetUp(self):
self.plugin = self._plugin()
def testDefaultExtraction(self):
"""Assert default behavior of extraction works."""
request = _MockRequest()
self.failUnless(self.plugin.extractCredentials(request) is None, msg="Found credentials to extract, even though we shouldn't have.")
request.environ[defaultUsernameHeader] = _username
self.failUnlessEqual(self.plugin.extractCredentials(request), {usernameKey: _username})
# Make sure the domain name gets stripped off the end of the username by default:
request.environ[defaultUsernameHeader] = _userAtDomain
self.failUnlessEqual(self.plugin.extractCredentials(request), {usernameKey: _username})
def testUsernameHeaderCustomization(self):
"""Assert the name of the header in which the username is passed can be changed."""
alternateHeader = 'HTTP_REMOTE_USER'
request = _MockRequest(environ={alternateHeader: _username})
saveHeader = self.plugin.config[usernameHeaderKey]
self.plugin.config[usernameHeaderKey] = alternateHeader
try:
self.failUnlessEqual(self.plugin.extractCredentials(request), {usernameKey: _username})
finally:
self.plugin.config[usernameHeaderKey] = saveHeader
def testDomainStripping(self):
"""Assert choosing to not strip the domain off the end of a [email protected] username works."""
request = _MockRequest(environ={defaultUsernameHeader: _userAtDomain})
saveStrip = self.plugin.config[stripDomainNamesKey]
self.plugin.config[stripDomainNamesKey] = False
try:
self.failUnlessEqual(self.plugin.extractCredentials(request), {usernameKey: _userAtDomain})
finally:
self.plugin.config[stripDomainNamesKey] = saveStrip
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestExtraction))
return suite
| gpl-3.0 |
dhanunjaya/neutron | neutron/agent/l3/dvr.py | 26 | 2827 | # Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
from oslo_log import log as logging
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
LOG = logging.getLogger(__name__)
# TODO(Carl) Following constants retained to increase SNR during refactoring
SNAT_INT_DEV_PREFIX = dvr_snat_ns.SNAT_INT_DEV_PREFIX
SNAT_NS_PREFIX = dvr_snat_ns.SNAT_NS_PREFIX
class AgentMixin(object):
def __init__(self, host):
# dvr data
self._fip_namespaces = weakref.WeakValueDictionary()
super(AgentMixin, self).__init__(host)
def get_fip_ns(self, ext_net_id):
# TODO(Carl) is this necessary? Code that this replaced was careful to
# convert these to string like this so I preserved that.
ext_net_id = str(ext_net_id)
fip_ns = self._fip_namespaces.get(ext_net_id)
if fip_ns and not fip_ns.destroyed:
return fip_ns
fip_ns = dvr_fip_ns.FipNamespace(ext_net_id,
self.conf,
self.driver,
self.use_ipv6)
self._fip_namespaces[ext_net_id] = fip_ns
return fip_ns
def get_ports_by_subnet(self, subnet_id):
return self.plugin_rpc.get_ports_by_subnet(self.context, subnet_id)
def add_arp_entry(self, context, payload):
"""Add arp entry into router namespace. Called from RPC."""
router_id = payload['router_id']
ri = self.router_info.get(router_id)
if not ri:
return
arp_table = payload['arp_table']
ip = arp_table['ip_address']
mac = arp_table['mac_address']
subnet_id = arp_table['subnet_id']
ri._update_arp_entry(ip, mac, subnet_id, 'add')
def del_arp_entry(self, context, payload):
"""Delete arp entry from router namespace. Called from RPC."""
router_id = payload['router_id']
ri = self.router_info.get(router_id)
if not ri:
return
arp_table = payload['arp_table']
ip = arp_table['ip_address']
mac = arp_table['mac_address']
subnet_id = arp_table['subnet_id']
ri._update_arp_entry(ip, mac, subnet_id, 'delete')
| apache-2.0 |
ThiagoGarciaAlves/erpnext | erpnext/accounts/doctype/cost_center/cost_center.py | 16 | 2789 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils.nestedset import NestedSet
class CostCenter(NestedSet):
nsm_parent_field = 'parent_cost_center'
def autoname(self):
self.name = self.cost_center_name.strip() + ' - ' + \
frappe.db.get_value("Company", self.company, "abbr")
def validate_mandatory(self):
if self.cost_center_name != self.company and not self.parent_cost_center:
msgprint(_("Please enter parent cost center"), raise_exception=1)
elif self.cost_center_name == self.company and self.parent_cost_center:
msgprint(_("Root cannot have a parent cost center"), raise_exception=1)
def convert_group_to_ledger(self):
if self.check_if_child_exists():
msgprint(_("Cannot convert Cost Center to ledger as it has child nodes"), raise_exception=1)
elif self.check_gle_exists():
msgprint(_("Cost Center with existing transactions can not be converted to ledger"), raise_exception=1)
else:
self.is_group = 0
self.save()
return 1
def convert_ledger_to_group(self):
if self.check_gle_exists():
msgprint(_("Cost Center with existing transactions can not be converted to group"), raise_exception=1)
else:
self.is_group = 1
self.save()
return 1
def check_gle_exists(self):
return frappe.db.get_value("GL Entry", {"cost_center": self.name})
def check_if_child_exists(self):
return frappe.db.sql("select name from `tabCost Center` where \
parent_cost_center = %s and docstatus != 2", self.name)
def validate_budget_details(self):
check_acc_list = []
for d in self.get('budgets'):
if self.is_group==1:
msgprint(_("Budget cannot be set for Group Cost Centers"), raise_exception=1)
if [d.account, d.fiscal_year] in check_acc_list:
msgprint(_("Account {0} has been entered more than once for fiscal year {1}").format(d.account, d.fiscal_year), raise_exception=1)
else:
check_acc_list.append([d.account, d.fiscal_year])
def validate(self):
self.validate_mandatory()
self.validate_budget_details()
def before_rename(self, olddn, newdn, merge=False):
# Add company abbr if not provided
from erpnext.setup.doctype.company.company import get_name_with_abbr
new_cost_center = get_name_with_abbr(newdn, self.company)
# Validate properties before merging
super(CostCenter, self).before_rename(olddn, new_cost_center, merge, "is_group")
return new_cost_center
def after_rename(self, olddn, newdn, merge=False):
if not merge:
frappe.db.set_value("Cost Center", newdn, "cost_center_name",
" - ".join(newdn.split(" - ")[:-1]))
else:
super(CostCenter, self).after_rename(olddn, newdn, merge)
| agpl-3.0 |
watchdogpolska/feder | feder/institutions/migrations/0008_auto_20161001_2053.py | 1 | 2085 | # Generated by Django 1.10.1 on 2016-10-01 20:53
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
def forwards_func(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Institution = apps.get_model("institutions", "institution")
Email = apps.get_model("institutions", "Email")
db_alias = schema_editor.connection.alias
for institution in Institution.objects.using(db_alias).all():
emails = list(
Email.objects.filter(institution=institution.pk).order_by("priority").all()
)
if emails:
institution.email = max(emails, key=lambda x: x.priority).email
institution.save()
class Migration(migrations.Migration):
dependencies = [("institutions", "0007_auto_20160912_2250")]
operations = [
migrations.AlterUniqueTogether(name="email", unique_together=set()),
migrations.AddField(
model_name="institution",
name="email",
field=models.EmailField(
default="[email protected]",
max_length=254,
verbose_name="Email of institution",
),
preserve_default=False,
),
migrations.RunPython(forwards_func),
migrations.RemoveField(model_name="email", name="institution"),
migrations.AddField(
model_name="institution",
name="created",
field=model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
migrations.AddField(
model_name="institution",
name="modified",
field=model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
migrations.DeleteModel(name="Email"),
]
| mit |
mandeepdhami/netvirt-ctrl | cli/midw.py | 3 | 18056 | #
# Copyright (c) 2012,2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# The database/model descriptions exist to meet particular
# needs, for example, switch-aliases exist to provide an
# alternate name space from dpid's, to allow for a more
# readable and human-usable form of the same dpid.  Aliases
# would then naturally need an alias->dpid conversion, and
# at the same time, a dpid->alias (at least for the display
# of dpid's).
#
# The functions in this file provide these type of abstractions,
# taking the output from model lookup's in the rest api, and
# supplying some service used by the cli.
#
import rest_to_model
import fmtcnv
import json
import utif
def init_midware(bs, modi):
global sdnsh, mi
sdnsh = bs
mi = modi
#
# --------------------------------------------------------------------------------
def create_obj_type_dict(obj_type, field, key = None, value = None):
"""
Return a dictionary from a table search, where the key is one of the
fields. This doesn't manage multiple field matches.
Typically, the field selected is a foreign key for the obj_type.
For ('host-network-address', 'host'), this creates a dict
indexed by the mac address, returning the row in the table associated
with the mac (since the primary key for 'host' is a mac address).
For ('tag-mapping', 'host'), this creates a dict indexed by
the mac, returning the matching row in the table.
note: This gets the whole table
"""
if not mi.obj_type_has_field(obj_type, field):
return {}
if not mi.obj_type_has_model(obj_type):
data = {}
if key and value:
data[key] = value
rows = rest_to_model.get_model_from_url(obj_type, data)
elif not type(key) is dict:
try:
rows = sdnsh.get_table_from_store(obj_type, key, value)
except Exception, e:
errors = sdnsh.rest_error_to_dict(e)
print sdnsh.rest_error_dict_to_message(errors)
rows = []
else:
try:
rows = sdnsh.rest_query_objects(obj_type, key)
except Exception, e:
errors = sdnsh.rest_error_to_dict(e)
print sdnsh.rest_error_dict_to_message(errors)
rows = []
s_dict = {}
for row in rows:
if row[field] in s_dict:
s_dict[row[field]].append(row)
else:
s_dict[row[field]] = [row]
return s_dict
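#
# Illustrative example (assumed store contents, not part of the original
# module): given 'tag-mapping' rows such as
#   [{'host': '00:00:00:00:00:01', 'tag': 'dev'},
#    {'host': '00:00:00:00:00:01', 'tag': 'qa'}]
# create_obj_type_dict('tag-mapping', 'host') returns
#   {'00:00:00:00:00:01': [<the 'dev' row>, <the 'qa' row>]}
# i.e. each distinct value of the chosen field maps to the list of rows
# carrying that value.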
#
# ALIAS
#
#
# --------------------------------------------------------------------------------
def alias_lookup(alias_obj_type, alias_id):
"""
Return the value for the alias replacement by looking it up in the store.
When there is no alias replacement, return None.
"""
field = mi.alias_obj_type_field(alias_obj_type)
if not field:
print sdnsh.error_msg("Error: no field for alias")
return None
try:
alias_key = mi.pk(alias_obj_type)
# use an exact search instead of a 'get_object...()' since
# a miss for an exact search can lead to a 404 error, which
# gets recorded in the error logs
alias_row = sdnsh.get_table_from_store(alias_obj_type,
alias_key,
alias_id,
"exact")
if len(alias_row) == 1:
return alias_row[0][field]
# only len(alias_row) == 0 at this point
except:
pass
return None
#
# --------------------------------------------------------------------------------
def convert_alias_to_object_key(obj_type, name_or_alias):
"""
For a specific obj_type (table/model) which may have an alias 'row',
return the object key it refers to, resolving name_or_alias through
the alias table when such an alias exists.
"""
if obj_type in mi.alias_obj_type_xref:
if name_or_alias in sdnsh.reserved_words:
return name_or_alias
for alias in mi.alias_obj_type_xref[obj_type]:
alias_value = alias_lookup(alias, name_or_alias)
if alias_value:
return alias_value
return name_or_alias
#
# --------------------------------------------------------------------------------
def alias_choices_for_alias_obj_type(entries, obj_type, text):
"""
Used to return all choices of entries for an alias. Remove any original
items which appear in the entries list passed in, preventing duplication
of entries.
Also see cp_alias_choices(), which is similar, but includes
the current mode.
"""
if obj_type in mi.alias_obj_type_xref:
for alias in mi.alias_obj_type_xref[obj_type]:
try:
key = mi.pk(alias)
alias_dict = create_obj_type_dict(alias, key, key, text)
#
# remove the alias name if the dpid is in the
# list of entries... In all cases the alias is added,
# especially since the alias_dict may only contain selected
# entries from the 'text' query, and entries may already
# exclude those items.
alias_field = mi.alias_obj_type_field(alias)
if not alias_field:
continue
for item in alias_dict:
if alias_dict[item][0][alias_field] in entries:
entries.remove(alias_dict[item][0][alias_field])
entries.append(item)
except Exception, e:
pass
return entries
#
# --------------------------------------------------------------------------------
def alias_lookup_with_foreign_key(alias_obj_type, foreign_key):
"""
Find the alias name for some alias based on the foreign key's
value it's associated with.
"""
foreign_field = mi.alias_obj_type_field(alias_obj_type)
try:
rows = sdnsh.get_table_from_store(alias_obj_type,
foreign_field,
foreign_key,
"exact")
except Exception, e:
errors = sdnsh.rest_error_to_dict(e)
print sdnsh.rest_error_dict_to_message(errors)
rows = []
if len(rows) == 1:
return rows[0][mi.pk(alias_obj_type)]
return None
#
# Interface between the cli and table output requires dictionaries
# which map between low item type values (for example, dpid's) and
# alias names for the items (for example, switch aliases), to be
# updated before display. If cassandra could provide some version
# number (or hash of the complete table), the lookup could be avoided
# by validating that the current result is up-to-date.
#
#
# --------------------------------------------------------------------------------
def update_show_alias(obj_type):
"""
Update alias associations for the pretty printer, used for the
'show' of tables
"""
if obj_type in mi.alias_obj_type_xref:
for alias in mi.alias_obj_type_xref[obj_type]:
field = mi.alias_obj_type_field(alias)
if not field:
print sdnsh.error_msg("update show alias alias_obj_type_field")
return
try:
table = sdnsh.get_table_from_store(alias)
except Exception, e:
table = []
new_dict = {}
key = mi.pk(alias)
# (foreign_obj, foreign_field) = \
# mi.foreign_key_references(alias, field)
for row in table:
new_dict[row[field]] = row[key]
fmtcnv.update_alias_dict(obj_type, new_dict)
return
#
# --------------------------------------------------------------------------------
def update_switch_alias_cache():
"""
Update the cliModeInfo prettyprinting switch table
"""
return update_show_alias('switch-config')
#
# --------------------------------------------------------------------------------
def update_switch_port_name_cache():
"""
Update the cliModeInfo prettyprinting portNames table
"""
# return update_show_alias('port')
errors = None
switch_port_to_name_dict = {}
try:
ports = rest_to_model.get_model_from_url('interfaces', {})
except Exception, e:
errors = sdnsh.rest_error_to_dict(e)
if errors:
print sdnsh.rest_error_dict_to_message(errors)
return
for port in ports:
key_string = port['switch'] + "." + "%d" % port['portNumber']
switch_port_to_name_dict[key_string] = port['portName']
fmtcnv.update_alias_dict("portNames", switch_port_to_name_dict)
#
# --------------------------------------------------------------------------------
def update_host_alias_cache():
"""
Update the cliModeInfo prettyprinting host table
"""
return update_show_alias('host-config')
#
# --------------------------------------------------------------------------------
# update_flow_cookie_hash
def update_flow_cookie_hash():
"""
The formatter keeps a map for static flow entries.
"""
# iterate through all the static flows and get their hashes once
flow_map = {}
prime = 211
for sf in sdnsh.get_table_from_store("flow-entry"):
flow_hash = 2311
for i in range(0, len(sf['name'])):
flow_hash = flow_hash * prime + ord(sf['name'][i])
flow_hash = flow_hash & ( (1 << 20) - 1)
flow_map[flow_hash] = sf['name']
fmtcnv.update_alias_dict("staticflow", flow_map)
fmtcnv.callout_flow_encoders(sdnsh)
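#
# Worked example (illustrative only, not part of the original module): the
# loop above is a polynomial rolling hash, h = h * 211 + ord(c) starting at
# 2311, masked to 20 bits. A static flow named 'f1' therefore hashes to
#   ((2311 * 211 + ord('f')) * 211 + ord('1')) & 0xFFFFF == 149154
# which lets fmtcnv translate the 20-bit cookie seen in 'show' output back
# into the flow-entry name.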
#
# --------------------------------------------------------------------------------
#
def update_controller_node_alias_cache():
return update_show_alias('controller-node')
#
# --------------------------------------------------------------------------------
#
def obj_type_show_alias_update(obj_type):
"""
When some item is about to be displayed, particular 'alias'
items for the display may require updating.  Instead of just
updating everything all the time, peek at the different formatting
functions and use those function names to determine what needs to
be updated.
Also see formatter_to_update in climodelinfo, since it may
need to include new formatting functions.
"""
update = {}
sdnsh.pp.format_to_alias_update(obj_type, update)
# select objects from 'update' dict
if 'host' in update:
update_host_alias_cache()
if 'switch' in update:
update_switch_alias_cache()
if 'port' in update:
update_switch_port_name_cache()
if 'flow' in update:
update_flow_cookie_hash()
if 'controller-node' in update:
update_controller_node_alias_cache()
#
# OBJECTs middleware.
#
#
# --------------------------------------------------------------------------------
def objects_starting_with(obj_type, text = "", key = None):
"""
The function returns a list of matching keys from the table/model
identified by the 'obj_type' parameter.
If the table/model has an 'alias' field, then this field's
values are also examined for matches.
The first argument is the name of a table/model in the store,
while the second argument is a prefix to filter the results.
The filter is applied to the key of the table/model, which
was previously populated.
"""
if key:
if not mi.obj_type_has_field(obj_type, key):
sdnsh.warning("objects_starting_with: %s doesn't have field %s" %
(obj_type, key))
else:
key = mi.pk(obj_type)
if key == None:
sdnsh.warning("objects_starting_with: %s doesn't have pk" %
(obj_type))
key_entries = []
# Next, find the object
# Deal with any changes to the lookup name based on the concatenation
# of the config mode name to the named identifier.
#
case = mi.get_obj_type_field_case_sensitive(obj_type, key)
id_value = utif.convert_case(case, text)
if mi.obj_type_has_model(obj_type):
# from the database
try:
entries = sdnsh.get_table_from_store(obj_type, key, id_value)
errors = None
except Exception, e:
errors = sdnsh.rest_error_to_dict(e)
if errors:
print sdnsh.rest_error_dict_to_message(errors)
return key_entries
else:
if id_value == '':
entries = rest_to_model.get_model_from_url(obj_type, {})
else:
entries = rest_to_model.get_model_from_url(obj_type, { key + "__startswith" : id_value })
if key and entries:
# Expand any key values which are lists (hosts, for example)
items = [x[key] for x in entries if x.get(key)]
entries = []
for item in items:
if type(item) == list:
entries += item
else:
entries.append(item)
key_entries = [sdnsh.quote_item(obj_type, x)
for x in entries if x.startswith(id_value)]
#
# for some specific tables which have id's concatenated from multiple other
# components, only part of the id is available for completion.
#
if mi.is_compound_key(obj_type, key):
separator_character = mi.compound_key_separator(obj_type, key)
keyDict = {}
for key in key_entries:
# keyDict[key.split(separator_character)[0]] = ''
keyDict[key] = ''
key_entries = keyDict.keys()
alias_obj_type = obj_type
if key != mi.pk(alias_obj_type):
# if this is a forgeign key, use the obj_type of the fk.
if mi.is_foreign_key(alias_obj_type, key):
(alias_obj_type, fk_name) = mi.foreign_key_references(alias_obj_type, key)
else:
# XXX possibly other choices to determine alias_obj_type?
alias_obj_type = None
if alias_obj_type:
obj_type_config = mi.obj_type_related_config_obj_type(alias_obj_type)
# alias_choices_for_alias_obj_type() removes switch dpid's which
# have associated alias names,
key_entries = alias_choices_for_alias_obj_type(key_entries,
obj_type_config,
text)
return key_entries
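#
# Illustrative completion example (hypothetical store contents): with two
# switches 00:00:00:00:00:00:00:01 and 00:00:00:00:00:00:00:02, and a
# switch alias 'core-sw' configured for the first one,
#   objects_starting_with('switch-config', '')
# offers ['00:00:00:00:00:00:00:02', 'core-sw'], because a dpid that has an
# alias is replaced by that alias in the completion choices.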
#
# --------------------------------------------------------------------------------
def local_interfaces_firewall_open(protos, ports, controller_id = None):
"""
Return a list of interfaces, which have the proto and port currently enabled
@param protos a string, or list of strings, identifying the protocol
@param ports a string or int, or a list of strings or ints, identifying the port
"""
# first collect all associated rules
if type(protos) != list:
protos = [protos]
if type(ports) != list:
ports = [ports]
rules = []
for proto in protos:
for port in ports:
query_dict = { 'proto' : proto, 'port' : port }
rules += sdnsh.rest_query_objects('firewall-rule', query_dict)
# create a dictionary indexed by the interface, which is part of the pk 'id'
rules_of_interface = dict([[x['interface'], x] for x in rules])
if controller_id == None:
# request 'this' controller
controller_url = "http://%s/rest/v1/system/controller" % sdnsh.controller
result = sdnsh.store.rest_simple_request(controller_url)
sdnsh.check_rest_result(result)
controller_id = json.loads(result)
if controller_id != 'all':
query_dict = { 'controller' : controller_id['id'] }
ifs = sdnsh.rest_query_objects('controller-interface', query_dict)
return [ifn for ifn in ifs if ifn['id'] in rules_of_interface]
#
# --------------------------------------------------------------------------------
def log_url(ip_and_port = None, log = None):
"""
Returns the url of the logs on the named ip_and_port.
"""
log_path = 'http://%s/rest/v1/system/log' % ip_and_port
if log:
log_path += '/' + log
return log_path
#
# --------------------------------------------------------------------------------
def controller_ip_and_port(controller):
"""
Return a list of ip:port values for named controllers,
to use to build urls for REST API's. If a controller of 'all'
is passed in, then all the controllers are enumerated.
If both ports 80 and 8000 are open, then two ip:port
pairs will be returned for a controller. This returns
ALL values which match, not a single ip:port for each
controller.
"""
url = 'http://%s/rest/v1/system/controller' % sdnsh.controller
rest_dict = sdnsh.rest_simple_request_to_dict(url)
this_controller = rest_dict['id']
ips_80 = [x for x in local_interfaces_firewall_open('tcp', 80,
controller)
if (x['ip'] != '' or x['discovered-ip'] != '')]
ips_8000 = [x for x in local_interfaces_firewall_open('tcp', 8000,
controller)
if (x['ip'] != '' or x['discovered-ip'] != '')]
return ['%s:80' % '127.0.0.1' if x['controller'] == this_controller else
x['discovered-ip'] if x['discovered-ip'] != '' else x['ip']
for x in ips_80] + ['%s:8000' %
'127.0.0.1' if x['controller'] == this_controller else
x['discovered-ip'] if x['discovered-ip'] != '' else x['ip']
for x in ips_8000]
| epl-1.0 |
doug-fish/horizon | openstack_dashboard/test/api_tests/cinder_tests.py | 21 | 8000 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.test.utils import override_settings
import six
import cinderclient as cinder_client
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CinderApiTests(test.APITestCase):
def test_volume_list(self):
search_opts = {'all_tenants': 1}
detailed = True
volumes = self.cinder_volumes.list()
volume_transfers = self.cinder_volume_transfers.list()
cinderclient = self.stub_cinderclient()
cinderclient.volumes = self.mox.CreateMockAnything()
cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
cinderclient.transfers = self.mox.CreateMockAnything()
cinderclient.transfers.list(
detailed=detailed,
search_opts=search_opts,).AndReturn(volume_transfers)
self.mox.ReplayAll()
# No assertions are necessary. Verification is handled by mox.
api.cinder.volume_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list(self):
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list_no_volume_configured(self):
# remove volume from service catalog
catalog = self.service_catalog
for service in catalog:
if service["type"] == "volume":
self.service_catalog.remove(service)
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_type_list_with_qos_associations(self):
volume_types = self.cinder_volume_types.list()
# Due to test data limitations, we can only run this test using
# one qos spec, which is associated with one volume type.
# If we use multiple qos specs, the test data will always
# return the same associated volume type, which is invalid
# and prevented by the UI.
qos_specs_full = self.cinder_qos_specs.list()
qos_specs_only_one = [qos_specs_full[0]]
associations = self.cinder_qos_spec_associations.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_types = self.mox.CreateMockAnything()
cinderclient.volume_types.list().AndReturn(volume_types)
cinderclient.qos_specs = self.mox.CreateMockAnything()
cinderclient.qos_specs.list().AndReturn(qos_specs_only_one)
cinderclient.qos_specs.get_associations = self.mox.CreateMockAnything()
cinderclient.qos_specs.get_associations(qos_specs_only_one[0].id).\
AndReturn(associations)
self.mox.ReplayAll()
assoc_vol_types = \
api.cinder.volume_type_list_with_qos_associations(self.request)
associate_spec = assoc_vol_types[0].associated_qos_spec
self.assertEqual(qos_specs_only_one[0].name, associate_spec)
def test_absolute_limits_with_negative_values(self):
values = {"maxTotalVolumes": -1, "totalVolumesUsed": -1}
expected_results = {"maxTotalVolumes": float("inf"),
"totalVolumesUsed": 0}
limits = self.mox.CreateMockAnything()
limits.absolute = []
for key, val in six.iteritems(values):
limit = self.mox.CreateMockAnything()
limit.name = key
limit.value = val
limits.absolute.append(limit)
cinderclient = self.stub_cinderclient()
cinderclient.limits = self.mox.CreateMockAnything()
cinderclient.limits.get().AndReturn(limits)
self.mox.ReplayAll()
ret_val = api.cinder.tenant_absolute_limits(self.request)
for key in expected_results.keys():
self.assertEqual(expected_results[key], ret_val[key])
def test_pool_list(self):
pools = self.cinder_pools.list()
cinderclient = self.stub_cinderclient()
cinderclient.pools = self.mox.CreateMockAnything()
cinderclient.pools.list(detailed=True).AndReturn(pools)
self.mox.ReplayAll()
# No assertions are necessary. Verification is handled by mox.
api.cinder.pool_list(self.request, detailed=True)
class CinderApiVersionTests(test.TestCase):
def setUp(self):
super(CinderApiVersionTests, self).setUp()
# The version is set when the module is loaded. Reset the
# active version each time so that we can test with different
# versions.
api.cinder.VERSIONS._active = None
def test_default_client_is_v2(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v2.client.Client)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_v2_setting_returns_v2_client(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v2.client.Client)
def test_get_v2_volume_attributes(self):
# Get a v2 volume
volume = self.cinder_volumes.get(name="v2_volume")
self.assertTrue(hasattr(volume._apiresource, 'name'))
self.assertFalse(hasattr(volume._apiresource, 'display_name'))
name = "A v2 test volume name"
description = "A v2 volume description"
setattr(volume._apiresource, 'name', name)
setattr(volume._apiresource, 'description', description)
self.assertEqual(name, volume.name)
self.assertEqual(description, volume.description)
def test_get_v2_snapshot_attributes(self):
# Get a v2 snapshot
snapshot = self.cinder_volume_snapshots.get(
description="v2 volume snapshot description")
self.assertFalse(hasattr(snapshot._apiresource, 'display_name'))
name = "A v2 test snapshot name"
description = "A v2 snapshot description"
setattr(snapshot._apiresource, 'name', name)
setattr(snapshot._apiresource, 'description', description)
self.assertEqual(name, snapshot.name)
self.assertEqual(description, snapshot.description)
def test_get_id_for_nameless_volume(self):
volume = self.cinder_volumes.first()
setattr(volume._apiresource, 'display_name', "")
self.assertEqual(volume.id, volume.name)
def test_adapt_dictionary_to_v2(self):
volume = self.cinder_volumes.first()
data = {'name': volume.name,
'description': volume.description,
'size': volume.size}
ret_data = api.cinder._replace_v2_parameters(data)
self.assertIn('name', ret_data.keys())
self.assertIn('description', ret_data.keys())
self.assertNotIn('display_name', ret_data.keys())
self.assertNotIn('display_description', ret_data.keys())
| apache-2.0 |
chrishokamp/fuel | fuel/converters/caltech101_silhouettes.py | 12 | 2497 | import os
import h5py
from scipy.io import loadmat
from fuel.converters.base import fill_hdf5_file, MissingInputFiles
def convert_silhouettes(size, directory, output_directory,
output_file=None):
""" Convert the CalTech 101 Silhouettes Datasets.
Parameters
----------
size : {16, 28}
Convert either the 16x16 or 28x28 sized version of the dataset.
directory : str
Directory in which the required input files reside.
output_file : str
Where to save the converted dataset.
"""
if size not in (16, 28):
raise ValueError('size must be 16 or 28')
if output_file is None:
output_file = 'caltech101_silhouettes{}.hdf5'.format(size)
output_file = os.path.join(output_directory, output_file)
input_file = 'caltech101_silhouettes_{}_split1.mat'.format(size)
input_file = os.path.join(directory, input_file)
if not os.path.isfile(input_file):
raise MissingInputFiles('Required files missing', [input_file])
with h5py.File(output_file, mode="w") as h5file:
mat = loadmat(input_file)
train_features = mat['train_data'].reshape([-1, 1, size, size])
train_targets = mat['train_labels']
valid_features = mat['val_data'].reshape([-1, 1, size, size])
valid_targets = mat['val_labels']
test_features = mat['test_data'].reshape([-1, 1, size, size])
test_targets = mat['test_labels']
data = (
('train', 'features', train_features),
('train', 'targets', train_targets),
('valid', 'features', valid_features),
('valid', 'targets', valid_targets),
('test', 'features', test_features),
('test', 'targets', test_targets),
)
fill_hdf5_file(h5file, data)
for i, label in enumerate(('batch', 'channel', 'height', 'width')):
h5file['features'].dims[i].label = label
for i, label in enumerate(('batch', 'index')):
h5file['targets'].dims[i].label = label
return (output_file,)
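# Illustrative invocation (assuming the standard fuel-convert entry point
# picks this converter up; the command below is not defined in this module):
#   fuel-convert caltech101_silhouettes 16
# looks for caltech101_silhouettes_16_split1.mat in the data directory and
# writes caltech101_silhouettes16.hdf5 to the chosen output directory.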
def fill_subparser(subparser):
"""Sets up a subparser to convert CalTech101 Silhouettes Database files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `caltech101_silhouettes` command.
"""
subparser.add_argument(
"size", type=int, choices=(16, 28),
help="height/width of the datapoints")
return convert_silhouettes
| mit |
norangmangto/pypi-default | setup.py | 1 | 4103 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import re
import ast
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('default/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
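# Illustrative note (assumption about the package layout, not enforced here):
# the regex above expects default/__init__.py to contain a line such as
#   __version__ = '1.0.3'
# ast.literal_eval() then turns the captured string literal "'1.0.3'" into
# the plain version string passed to setup() below.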
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='default',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Merge JSON data with default JSON data',
long_description=long_description,
# The project's main homepage.
url='https://github.com/norangmangto/pypi-default',
# Author details
author='Beomsoo Jang',
author_email='[email protected]',
# Choose your license
license='GPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='default development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': [],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={},
)
| gpl-3.0 |
jabesq/home-assistant | homeassistant/components/caldav/calendar.py | 1 | 9850 | """Support for WebDav Calendar."""
import copy
from datetime import datetime, timedelta
import logging
import re
import voluptuous as vol
from homeassistant.components.calendar import (
ENTITY_ID_FORMAT, PLATFORM_SCHEMA, CalendarEventDevice, calculate_offset,
get_date, is_offset_reached)
from homeassistant.const import (
CONF_NAME, CONF_PASSWORD, CONF_URL, CONF_USERNAME, CONF_VERIFY_SSL)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util import Throttle, dt
_LOGGER = logging.getLogger(__name__)
CONF_CALENDARS = 'calendars'
CONF_CUSTOM_CALENDARS = 'custom_calendars'
CONF_CALENDAR = 'calendar'
CONF_SEARCH = 'search'
OFFSET = '!!'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
# pylint: disable=no-value-for-parameter
vol.Required(CONF_URL): vol.Url(),
vol.Optional(CONF_CALENDARS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_CUSTOM_CALENDARS, default=[]):
vol.All(cv.ensure_list, [
vol.Schema({
vol.Required(CONF_CALENDAR): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SEARCH): cv.string,
})
]),
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean
})
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
def setup_platform(hass, config, add_entities, disc_info=None):
"""Set up the WebDav Calendar platform."""
import caldav
url = config[CONF_URL]
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
client = caldav.DAVClient(
url, None, username, password, ssl_verify_cert=config[CONF_VERIFY_SSL])
calendars = client.principal().calendars()
calendar_devices = []
for calendar in list(calendars):
# If a calendar name was given in the configuration,
# ignore all the others
if (config[CONF_CALENDARS]
and calendar.name not in config[CONF_CALENDARS]):
_LOGGER.debug("Ignoring calendar '%s'", calendar.name)
continue
# Create additional calendars based on custom filtering rules
for cust_calendar in config[CONF_CUSTOM_CALENDARS]:
# Check that the base calendar matches
if cust_calendar[CONF_CALENDAR] != calendar.name:
continue
name = cust_calendar[CONF_NAME]
device_id = "{} {}".format(
cust_calendar[CONF_CALENDAR], cust_calendar[CONF_NAME])
entity_id = generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass)
calendar_devices.append(
WebDavCalendarEventDevice(
name, calendar, entity_id, True,
cust_calendar[CONF_SEARCH]))
# Create a default calendar if there was no custom one
if not config[CONF_CUSTOM_CALENDARS]:
name = calendar.name
device_id = calendar.name
entity_id = generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass)
calendar_devices.append(
WebDavCalendarEventDevice(name, calendar, entity_id)
)
add_entities(calendar_devices, True)
class WebDavCalendarEventDevice(CalendarEventDevice):
"""A device for getting the next Task from a WebDav Calendar."""
def __init__(self, name, calendar, entity_id, all_day=False, search=None):
"""Create the WebDav Calendar Event Device."""
self.data = WebDavCalendarData(calendar, all_day, search)
self.entity_id = entity_id
self._event = None
self._name = name
self._offset_reached = False
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {
'offset_reached': self._offset_reached,
}
@property
def event(self):
"""Return the next upcoming event."""
return self._event
@property
def name(self):
"""Return the name of the entity."""
return self._name
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
return await self.data.async_get_events(hass, start_date, end_date)
def update(self):
"""Update event data."""
self.data.update()
event = copy.deepcopy(self.data.event)
if event is None:
self._event = event
return
event = calculate_offset(event, OFFSET)
self._offset_reached = is_offset_reached(event)
self._event = event
class WebDavCalendarData:
"""Class to utilize the calendar dav client object to get next event."""
def __init__(self, calendar, include_all_day, search):
"""Set up how we are going to search the WebDav calendar."""
self.calendar = calendar
self.include_all_day = include_all_day
self.search = search
self.event = None
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
# Get event list from the current calendar
vevent_list = await hass.async_add_job(self.calendar.date_search,
start_date, end_date)
event_list = []
for event in vevent_list:
vevent = event.instance.vevent
uid = None
if hasattr(vevent, 'uid'):
uid = vevent.uid.value
data = {
"uid": uid,
"title": vevent.summary.value,
"start": self.get_hass_date(vevent.dtstart.value),
"end": self.get_hass_date(self.get_end_date(vevent)),
"location": self.get_attr_value(vevent, "location"),
"description": self.get_attr_value(vevent, "description"),
}
data['start'] = get_date(data['start']).isoformat()
data['end'] = get_date(data['end']).isoformat()
event_list.append(data)
return event_list
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data."""
# We have to retrieve the results for the whole day as the server
# won't return events that have already started
results = self.calendar.date_search(
dt.start_of_local_day(),
dt.start_of_local_day() + timedelta(days=1)
)
# dtstart can be a date or a datetime depending on whether the event
# lasts a whole day. Convert everything to datetime to be able to sort it.
results.sort(key=lambda x: self.to_datetime(
x.instance.vevent.dtstart.value
))
vevent = next((
event.instance.vevent
for event in results
if (self.is_matching(event.instance.vevent, self.search)
and (
not self.is_all_day(event.instance.vevent)
or self.include_all_day)
and not self.is_over(event.instance.vevent))), None)
# If no matching event could be found
if vevent is None:
_LOGGER.debug(
"No matching event found in the %d results for %s",
len(results), self.calendar.name)
self.event = None
return
# Populate the entity attributes with the event values
self.event = {
"summary": vevent.summary.value,
"start": self.get_hass_date(vevent.dtstart.value),
"end": self.get_hass_date(self.get_end_date(vevent)),
"location": self.get_attr_value(vevent, "location"),
"description": self.get_attr_value(vevent, "description")
}
@staticmethod
def is_matching(vevent, search):
"""Return if the event matches the filter criteria."""
if search is None:
return True
pattern = re.compile(search)
return (
hasattr(vevent, "summary")
and pattern.match(vevent.summary.value)
or hasattr(vevent, "location")
and pattern.match(vevent.location.value)
or hasattr(vevent, "description")
and pattern.match(vevent.description.value))
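# Illustrative behaviour (hypothetical events): with search set to
# r'.*Standup.*', an event whose summary is 'Daily Standup' matches, while
# an event titled 'Dentist' only matches if its location or description
# matches the pattern instead.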
@staticmethod
def is_all_day(vevent):
"""Return if the event last the whole day."""
return not isinstance(vevent.dtstart.value, datetime)
@staticmethod
def is_over(vevent):
"""Return if the event is over."""
return dt.now() >= WebDavCalendarData.to_datetime(
WebDavCalendarData.get_end_date(vevent)
)
@staticmethod
def get_hass_date(obj):
"""Return if the event matches."""
if isinstance(obj, datetime):
return {"dateTime": obj.isoformat()}
return {"date": obj.isoformat()}
@staticmethod
def to_datetime(obj):
"""Return a datetime."""
if isinstance(obj, datetime):
return obj
return dt.as_local(dt.dt.datetime.combine(obj, dt.dt.time.min))
@staticmethod
def get_attr_value(obj, attribute):
"""Return the value of the attribute if defined."""
if hasattr(obj, attribute):
return getattr(obj, attribute).value
return None
@staticmethod
def get_end_date(obj):
"""Return the end datetime as determined by dtend or duration."""
if hasattr(obj, "dtend"):
enddate = obj.dtend.value
elif hasattr(obj, "duration"):
enddate = obj.dtstart.value + obj.duration.value
else:
enddate = obj.dtstart.value + timedelta(days=1)
return enddate
| apache-2.0 |
charukiewicz/beer-manager | venv/lib/python3.4/site-packages/jinja2/testsuite/filters.py | 394 | 19169 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.filters
~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
from jinja2._compat import text_type, implements_to_string
env = Environment()
class FilterTestCase(JinjaTestCase):
def test_filter_calling(self):
rv = env.call_filter('sum', [1, 2, 3])
self.assert_equal(rv, 6)
def test_capitalize(self):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == 'Foo bar'
def test_center(self):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == ' foo '
def test_default(self):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given='yes') == 'no|False|no|yes'
def test_dictsort(self):
tmpl = env.from_string(
'{{ foo|dictsort }}|'
'{{ foo|dictsort(true) }}|'
'{{ foo|dictsort(false, "value") }}'
)
out = tmpl.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == ("[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]|"
"[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]|"
"[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]")
def test_batch(self):
tmpl = env.from_string("{{ foo|batch(3)|list }}|"
"{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]")
def test_slice(self):
tmpl = env.from_string('{{ foo|slice(3)|list }}|'
'{{ foo|slice(3, "X")|list }}')
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")
def test_escape(self):
tmpl = env.from_string('''{{ '<">&'|escape }}''')
out = tmpl.render()
assert out == '<">&'
def test_striptags(self):
tmpl = env.from_string('''{{ foo|striptags }}''')
out = tmpl.render(foo=' <p>just a small \n <a href="#">'
'example</a> link</p>\n<p>to a webpage</p> '
'<!-- <p>and some commented stuff</p> -->')
assert out == 'just a small example link to a webpage'
def test_filesizeformat(self):
tmpl = env.from_string(
'{{ 100|filesizeformat }}|'
'{{ 1000|filesizeformat }}|'
'{{ 1000000|filesizeformat }}|'
'{{ 1000000000|filesizeformat }}|'
'{{ 1000000000000|filesizeformat }}|'
'{{ 100|filesizeformat(true) }}|'
'{{ 1000|filesizeformat(true) }}|'
'{{ 1000000|filesizeformat(true) }}|'
'{{ 1000000000|filesizeformat(true) }}|'
'{{ 1000000000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|'
'1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB'
))
def test_filesizeformat_issue59(self):
tmpl = env.from_string(
'{{ 300|filesizeformat }}|'
'{{ 3000|filesizeformat }}|'
'{{ 3000000|filesizeformat }}|'
'{{ 3000000000|filesizeformat }}|'
'{{ 3000000000000|filesizeformat }}|'
'{{ 300|filesizeformat(true) }}|'
'{{ 3000|filesizeformat(true) }}|'
'{{ 3000000|filesizeformat(true) }}'
)
out = tmpl.render()
self.assert_equal(out, (
'300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|'
'2.9 KiB|2.9 MiB'
))
def test_first(self):
tmpl = env.from_string('{{ foo|first }}')
out = tmpl.render(foo=list(range(10)))
assert out == '0'
def test_float(self):
tmpl = env.from_string('{{ "42"|float }}|'
'{{ "ajsghasjgd"|float }}|'
'{{ "32.32"|float }}')
out = tmpl.render()
assert out == '42.0|0.0|32.32'
def test_format(self):
tmpl = env.from_string('''{{ "%s|%s"|format("a", "b") }}''')
out = tmpl.render()
assert out == 'a|b'
def test_indent(self):
tmpl = env.from_string('{{ foo|indent(2) }}|{{ foo|indent(2, true) }}')
text = '\n'.join([' '.join(['foo', 'bar'] * 2)] * 2)
out = tmpl.render(foo=text)
assert out == ('foo bar foo bar\n foo bar foo bar| '
'foo bar foo bar\n foo bar foo bar')
def test_int(self):
tmpl = env.from_string('{{ "42"|int }}|{{ "ajsghasjgd"|int }}|'
'{{ "32.32"|int }}')
out = tmpl.render()
assert out == '42|0|32'
def test_join(self):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == '1|2|3'
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == '<foo><span>foo</span>'
def test_join_attribute(self):
class User(object):
def __init__(self, username):
self.username = username
tmpl = env.from_string('''{{ users|join(', ', 'username') }}''')
assert tmpl.render(users=map(User, ['foo', 'bar'])) == 'foo, bar'
def test_last(self):
tmpl = env.from_string('''{{ foo|last }}''')
out = tmpl.render(foo=list(range(10)))
assert out == '9'
def test_length(self):
tmpl = env.from_string('''{{ "hello world"|length }}''')
out = tmpl.render()
assert out == '11'
def test_lower(self):
tmpl = env.from_string('''{{ "FOO"|lower }}''')
out = tmpl.render()
assert out == 'foo'
def test_pprint(self):
from pprint import pformat
tmpl = env.from_string('''{{ data|pprint }}''')
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self):
tmpl = env.from_string('''{{ seq|random }}''')
seq = list(range(100))
for _ in range(10):
assert int(tmpl.render(seq=seq)) in seq
def test_reverse(self):
tmpl = env.from_string('{{ "foobar"|reverse|join }}|'
'{{ [1, 2, 3]|reverse|list }}')
assert tmpl.render() == 'raboof|[3, 2, 1]'
def test_string(self):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string('''{{ obj|string }}''')
assert tmpl.render(obj=x) == text_type(x)
def test_title(self):
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string('''{{ "foo's bar"|title }}''')
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string('''{{ "foo bar"|title }}''')
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string('''{{ "f bar f"|title }}''')
assert tmpl.render() == "F Bar F"
tmpl = env.from_string('''{{ "foo-bar"|title }}''')
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string('''{{ "foo\tbar"|title }}''')
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string('''{{ "FOO\tBAR"|title }}''')
assert tmpl.render() == "Foo\tBar"
def test_truncate(self):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
'{{ smalldata|truncate(15) }}'
)
out = tmpl.render(data='foobar baz bar' * 1000,
smalldata='foobar baz bar')
assert out == 'foobar baz barf>>>|foobar baz >>>|foobar baz bar'
def test_upper(self):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == 'FOO'
def test_urlize(self):
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == 'foo <a href="http://www.example.com/">'\
'http://www.example.com/</a> bar'
def test_wordcount(self):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == '3'
def test_block(self):
tmpl = env.from_string('{% filter lower|escape %}<HEHE>{% endfilter %}')
assert tmpl.render() == '<hehe>'
def test_chaining(self):
tmpl = env.from_string('''{{ ['<foo>', '<bar>']|first|upper|escape }}''')
assert tmpl.render() == '<FOO>'
def test_sum(self):
tmpl = env.from_string('''{{ [1, 2, 3, 4, 5, 6]|sum }}''')
assert tmpl.render() == '21'
def test_sum_attributes(self):
tmpl = env.from_string('''{{ values|sum('value') }}''')
assert tmpl.render(values=[
{'value': 23},
{'value': 1},
{'value': 18},
]) == '42'
def test_sum_attributes_nested(self):
tmpl = env.from_string('''{{ values|sum('real.value') }}''')
assert tmpl.render(values=[
{'real': {'value': 23}},
{'real': {'value': 1}},
{'real': {'value': 18}},
]) == '42'
def test_sum_attributes_tuple(self):
tmpl = env.from_string('''{{ values.items()|sum('1') }}''')
assert tmpl.render(values={
'foo': 23,
'bar': 1,
'baz': 18,
}) == '42'
def test_abs(self):
tmpl = env.from_string('''{{ -1|abs }}|{{ 1|abs }}''')
assert tmpl.render() == '1|1', tmpl.render()
def test_round_positive(self):
tmpl = env.from_string('{{ 2.7|round }}|{{ 2.1|round }}|'
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}")
assert tmpl.render() == '3.0|2.0|2.123|3.0', tmpl.render()
def test_round_negative(self):
tmpl = env.from_string('{{ 21.3|round(-1)}}|'
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}")
assert tmpl.render() == '20.0|30.0|20.0',tmpl.render()
def test_xmlattr(self):
tmpl = env.from_string("{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}")
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
assert 'blub:blub="<?>"' in out
def test_sort1(self):
tmpl = env.from_string('{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}')
assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'
def test_sort2(self):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == 'AbcD'
def test_sort3(self):
tmpl = env.from_string('''{{ ['foo', 'Bar', 'blah']|sort }}''')
assert tmpl.render() == "['Bar', 'blah', 'foo']"
def test_sort4(self):
@implements_to_string
class Magic(object):
def __init__(self, value):
self.value = value
def __str__(self):
return text_type(self.value)
tmpl = env.from_string('''{{ items|sort(attribute='value')|join }}''')
assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == '1234'
def test_groupby(self):
tmpl = env.from_string('''
{%- for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render().split('|') == [
"1: 1, 2: 1, 1",
"2: 2, 3",
"3: 3, 4",
""
]
def test_groupby_tuple_index(self):
tmpl = env.from_string('''
{%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}''')
assert tmpl.render() == 'a:1:2|b:1|'
def test_groupby_multidot(self):
class Date(object):
def __init__(self, day, month, year):
self.day = day
self.month = month
self.year = year
class Article(object):
def __init__(self, title, *date):
self.date = Date(*date)
self.title = title
articles = [
Article('aha', 1, 1, 1970),
Article('interesting', 2, 1, 1970),
Article('really?', 3, 1, 1970),
Article('totally not', 1, 1, 1971)
]
tmpl = env.from_string('''
{%- for year, list in articles|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}''')
assert tmpl.render(articles=articles).split('|') == [
'1970[aha][interesting][really?]',
'1971[totally not]',
''
]
def test_filtertag(self):
tmpl = env.from_string("{% filter upper|replace('FOO', 'foo') %}"
"foobar{% endfilter %}")
assert tmpl.render() == 'fooBAR'
def test_replace(self):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string='<foo>') == '<f4242>'
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string='<foo>') == '<f4242>'
tmpl = env.from_string('{{ string|replace("<", 42) }}')
assert tmpl.render(string='<foo>') == '42foo>'
tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
assert tmpl.render(string=Markup('foo')) == 'f>x<>x<'
def test_forceescape(self):
tmpl = env.from_string('{{ x|forceescape }}')
assert tmpl.render(x=Markup('<div />')) == u'<div />'
def test_safe(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == '<div>foo</div>'
tmpl = env.from_string('{{ "<div>foo</div>" }}')
assert tmpl.render() == '<div>foo</div>'
def test_urlencode(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "Hello, world!"|urlencode }}')
assert tmpl.render() == 'Hello%2C%20world%21'
tmpl = env.from_string('{{ o|urlencode }}')
assert tmpl.render(o=u"Hello, world\u203d") == "Hello%2C%20world%E2%80%BD"
assert tmpl.render(o=(("f", 1),)) == "f=1"
assert tmpl.render(o=(('f', 1), ("z", 2))) == "f=1&z=2"
assert tmpl.render(o=((u"\u203d", 1),)) == "%E2%80%BD=1"
assert tmpl.render(o={u"\u203d": 1}) == "%E2%80%BD=1"
assert tmpl.render(o={0: 1}) == "0=1"
def test_simple_map(self):
env = Environment()
tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
self.assertEqual(tmpl.render(), '6')
def test_attribute_map(self):
class User(object):
def __init__(self, name):
self.name = name
env = Environment()
users = [
User('john'),
User('jane'),
User('mike'),
]
tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'john|jane|mike')
def test_empty_map(self):
env = Environment()
tmpl = env.from_string('{{ none|map("upper")|list }}')
self.assertEqual(tmpl.render(), '[]')
def test_simple_select(self):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
self.assertEqual(tmpl.render(), '1|3|5')
def test_bool_select(self):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
self.assertEqual(tmpl.render(), '1|2|3|4|5')
def test_simple_reject(self):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
self.assertEqual(tmpl.render(), '2|4')
def test_bool_reject(self):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
self.assertEqual(tmpl.render(), 'None|False|0')
def test_simple_select_attr(self):
class User(object):
def __init__(self, name, is_active):
self.name = name
self.is_active = is_active
env = Environment()
users = [
User('john', True),
User('jane', True),
User('mike', False),
]
tmpl = env.from_string('{{ users|selectattr("is_active")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'john|jane')
def test_simple_reject_attr(self):
class User(object):
def __init__(self, name, is_active):
self.name = name
self.is_active = is_active
env = Environment()
users = [
User('john', True),
User('jane', True),
User('mike', False),
]
tmpl = env.from_string('{{ users|rejectattr("is_active")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'mike')
def test_func_select_attr(self):
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
env = Environment()
users = [
User(1, 'john'),
User(2, 'jane'),
User(3, 'mike'),
]
tmpl = env.from_string('{{ users|selectattr("id", "odd")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'john|mike')
def test_func_reject_attr(self):
class User(object):
def __init__(self, id, name):
self.id = id
self.name = name
env = Environment()
users = [
User(1, 'john'),
User(2, 'jane'),
User(3, 'mike'),
]
tmpl = env.from_string('{{ users|rejectattr("id", "odd")|'
'map(attribute="name")|join("|") }}')
self.assertEqual(tmpl.render(users=users), 'jane')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FilterTestCase))
return suite
| mit |
i945/An | An/extra_apps/xadmin/migrations/0002_log.py | 15 | 1849 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-15 05:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('xadmin', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='action time')),
('ip_addr', models.GenericIPAddressField(blank=True, null=True, verbose_name='action ip')),
('object_id', models.TextField(blank=True, null=True, verbose_name='object id')),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('message', models.TextField(blank=True, verbose_name='change message')),
('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType', verbose_name='content type')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'ordering': ('-action_time',),
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
},
),
]
| mit |
houzhenggang/OpenWRT-1 | scripts/dl_cleanup.py | 202 | 5942 | #!/usr/bin/env python
"""
# OpenWRT download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (C) 2010 Michael Buesch <[email protected]>
# Copyright (C) 2013 OpenWrt.org
"""
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
(int(match.group(5)) << 16)
return (progname, progversion)
def parseVer_123(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(5)
except (IndexError), e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
patchlevel
return (progname, progversion)
def parseVer_12(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(4)
except (IndexError), e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
patchlevel
return (progname, progversion)
def parseVer_r(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64)
return (progname, progversion)
def parseVer_ymd(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32)
return (progname, progversion)
def parseVer_GIT(match, filepath):
progname = match.group(1)
st = os.stat(filepath)
progversion = int(st.st_mtime) << 64
return (progname, progversion)
extensions = (
".tar.gz",
".tar.bz2",
".tar.xz",
".orig.tar.gz",
".orig.tar.bz2",
".orig.tar.xz",
".zip",
".tgz",
".tbz",
".txz",
)
versionRegex = (
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4
(re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd), # xxx-YYYY-MM-DD
(re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a
(re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3
(re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a
(re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111
)
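# Illustrative sketch of how the patterns above pack versions (the file names
# below are hypothetical, not taken from an actual dl/ directory):
#   "bash-4.3.30.tar.gz"        -> parseVer_123: ("bash", (4 << 64) | (3 << 48) | (30 << 32))
#   "u-boot-2013-07-15.tar.bz2" -> parseVer_ymd: ("u-boot", (2013 << 64) | (7 << 48) | (15 << 32))
# Entry.__ge__() later compares these packed integers, so only the newest
# tarball of each program survives the cleanup.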
blacklist = [
("linux", re.compile(r"linux-.*")),
("gcc", re.compile(r"gcc-.*")),
("wl_apsta", re.compile(r"wl_apsta.*")),
(".fw", re.compile(r".*\.fw")),
(".arm", re.compile(r".*\.arm")),
(".bin", re.compile(r".*\.bin")),
("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception): pass
class Entry:
def __init__(self, directory, filename):
self.directory = directory
self.filename = filename
self.progname = ""
self.fileext = ""
for ext in extensions:
if filename.endswith(ext):
filename = filename[0:0-len(ext)]
self.fileext = ext
break
else:
print self.filename, "has an unknown file-extension"
raise EntryParseError("ext")
for (regex, parseVersion) in versionRegex:
match = regex.match(filename)
if match:
(self.progname, self.version) = parseVersion(
match, directory + "/" + filename + self.fileext)
break
else:
print self.filename, "has an unknown version pattern"
raise EntryParseError("ver")
def deleteFile(self):
path = (self.directory + "/" + self.filename).replace("//", "/")
print "Deleting", path
if not opt_dryrun:
os.unlink(path)
def __eq__(self, y):
return self.filename == y.filename
def __ge__(self, y):
return self.version >= y.version
def usage():
print "OpenWRT download directory cleanup utility"
print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>"
print ""
print " -d|--dry-run Do a dry-run. Don't delete any files"
print " -B|--show-blacklist Show the blacklist and exit"
print " -w|--whitelist ITEM Remove ITEM from blacklist"
def main(argv):
global opt_dryrun
try:
(opts, args) = getopt.getopt(argv[1:],
"hdBw:",
[ "help", "dry-run", "show-blacklist", "whitelist=", ])
if len(args) != 1:
usage()
return 1
except getopt.GetoptError:
usage()
return 1
directory = args[0]
for (o, v) in opts:
if o in ("-h", "--help"):
usage()
return 0
if o in ("-d", "--dry-run"):
opt_dryrun = True
if o in ("-w", "--whitelist"):
for i in range(0, len(blacklist)):
(name, regex) = blacklist[i]
if name == v:
del blacklist[i]
break
else:
print "Whitelist error: Item", v,\
"is not in blacklist"
return 1
if o in ("-B", "--show-blacklist"):
for (name, regex) in blacklist:
print name
return 0
# Create a directory listing and parse the file names.
entries = []
for filename in os.listdir(directory):
if filename == "." or filename == "..":
continue
for (name, regex) in blacklist:
if regex.match(filename):
if opt_dryrun:
print filename, "is blacklisted"
break
else:
try:
entries.append(Entry(directory, filename))
except (EntryParseError), e: pass
# Create a map of programs
progmap = {}
for entry in entries:
if entry.progname in progmap.keys():
progmap[entry.progname].append(entry)
else:
progmap[entry.progname] = [entry,]
# Traverse the program map and delete everything but the last version
for prog in progmap:
lastVersion = None
versions = progmap[prog]
for version in versions:
if lastVersion is None or version >= lastVersion:
lastVersion = version
if lastVersion:
for version in versions:
if version != lastVersion:
version.deleteFile()
if opt_dryrun:
print "Keeping", lastVersion.filename
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-2.0 |
tianchaijz/MTHTTPServerWFM | MTHTTPServerWFM.py | 1 | 19948 | #!/usr/bin/env python
# encoding: utf-8
"""Multiple Threading HTTP Server With File Management.
This program is extended from the standard `SimpleHTTPServer` module by adding
upload and delete file features.
"""
__version__ = "0.31"
__all__ = ["HTTPRequestHandlerWFM"]
__author__ = "Jinzheng Zhang"
__email__ = "[email protected]"
__git__ = "https://github.com/tianchaijz/MTHTTPServerWFM"
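# Typical invocation (a sketch based on main() below; the port and directory
# are illustrative):
#   python MTHTTPServerWFM.py 8000 /srv/share
# serves /srv/share on port 8000 with file upload and delete support.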
import os
import sys
import re
import cgi
import json
import shutil
import socket
import urllib
import hashlib
import logging
import mimetypes
import posixpath
import threading
from copy import deepcopy
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# ============================== Config ==============================
ENC = sys.stdout.encoding
ENC_MAP = {"cp936": "gbk"}
CHARSET = ENC_MAP.get(ENC, "utf-8")
reload(sys)
sys.setdefaultencoding("utf-8")
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] [%(levelname)s] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
FILE_NAME = os.path.basename(__file__).split('.')[0]
WORK_PATH = sys.argv[2] if sys.argv[2:] else os.getcwd()
# ====================================================================
class HTMLStyle(object):
CSS = """
body { background:#FFF; color:#000;
font-family:Helvetica, Arial, sans-serif; }
h1 { margin:.5em 0 0; }
h2 { margin:.8em 0 .3em; }
h3 { margin:.5em 0 .3em; }
table { font-size:.8em; border-collapse:collapse;
border-bottom:1px solid #DDEEDD; width:100%; margin:.5em 0; }
thead th { font-size:1em; background:#DDEEDD;
border:.2em solid #FFFFFF; padding:.1em .3em; }
tbody tr.odd { background:#F5F5F5; }
tbody th { text-align:left; }
tbody td { height:1.2em; text-align:right; }
"""
GETPAGE = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>Directory listing for {directory}</title>
<style>{css}</style>
</head>
<body>
<div>
<h3><a href="/">Home</a> Directory listing for {directory}</h3>
</div>
<div>
<hr color="#DDEEDD">
<form enctype="multipart/form-data" method="post">
Upload File: <input name="file" type="file"/>
<input type="submit" value="Upload"/>
</form>
</div>
<div>
<hr color="#DDEEDD">
<form action="/delete" method="post">
Delete File: <input type="text" name="filename">
<input type="submit" value="Submit">
</form>
</div>
<div>
<hr color="#DDEEDD">
<table>
<thead>
<tr> <th rowspan="2">NAME</th> <th colspan="2">INFO</th> </tr>
<tr> <th>SIZE</th> <th>SHA1SUM</th> </tr>
</thead>
"""
POSTPAGE = """
<!DOCTYPE html>
<html>
<head> <meta charset="utf-8"/> <title>Result Page</title> </head>
<body>
<h3>Result:</h3>
<hr color="#DDEEDD">
<strong>{result}: </strong>
{msg}
<hr color="#DDEEDD"><br><a href="{refer}">Go Back</a>
<body>
</html>
"""
TBODY = """
<tbody>
{tr_class}
<th><a href="{linkname}">{displayname}</a></th>
<td>{size}</td> <td>{sha1sum}</td>
</tr>
</tbody>
"""
def __init__(self):
self.count = 0
def gen_getpage(self, **kwargs):
kwargs["css"] = HTMLStyle.CSS
return HTMLStyle.GETPAGE.format(**kwargs)
def gen_postpage(self, **kwargs):
return HTMLStyle.POSTPAGE.format(**kwargs)
def gen_table_body(self, **kwargs):
self.count = 1 - self.count
kwargs["tr_class"] = '<tr class="odd">' if self.count > 0 else '<tr>'
return HTMLStyle.TBODY.format(**kwargs)
class FileInfoHandler(object):
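    # Maintains a JSON side-car file ("__MTHTTPServerWFM.json" in WORK_PATH, see
    # info_file below) caching size, mtime and sha1 of served files; hashing runs
    # in background threads (_gen_info) so directory listings are not blocked.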
FILE_LOCK = threading.Lock()
def __init__(self):
self.info_file = os.path.join(WORK_PATH, "__%s.json" % FILE_NAME)
self.lock = threading.Lock()
self.info, self.oldinfo = {}, {}
threading.Thread(
target=self._load_info, name="Thread: Load File Info",
).start()
def _load_info(self):
try:
with FileInfoHandler.FILE_LOCK:
with open(self.info_file, 'rb') as fd:
info = json.load(fd, encoding=ENC)
except IOError, e:
pass
except Exception, e:
logging.exception(str(e))
self.flush_info()
else:
logging.info("Load file info success")
self.info, self.oldinfo = info, deepcopy(info)
def _do_flush(self):
with FileInfoHandler.FILE_LOCK:
try:
with open(self.info_file, 'wb') as fd:
json.dump(self.info, fd, encoding=ENC)
except IOError:
pass
except Exception, e:
logging.exception(str(e))
def _gen_info(self, file):
def hashfile(fd, hasher, blocksize=65536):
buf = fd.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = fd.read(blocksize)
return hasher.hexdigest()
try:
logging.debug("Add file info: %s" % file)
size = str(os.path.getsize(file))
mtime = str(os.path.getmtime(file))
with open(file, 'rb') as fd:
sha1sum = hashfile(fd, hashlib.sha1())
with self.lock:
self.info[file] = {
"sha1sum": sha1sum, "size": size, "mtime": mtime
}
self._do_flush()
except IOError, e:
logging.exception("%s: %s" % (file, str(e)))
def get_info(self, file):
file_info = self.info.get(file, False)
if file_info:
file_mtime = os.path.getmtime(file)
if str(file_mtime) != file_info["mtime"]:
logging.debug("Update file info: %s" % file)
self.add_info(file)
return file_info
else:
if os.path.isfile(file):
self.add_info(file)
return self.dummy_info()
def del_info(self, file):
with self.lock:
try:
del self.info[file]
logging.info("Delete file info: %s" % file)
self._do_flush()
except KeyError:
logging.exception("%s not found" % file)
except ValueError, e:
logging.exception(str(e))
def add_info(self, file):
thread = threading.Thread(
target=self._gen_info,
args=(file,),
name="Thread - " + file,
)
thread.daemon = True
thread.start()
def flush_info(self):
with self.lock:
self._do_flush()
def need_flush(self):
return bool(set(self.info) - set(self.oldinfo))
def dummy_info(self):
return {"size": '', "sha1sum": ''}
class HTTPRequestHandlerWFM(BaseHTTPRequestHandler):
"""HTTP request handler with GET, HEAD and POST commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET, HEAD and POST requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "%s/%s" % (FILE_NAME, __version__)
CWD = WORK_PATH
FIH = FileInfoHandler()
HS = HTMLStyle()
def __init__(self, *args, **kwargs):
logging.debug("__init__ %s" % (self.__class__.__name__))
self.fih = HTTPRequestHandlerWFM.FIH
self.hs = HTTPRequestHandlerWFM.HS
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_GET(self):
"""Serve a GET request."""
logging.debug("Current thread: %s" % threading.current_thread())
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
finally:
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def do_POST(self):
"""Serve a POST request."""
def parse_post_data():
if self.path == "/delete":
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": self.headers["Content-Type"],
}
)
filename = form.getvalue("filename")
if filename is None:
return (False, "no file specified")
filename = urllib.unquote(filename).decode("utf-8")
fullname = os.path.join(HTTPRequestHandlerWFM.CWD, filename)
try:
os.remove(fullname)
logging.warn("Delete file: %s" %
self.real_path(fullname.encode(ENC)))
self.fih.del_info(fullname)
return (True, "file: %s deleted" %
self.real_path(fullname))
except OSError, e:
return (False, str(e).decode("string_escape"))
else:
return self.deal_post_file()
res, msg = parse_post_data()
logging.info("Post %s, %s by %s"
% (res, msg, self.client_address))
f = StringIO()
postpage = self.hs.gen_postpage(
result=str(res), msg=msg, refer=self.headers["Referer"]
)
f.write(postpage)
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
def deal_post_file(self):
self.is_upload = True
try:
boundary = self.headers.plisttext.split("=")[1]
except IndexError:
self.is_upload = False
if self.is_upload:
content_length = remainbytes = int(self.headers["Content-Length"])
line = self.rfile.readline()
remainbytes -= len(line)
if boundary not in line:
return (False, "content can't begin with boundary")
line = self.rfile.readline()
remainbytes -= len(line)
fn = re.findall(
r'Content-Disposition.*name="file"; filename="(.+)"',
line
)
if not fn:
return (False, "can't find out the file name")
path = self.translate_path(self.path)
fn = os.path.join(path, fn[0].decode("utf-8"))
while os.path.exists(fn):
fn += "_"
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
try:
out = open(fn, 'wb')
logging.info("Post file: %s, Content-Length: %d" %
(self.real_path(fn.encode(ENC)), content_length))
logging.info("Write to file: %s" %
self.real_path(fn.encode(ENC)))
except IOError, e:
return (False, "can't write file: %s" % str(e))
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith('\r'):
preline = preline[0:-1]
out.write(preline)
out.close()
return (True, "file '%s' uploaded" % fn)
else:
out.write(preline)
preline = line
return (False, "unexpect ends of data.")
else:
body = self.rfile.read()
return (False, "unknow post data: %s ..." % body[0:9])
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
HTTPRequestHandlerWFM.CWD = path
return self.list_directory(path)
ctype = "%s; charset=%s" % (self.guess_type(path), CHARSET)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
logging.info("Get file: %s" % self.real_path(path.encode(ENC)))
except IOError, e:
self.send_error(404, str(e))
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header(
"Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise
def real_path(self, path):
return os.path.relpath(path, HTTPRequestHandlerWFM.CWD)
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
files = os.listdir(path)
list = map(lambda s:
(s if isinstance(s, unicode) else s.decode(ENC)), files)
logging.info("Get directory: %s" %
self.real_path(path.encode(ENC)))
except os.error:
self.send_error(403, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write(self.hs.gen_getpage(directory=displaypath))
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
info = self.fih.get_info(fullname)
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(self.hs.gen_table_body(
linkname=urllib.quote(linkname.encode("utf-8")),
displayname=cgi.escape(displayname.encode("utf-8")),
**info
))
f.write("\n".join(["</table>", "</div>", "</body>", "</html>"]))
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-Length", str(length))
self.end_headers()
if self.fih.need_flush():
self.fih.flush_info()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
path = posixpath.normpath(urllib.unquote(path).decode("utf-8"))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
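    # Illustrative example (hypothetical request): with the server started in
    # /srv/share, a GET for "/docs/readme.txt?download=1#top" translates to
    # os.path.join(os.getcwd(), "docs", "readme.txt"); the query string and
    # fragment are stripped and "."/".." path components are ignored.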
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.c': 'text/plain',
'.h': 'text/plain',
'.sh': 'text/plain',
'.py': 'text/plain',
'.txt': 'text/plain',
'.lua': 'text/plain',
'.json': 'application/json',
})
def log_request(self, code='-'):
sys.stdout.write("Status: %s\n" % str(code))
class MultiThreadingServer(ThreadingMixIn, HTTPServer):
pass
def main():
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
if sys.argv[2:]:
os.chdir(sys.argv[2])
server_address = ('', port)
server = MultiThreadingServer(server_address, HTTPRequestHandlerWFM)
sa = server.socket.getsockname()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
logging.info("IP address: %s, pid: %d" %
(s.getsockname()[0], os.getpid()))
s.close()
except:
pass
logging.info("Serving HTTP on: %s, port: %d" % (sa[0], sa[1]))
try:
server.serve_forever()
except KeyboardInterrupt:
print
logging.info("Serving Finished")
if __name__ == '__main__':
main()
| mit |
siosio/intellij-community | plugins/hg4idea/testData/bin/mercurial/verify.py | 93 | 10933 | # verify.py - repository integrity checking for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid, short
from i18n import _
import os
import revlog, util, error
def verify(repo):
lock = repo.lock()
try:
return _verify(repo)
finally:
lock.release()
def _normpath(f):
# under hg < 2.4, convert didn't sanitize paths properly, so a
# converted repo may contain repeated slashes
while '//' in f:
f = f.replace('//', '/')
return f
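# e.g. _normpath('foo//bar///baz') returns 'foo/bar/baz'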
def _verify(repo):
repo = repo.unfiltered()
mflinkrevs = {}
filelinkrevs = {}
filenodes = {}
revisions = 0
badrevs = set()
errors = [0]
warnings = [0]
ui = repo.ui
cl = repo.changelog
mf = repo.manifest
lrugetctx = util.lrucachefunc(repo.changectx)
if not repo.cancopy():
raise util.Abort(_("cannot verify bundle or remote repos"))
def err(linkrev, msg, filename=None):
if linkrev is not None:
badrevs.add(linkrev)
else:
linkrev = '?'
msg = "%s: %s" % (linkrev, msg)
if filename:
msg = "%s@%s" % (filename, msg)
ui.warn(" " + msg + "\n")
errors[0] += 1
def exc(linkrev, msg, inst, filename=None):
if isinstance(inst, KeyboardInterrupt):
ui.warn(_("interrupted"))
raise
if not str(inst):
inst = repr(inst)
err(linkrev, "%s: %s" % (msg, inst), filename)
def warn(msg):
ui.warn(msg + "\n")
warnings[0] += 1
def checklog(obj, name, linkrev):
if not len(obj) and (havecl or havemf):
err(linkrev, _("empty or missing %s") % name)
return
d = obj.checksize()
if d[0]:
err(None, _("data length off by %d bytes") % d[0], name)
if d[1]:
err(None, _("index contains %d extra bytes") % d[1], name)
if obj.version != revlog.REVLOGV0:
if not revlogv1:
warn(_("warning: `%s' uses revlog format 1") % name)
elif revlogv1:
warn(_("warning: `%s' uses revlog format 0") % name)
def checkentry(obj, i, node, seen, linkrevs, f):
lr = obj.linkrev(obj.rev(node))
if lr < 0 or (havecl and lr not in linkrevs):
if lr < 0 or lr >= len(cl):
msg = _("rev %d points to nonexistent changeset %d")
else:
msg = _("rev %d points to unexpected changeset %d")
err(None, msg % (i, lr), f)
if linkrevs:
if f and len(linkrevs) > 1:
try:
# attempt to filter down to real linkrevs
linkrevs = [l for l in linkrevs
if lrugetctx(l)[f].filenode() == node]
except Exception:
pass
warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
lr = None # can't be trusted
try:
p1, p2 = obj.parents(node)
if p1 not in seen and p1 != nullid:
err(lr, _("unknown parent 1 %s of %s") %
(short(p1), short(node)), f)
if p2 not in seen and p2 != nullid:
err(lr, _("unknown parent 2 %s of %s") %
(short(p2), short(node)), f)
except Exception, inst:
exc(lr, _("checking parents of %s") % short(node), inst, f)
if node in seen:
err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
seen[node] = i
return lr
if os.path.exists(repo.sjoin("journal")):
ui.warn(_("abandoned transaction found - run hg recover\n"))
revlogv1 = cl.version != revlog.REVLOGV0
if ui.verbose or not revlogv1:
ui.status(_("repository uses revlog format %d\n") %
(revlogv1 and 1 or 0))
havecl = len(cl) > 0
havemf = len(mf) > 0
ui.status(_("checking changesets\n"))
refersmf = False
seen = {}
checklog(cl, "changelog", 0)
total = len(repo)
for i in repo:
ui.progress(_('checking'), i, total=total, unit=_('changesets'))
n = cl.node(i)
checkentry(cl, i, n, seen, [i], "changelog")
try:
changes = cl.read(n)
if changes[0] != nullid:
mflinkrevs.setdefault(changes[0], []).append(i)
refersmf = True
for f in changes[3]:
filelinkrevs.setdefault(_normpath(f), []).append(i)
except Exception, inst:
refersmf = True
exc(i, _("unpacking changeset %s") % short(n), inst)
ui.progress(_('checking'), None)
ui.status(_("checking manifests\n"))
seen = {}
if refersmf:
# Do not check manifest if there are only changelog entries with
# null manifests.
checklog(mf, "manifest", 0)
total = len(mf)
for i in mf:
ui.progress(_('checking'), i, total=total, unit=_('manifests'))
n = mf.node(i)
lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
if n in mflinkrevs:
del mflinkrevs[n]
else:
err(lr, _("%s not in changesets") % short(n), "manifest")
try:
for f, fn in mf.readdelta(n).iteritems():
if not f:
err(lr, _("file without name in manifest"))
elif f != "/dev/null":
filenodes.setdefault(_normpath(f), {}).setdefault(fn, lr)
except Exception, inst:
exc(lr, _("reading manifest delta %s") % short(n), inst)
ui.progress(_('checking'), None)
ui.status(_("crosschecking files in changesets and manifests\n"))
total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes)
count = 0
if havemf:
for c, m in sorted([(c, m) for m in mflinkrevs
for c in mflinkrevs[m]]):
count += 1
if m == nullid:
continue
ui.progress(_('crosschecking'), count, total=total)
err(c, _("changeset refers to unknown manifest %s") % short(m))
mflinkrevs = None # del is bad here due to scope issues
for f in sorted(filelinkrevs):
count += 1
ui.progress(_('crosschecking'), count, total=total)
if f not in filenodes:
lr = filelinkrevs[f][0]
err(lr, _("in changeset but not in manifest"), f)
if havecl:
for f in sorted(filenodes):
count += 1
ui.progress(_('crosschecking'), count, total=total)
if f not in filelinkrevs:
try:
fl = repo.file(f)
lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
except Exception:
lr = None
err(lr, _("in manifest but not in changeset"), f)
ui.progress(_('crosschecking'), None)
ui.status(_("checking files\n"))
storefiles = set()
for f, f2, size in repo.store.datafiles():
if not f:
err(None, _("cannot decode filename '%s'") % f2)
elif size > 0 or not revlogv1:
storefiles.add(_normpath(f))
files = sorted(set(filenodes) | set(filelinkrevs))
total = len(files)
for i, f in enumerate(files):
ui.progress(_('checking'), i, item=f, total=total)
try:
linkrevs = filelinkrevs[f]
except KeyError:
# in manifest but not in changelog
linkrevs = []
if linkrevs:
lr = linkrevs[0]
else:
lr = None
try:
fl = repo.file(f)
except error.RevlogError, e:
err(lr, _("broken revlog! (%s)") % e, f)
continue
for ff in fl.files():
try:
storefiles.remove(ff)
except KeyError:
err(lr, _("missing revlog!"), ff)
checklog(fl, f, lr)
seen = {}
rp = None
for i in fl:
revisions += 1
n = fl.node(i)
lr = checkentry(fl, i, n, seen, linkrevs, f)
if f in filenodes:
if havemf and n not in filenodes[f]:
err(lr, _("%s not in manifests") % (short(n)), f)
else:
del filenodes[f][n]
# verify contents
try:
l = len(fl.read(n))
rp = fl.renamed(n)
if l != fl.size(i):
if len(fl.revision(n)) != fl.size(i):
err(lr, _("unpacked size is %s, %s expected") %
(l, fl.size(i)), f)
except Exception, inst:
exc(lr, _("unpacking %s") % short(n), inst, f)
# check renames
try:
if rp:
if lr is not None and ui.verbose:
ctx = lrugetctx(lr)
found = False
for pctx in ctx.parents():
if rp[0] in pctx:
found = True
break
if not found:
warn(_("warning: copy source of '%s' not"
" in parents of %s") % (f, ctx))
fl2 = repo.file(rp[0])
if not len(fl2):
err(lr, _("empty or missing copy source revlog %s:%s")
% (rp[0], short(rp[1])), f)
elif rp[1] == nullid:
ui.note(_("warning: %s@%s: copy source"
" revision is nullid %s:%s\n")
% (f, lr, rp[0], short(rp[1])))
else:
fl2.rev(rp[1])
except Exception, inst:
exc(lr, _("checking rename of %s") % short(n), inst, f)
# cross-check
if f in filenodes:
fns = [(lr, n) for n, lr in filenodes[f].iteritems()]
for lr, node in sorted(fns):
err(lr, _("%s in manifests not found") % short(node), f)
ui.progress(_('checking'), None)
for f in storefiles:
warn(_("warning: orphan revlog '%s'") % f)
ui.status(_("%d files, %d changesets, %d total revisions\n") %
(len(files), len(cl), revisions))
if warnings[0]:
ui.warn(_("%d warnings encountered!\n") % warnings[0])
if errors[0]:
ui.warn(_("%d integrity errors encountered!\n") % errors[0])
if badrevs:
ui.warn(_("(first damaged changeset appears to be %d)\n")
% min(badrevs))
return 1
| apache-2.0 |
leiferikb/bitpop | depot_tools/third_party/pylint/reporters/html.py | 20 | 2541 | # Copyright (c) 2003-2006 Sylvain Thenault ([email protected]).
# Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""HTML reporter"""
import sys
from cgi import escape
from logilab.common.ureports import HTMLWriter, Section, Table
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
class HTMLReporter(BaseReporter):
"""report messages and layouts in HTML"""
__implements__ = IReporter
extension = 'html'
def __init__(self, output=sys.stdout):
BaseReporter.__init__(self, output)
self.msgs = []
def add_message(self, msg_id, location, msg):
"""manage message of different type and in the context of path"""
module, obj, line, col_offset = location[1:]
if self.include_ids:
sigle = msg_id
else:
sigle = msg_id[0]
self.msgs += [sigle, module, obj, str(line), str(col_offset), escape(msg)]
def set_output(self, output=None):
"""set output stream
        messages buffered for the old output are processed first"""
if self.out and self.msgs:
self._display(Section())
BaseReporter.set_output(self, output)
def _display(self, layout):
"""launch layouts display
        overridden from BaseReporter to insert the messages section
(in add_message, message is not displayed, just collected so it
can be displayed in an html table)
"""
if self.msgs:
# add stored messages to the layout
msgs = ['type', 'module', 'object', 'line', 'col_offset', 'message']
msgs += self.msgs
sect = Section('Messages')
layout.append(sect)
sect.append(Table(cols=6, children=msgs, rheaders=1))
self.msgs = []
HTMLWriter().format(layout, self.out)
| gpl-3.0 |
CMPUT410W15T02/CMPUT410W15-project | testenv/lib/python2.7/site-packages/django/contrib/contenttypes/forms.py | 93 | 3837 | from __future__ import unicode_literals
from django.db import models
from django.forms import ModelForm, modelformset_factory
from django.forms.models import BaseModelFormSet
from django.contrib.contenttypes.models import ContentType
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None, **kwargs):
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.model_name,
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix,
**kwargs
)
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join(
(opts.app_label, opts.model_name,
cls.ct_field.name, cls.ct_fk_field.name)
)
def save_new(self, form, commit=True):
setattr(form.instance, self.ct_field.get_attname(),
ContentType.objects.get_for_model(self.instance).pk)
setattr(form.instance, self.ct_fk_field.get_attname(),
self.instance.pk)
return form.save(commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None, formfield_callback=None,
validate_max=False, for_concrete_model=True,
min_num=None, validate_min=False):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max, min_num=min_num,
validate_min=validate_min)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
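# Example usage (a sketch; ``TaggedItem`` is a hypothetical model with a
# ``GenericForeignKey`` on the default ``content_type``/``object_id`` fields):
#
#   TagFormSet = generic_inlineformset_factory(TaggedItem, fields=['tag'], extra=1)
#   formset = TagFormSet(instance=bookmark)  # edits TaggedItems attached to ``bookmark``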
| gpl-2.0 |
kelvin13/Knockout | pygments/lexers/configs.py | 21 | 27854 | # -*- coding: utf-8 -*-
"""
pygments.lexers.configs
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for configuration file formats.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal
from pygments.lexers.shell import BashLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
'PkgConfigLexer', 'PacmanConfLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg', '*.inf']
mimetypes = ['text/x-ini', 'text/inf']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
.. versionadded:: 1.6
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.+', String, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
.. versionadded:: 1.4
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'\s+', Text),
(r'(?:[;#]|//).*$', Comment),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
],
}
def _rx_indent(level):
# Kconfig *always* interprets a tab as 8 spaces, so this is the default.
# Edit this if you are in an environment where KconfigLexer gets expanded
# input (tabs expanded to spaces) and the expansion tab width is != 8,
# e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
# Value range here is 2 <= {tab_width} <= 8.
tab_width = 8
# Regex matching a given indentation {level}, assuming that indentation is
# a multiple of {tab_width}. In other cases there might be problems.
if tab_width == 2:
space_repeat = '+'
else:
space_repeat = '{1,%d}' % (tab_width - 1)
if level == 1:
level_repeat = ''
else:
level_repeat = '{%s}' % level
return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
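# For example, _rx_indent(2) evaluates to r'(?:\t| {1,7}\t| {8}){2}.*\n', i.e. a
# line indented by two 8-column steps (tabs, spaces, or a mix) plus its content.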
class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
.. versionadded:: 1.6
"""
name = 'Kconfig'
aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
# Adjust this if new kconfig file names appear in your environment
filenames = ['Kconfig', '*Config.in*', 'external.in*',
'standard-modules.in']
mimetypes = ['text/x-kconfig']
# No re.MULTILINE, indentation-aware help text needs line-by-line handling
flags = 0
def call_indent(level):
# If indentation >= {level} is detected, enter state 'indent{level}'
return (_rx_indent(level), String.Doc, 'indent%s' % level)
def do_indent(level):
# Print paragraphs of indentation level >= {level} as String.Doc,
# ignoring blank lines. Then return to 'root' state.
return [
(_rx_indent(level), String.Doc),
(r'\s*\n', Text),
default('#pop:2')
]
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(words((
'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
'source', 'prompt', 'select', 'depends on', 'default',
'range', 'option'), suffix=r'\b'),
Keyword),
(r'(---help---|help)[\t ]*\n', Keyword, 'help'),
(r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
Name.Builtin),
(r'[!=&|]', Operator),
(r'[()]', Punctuation),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r'\S+', Text),
],
# Help text is indented, multi-line and ends when a lower indentation
# level is detected.
'help': [
# Skip blank lines after help token, if any
(r'\s*\n', Text),
# Determine the first help line's indentation level heuristically(!).
# Attention: this is not perfect, but works for 99% of "normal"
# indentation schemes up to a max. indentation level of 7.
call_indent(7),
call_indent(6),
call_indent(5),
call_indent(4),
call_indent(3),
call_indent(2),
call_indent(1),
default('#pop'), # for incomplete help sections without text
],
# Handle text for indentation levels 7 to 1
'indent7': do_indent(7),
'indent6': do_indent(6),
'indent5': do_indent(5),
'indent4': do_indent(4),
'indent3': do_indent(3),
'indent2': do_indent(2),
'indent1': do_indent(1),
}
class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
.. versionadded:: 1.5
"""
name = 'CFEngine3'
aliases = ['cfengine3', 'cf3']
filenames = ['*.cf']
mimetypes = []
tokens = {
'root': [
(r'#.*?\n', Comment),
(r'(body)(\s+)(\S+)(\s+)(control)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
'arglist'),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
bygroups(Punctuation, Name.Variable, Punctuation,
Text, Keyword.Type, Text, Operator, Text)),
(r'(\S+)(\s*)(=>)(\s*)',
bygroups(Keyword.Reserved, Text, Operator, Text)),
(r'"', String, 'string'),
(r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
(r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
(r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
(r'@[{(][^)}]+[})]', Name.Variable),
(r'[(){},;]', Punctuation),
(r'=>', Operator),
(r'->', Operator),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\w+', Name.Function),
(r'\s+', Text),
],
'string': [
(r'\$[{(]', String.Interpol, 'interpol'),
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', String),
(r'.', String),
],
'interpol': [
(r'\$[{(]', String.Interpol, '#push'),
(r'[})]', String.Interpol, '#pop'),
(r'[^${()}]+', String.Interpol),
],
'arglist': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\w+', Name.Variable),
(r'\s+', Text),
],
}
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
.. versionadded:: 0.6
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-z]\w*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'\\\n', Text),
(r'$', Text, '#pop'),
(r'\\', Text),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-z0-9][\w./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"\\]+', Text)
],
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
.. versionadded:: 0.9
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = (
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
)
opts = (
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
)
actions = (
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid.conf',
)
actions_stats = (
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
)
actions_log = ("status", "enable", "disable", "clear")
acls = (
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
)
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
(words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
# Actions
(words(actions, prefix=r'\b', suffix=r'\b'), String),
(words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
(words(actions_log, prefix=r'log/', suffix=r'='), String),
(words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.+', Comment, '#pop'),
default('#pop'),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'\}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'\{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
# (r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class DockerLexer(RegexLexer):
"""
Lexer for `Docker <http://docker.io>`_ configuration files.
.. versionadded:: 2.0
"""
name = 'Docker'
aliases = ['docker', 'dockerfile']
filenames = ['Dockerfile', '*.docker']
mimetypes = ['text/x-dockerfile-config']
_keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|'
r'VOLUME|WORKDIR)')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,),
bygroups(Name.Keyword, Whitespace, Keyword)),
(r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)),
(r'#.*', Comment),
(r'RUN', Keyword), # Rest of line falls through
(r'(.*\\\n)*.+', using(BashLexer)),
],
}
class TerraformLexer(RegexLexer):
"""
    Lexer for `terraform .tf files <https://www.terraform.io/>`_.
.. versionadded:: 2.1
"""
name = 'Terraform'
aliases = ['terraform', 'tf']
filenames = ['*.tf']
mimetypes = ['application/x-tf', 'application/x-terraform']
tokens = {
'root': [
include('string'),
include('punctuation'),
include('curly'),
include('basic'),
include('whitespace'),
(r'[0-9]+', Number),
],
'basic': [
(words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
(r'\s*/\*', Comment.Multiline, 'comment'),
(r'\s*#.*\n', Comment.Single),
(r'(.*?)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)),
(words(('variable', 'resource', 'provider', 'provisioner', 'module'),
prefix=r'\b', suffix=r'\b'), Keyword.Reserved, 'function'),
(words(('ingress', 'egress', 'listener', 'default', 'connection'),
prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
('\$\{', String.Interpol, 'var_builtin'),
],
'function': [
(r'(\s+)(".*")(\s+)', bygroups(Text, String, Text)),
include('punctuation'),
include('curly'),
],
'var_builtin': [
(r'\$\{', String.Interpol, '#push'),
(words(('concat', 'file', 'join', 'lookup', 'element'),
prefix=r'\b', suffix=r'\b'), Name.Builtin),
include('string'),
include('punctuation'),
(r'\s+', Text),
(r'\}', String.Interpol, '#pop'),
],
'string': [
(r'(".*")', bygroups(String.Double)),
],
'punctuation': [
(r'[\[\]\(\),.]', Punctuation),
],
        # Keep this separate from punctuation - we sometimes want to use different
# Tokens for { }
'curly': [
(r'\{', Text.Punctuation),
(r'\}', Text.Punctuation),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
],
}
class TermcapLexer(RegexLexer):
"""
Lexer for termcap database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Termcap'
aliases = ['termcap',]
filenames = ['termcap', 'termcap.src',]
mimetypes = []
# NOTE:
# * multiline with trailing backslash
# * separator is ':'
# * to embed colon as data, we must use \072
    # * space after separator is not allowed (maybe)
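    # A minimal illustrative entry (hypothetical, not from a real database):
    #   dumb|80-column dumb tty:\
    #       :am:co#80:bl=^G: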
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#:\|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r':', Punctuation, 'defs'),
(r'\|', Punctuation),
(r'[^:\|]+', Name.Attribute),
],
'defs': [
(r'\\\n[ \t]*', Text),
(r'\n[ \t]*', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r':', Punctuation),
(r'[^\s:=#]+', Name.Class),
],
'data': [
(r'\\072', Literal),
(r':', Punctuation, '#pop'),
(r'[^:\\]+', Literal), # for performance
(r'.', Literal),
],
}
class TerminfoLexer(RegexLexer):
"""
Lexer for terminfo database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Terminfo'
aliases = ['terminfo',]
filenames = ['terminfo', 'terminfo.src',]
mimetypes = []
# NOTE:
# * multiline with leading whitespace
# * separator is ','
# * to embed comma as data, we can use \,
# * space after separator is allowed
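    # A minimal illustrative entry (hypothetical, not from a real database):
    #   dumb|80-column dumb tty,
    #       am, cols#80, bel=^G,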
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#,\|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), 'defs'),
(r'\|', Punctuation),
(r'[^,\|]+', Name.Attribute),
],
'defs': [
(r'\n[ \t]+', Text),
(r'\n', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text)),
(r'[^\s,=#]+', Name.Class),
],
'data': [
(r'\\[,\\]', Literal),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), '#pop'),
(r'[^\\,]+', Literal), # for performance
(r'.', Literal),
],
}
class PkgConfigLexer(RegexLexer):
"""
Lexer for `pkg-config
<http://www.freedesktop.org/wiki/Software/pkg-config/>`_
(see also `manual page <http://linux.die.net/man/1/pkg-config>`_).
.. versionadded:: 2.1
"""
name = 'PkgConfig'
aliases = ['pkgconfig',]
filenames = ['*.pc',]
mimetypes = []
tokens = {
'root': [
(r'#.*$', Comment.Single),
# variable definitions
(r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
# keyword lines
(r'^([\w.]+)(:)',
bygroups(Name.Tag, Punctuation), 'spvalue'),
# variable references
include('interp'),
# fallback
(r'[^${}#=:\n.]+', Text),
(r'.', Text),
],
'interp': [
# you can escape literal "$" as "$$"
(r'\$\$', Text),
# variable references
(r'\$\{', String.Interpol, 'curly'),
],
'curly': [
(r'\}', String.Interpol, '#pop'),
(r'\w+', Name.Attribute),
],
'spvalue': [
include('interp'),
(r'#.*$', Comment.Single, '#pop'),
(r'\n', Text, '#pop'),
# fallback
(r'[^${}#\n]+', Text),
(r'.', Text),
],
}
class PacmanConfLexer(RegexLexer):
"""
Lexer for `pacman.conf
<https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
    Actually, IniLexer works almost fine for this format,
    but it yields error tokens, because pacman.conf allows
    entries without assignment, such as:
UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists
These are flags to switch on.
.. versionadded:: 2.1
"""
name = 'PacmanConf'
aliases = ['pacmanconf',]
filenames = ['pacman.conf',]
mimetypes = []
tokens = {
'root': [
# comment
(r'#.*$', Comment.Single),
# section header
(r'^\s*\[.*?\]\s*$', Keyword),
# variable definitions
# (Leading space is allowed...)
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
# flags to on
(r'^(\s*)(\w+)(\s*)$',
bygroups(Text, Name.Attribute, Text)),
# built-in special values
(words((
'$repo', # repository
'$arch', # architecture
'%o', # outfile
'%u', # url
), suffix=r'\b'),
Name.Variable),
# fallback
(r'.', Text),
],
}
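# A minimal usage sketch for the lexers above (not part of the module; the snippet and
# formatter choice are illustrative only):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#
#   code = 'variable "region" {\n  default = "us-east-1"\n}\n'
#   print(highlight(code, TerraformLexer(), TerminalFormatter()))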
| gpl-3.0 |
alexlo03/ansible | lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py | 8 | 8366 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_host_pm
short_description: Module to manage power management of hosts in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage power management of hosts in oVirt/RHV."
options:
name:
description:
- "Name of the host to manage."
required: true
aliases: ['host']
state:
description:
- "Should the host be present/absent."
choices: ['present', 'absent']
default: present
address:
description:
- "Address of the power management interface."
username:
description:
- "Username to be used to connect to power management interface."
password:
description:
- "Password of the user specified in C(username) parameter."
type:
description:
- "Type of the power management. oVirt/RHV predefined values are I(drac5), I(ipmilan), I(rsa),
I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
but user can have defined custom type."
port:
description:
- "Power management interface port."
options:
description:
- "Dictionary of additional fence agent options (including Power Management slot)."
- "Additional information about options can be found at U(https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md)."
encrypt_options:
description:
- "If I(true) options will be encrypted when send to agent."
aliases: ['encrypt']
order:
description:
- "Integer value specifying, by default it's added at the end."
version_added: "2.5"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add fence agent to host 'myhost'
- ovirt_host_pm:
name: myhost
address: 1.2.3.4
options:
myoption1: x
myoption2: y
username: admin
password: admin
port: 3333
type: ipmilan
# Add fence agent to host 'myhost' using 'slot' option
- ovirt_host_pm:
name: myhost
address: 1.2.3.4
options:
myoption1: x
myoption2: y
slot: myslot
username: admin
password: admin
port: 3333
type: ipmilan
# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost'
- ovirt_host_pm:
state: absent
name: myhost
address: 1.2.3.4
type: ipmilan
'''
RETURN = '''
id:
description: ID of the agent which is managed
returned: On success if agent is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
agent:
description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/agent."
returned: On success if agent is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class HostModule(BaseModule):
def build_entity(self):
return otypes.Host(
power_management=otypes.PowerManagement(
enabled=True,
),
)
def update_check(self, entity):
return equal(True, entity.power_management.enabled)
class HostPmModule(BaseModule):
def pre_create(self, entity):
# Save the entity, so we know if Agent already existed
self.entity = entity
def build_entity(self):
last = next((s for s in sorted([a.order for a in self._service.list()])), 0)
order = self.param('order') if self.param('order') is not None else self.entity.order if self.entity else last + 1
return otypes.Agent(
address=self._module.params['address'],
encrypt_options=self._module.params['encrypt_options'],
options=[
otypes.Option(
name=name,
value=value,
) for name, value in self._module.params['options'].items()
] if self._module.params['options'] else None,
password=self._module.params['password'],
port=self._module.params['port'],
type=self._module.params['type'],
username=self._module.params['username'],
order=order,
)
def update_check(self, entity):
def check_options():
if self.param('options'):
current = []
if entity.options:
current = [(opt.name, str(opt.value)) for opt in entity.options]
passed = [(k, str(v)) for k, v in self.param('options').items()]
return sorted(current) == sorted(passed)
return True
return (
check_options() and
equal(self._module.params.get('address'), entity.address) and
equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
equal(self._module.params.get('username'), entity.username) and
equal(self._module.params.get('port'), entity.port) and
equal(self._module.params.get('type'), entity.type) and
equal(self._module.params.get('order'), entity.order)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True, aliases=['host']),
address=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
type=dict(default=None),
port=dict(default=None, type='int'),
order=dict(default=None, type='int'),
options=dict(default=None, type='dict'),
encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
hosts_service = connection.system_service().hosts_service()
host = search_by_name(hosts_service, module.params['name'])
fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()
host_pm_module = HostPmModule(
connection=connection,
module=module,
service=fence_agents_service,
)
host_module = HostModule(
connection=connection,
module=module,
service=hosts_service,
)
state = module.params['state']
if state == 'present':
agent = host_pm_module.search_entity(
search_params={
'address': module.params['address'],
'type': module.params['type'],
}
)
ret = host_pm_module.create(entity=agent)
# Enable Power Management, if it's not enabled:
host_module.create(entity=host)
elif state == 'absent':
agent = host_pm_module.search_entity(
search_params={
'address': module.params['address'],
'type': module.params['type'],
}
)
ret = host_pm_module.remove(entity=agent)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
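# A hedged playbook sketch (values are placeholders) showing the 'order' parameter added
# in 2.5, which the EXAMPLES section above does not cover; auth is omitted for brevity as
# in the other examples:
#
# - ovirt_host_pm:
#     name: myhost
#     address: 1.2.3.4
#     username: admin
#     password: admin
#     type: ipmilan
#     order: 2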
| gpl-3.0 |
nearai/program_synthesis | program_synthesis/naps/uast/uast.py | 1 | 66502 | from __future__ import print_function
import functools
import six
import sys
import time
import math
import numpy as np
import re
from operator import mul
from sortedcontainers import SortedDict, SortedSet
from .uast_watcher import WatcherEvent, tuplify
DEBUG_INFO = False
LARGEST_INT = 2 ** 64
OBJECT = "object"
BOOL = "bool"
CHAR = "char"
STRING = "char*"
INT = "int"
REAL = "real"
VOID = "void"
FUNC = "func"
if not six.PY2:
long = int
def watchable(event_type):
def watchable_internal(some_func):
def wrapper(executor, context, *args, **kwargs):
if not executor.watchers: # don't waste precious cycles if there are no watchers
return some_func(executor, context, *args, **kwargs)
assert len(kwargs) <= 1, "%s for %s" % (kwargs, some_func)
all_args = list(args) + list(kwargs.values())
executor._watch(WatcherEvent("before_" + event_type, executor, context, *all_args))
ret = some_func(executor, context, *args, **kwargs)
executor._watch(WatcherEvent("after_" + event_type, executor, context, ret, *all_args))
return ret
return wrapper
return watchable_internal
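# Example of how the decorator above is used (mirrors the Executor methods further below;
# shown here for clarity only):
#
#   @watchable("expression")
#   def compute_expression(self, context, expr, read_store=None):
#       ...
#
# With at least one watcher registered, each call emits a "before_<event_type>" event with
# the call arguments and an "after_<event_type>" event with the return value prepended.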
class IO_t:
SCANNER = 'scanner'
PRINTER = 'printer'
next_int = ['invoke', INT, '_io_next_int', []]
next_real = ['invoke', INT, '_io_next_real', []]
next_line = ['invoke', STRING, '_io_next_line', []]
next_string = ['invoke', STRING, '_io_next_word', []]
def print_(self, x):
return ['invoke', VOID, '_io_print', [x]]
def println(self, x):
return ['invoke', VOID, '_io_println', [x]]
def __init__(self):
self.func_to_type = {}
self.func_to_type[self.next_int[2]] = INT
self.func_to_type[self.next_real[2]] = REAL
self.func_to_type[self.next_line[2]] = STRING
self.func_to_type[self.next_string[2]] = STRING
IO = IO_t()
GLOBALS_NAME = "__globals__"
class UASTNotImplementedException(Exception):
def __init__(self, feature, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.feature = feature
def __str__(self):
return "UAST Not Implemented: %s" % self.feature
class UASTTimeLimitExceeded(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class UASTParseError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
def var(name, type_):
return ["var", type_, name]
def get_expr_type(var):
return var[1]
def get_var_name(var):
assert var[0] == 'var'
return var[2]
def set_var_name(var, name):
var[2] = name
def constant(type_, value):
return ["val", type_, value]
def func(name, return_type=VOID):
return ["func", return_type, name, [], [], []]
def get_func_return_type(f):
assert f[0] in ['func', 'ctor'], f[0]
return f[1]
def get_func_name(f):
assert f[0] in ['func', 'ctor'], f[0]
return f[2]
def set_func_name(f, new_name):
assert f[0] in ['func', 'ctor'], f[0]
f[2] = new_name
def get_func_args(f):
assert f[0] in ['func', 'ctor'], f[0]
return f[3]
def get_func_vars(f):
assert f[0] in ['func', 'ctor'], f[0]
return f[4]
def get_func_body(f):
assert f[0] in ['func', 'ctor'], f[0]
return f[5]
def record(name):
return ["record", name, {}]
def get_record_name(record):
return record[1]
def get_record_fields(record):
return record[2]
def func_call(func_name, args, type_):
return ["invoke", type_, func_name, args]
def assign(lhs, rhs):
return ["assign", rhs[1], lhs, rhs]
def field(jcontext, obj, field):
type_ = get_expr_type(obj)
assert isinstance(type_, six.string_types), type_
assert type_[-1] == '#', type_
record = jcontext.get_record(type_[:-1])
return ["field", get_expr_type(get_record_fields(record)[field]), obj, field]
def type_array(subtype):
return subtype + "*"
def type_set(subtype):
return subtype + "%"
def type_map(subtype1, subtype2):
return '<' + subtype1 + '|' + subtype2 + ">"
def type_record(name):
return name + "#"
def get_array_subtype(tp):
assert tp[-1] == '*', tp
return tp[:-1]
def get_set_subtype(tp):
assert tp[-1] == '%', tp
return tp[:-1]
def get_map_key_type(tp):
assert tp[0] == '<', tp
assert tp[-1] == '>'
ret = ""
balance = 0
for ch in tp[1:-1]:
if ch == '<':
balance += 1
elif ch == '>':
assert balance > 0
balance -= 1
elif ch == '|' and balance == 0:
break
ret += ch
return ret
def get_map_value_type(tp):
assert tp[0] == '<', tp
assert tp[-1] == '>'
ret = ""
balance = 0
saw_pipe = False
for ch in tp[1:-1]:
if saw_pipe:
ret += ch
if ch == '<':
balance += 1
elif ch == '>':
assert balance > 0
balance -= 1
elif ch == '>':
assert saw_pipe
break
elif ch == '|' and balance == 0:
saw_pipe = True
return ret
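# Quick sanity check of the type-string encoding used by the helpers above (traced by hand
# from the functions in this file):
#
#   type_array(INT)                    == 'int*'
#   type_set(CHAR)                     == 'char%'
#   type_map(INT, type_array(STRING))  == '<int|char**>'
#   get_map_key_type('<int|char**>')   == 'int'
#   get_map_value_type('<int|char**>') == 'char**'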
def type_to_record_name(tp):
assert tp[-1] == '#', "%s <> %s" % (tp, tp[-1])
return tp[:-1]
def is_array(tp):
return tp[-1] == '*'
def is_record_type(tp):
return tp[-1] in ['#']
def is_int_type(tp): # doesn't include char!
return tp in [INT]
def is_set_type(tp):
return tp[-1] in ['%']
def is_map_type(tp):
return tp[-1] in ['>']
def if_(cond, then, else_):
return ["if", VOID, cond, then, else_]
def ternary(cond, then, else_):
return ["?:", arithmetic_op_type(get_expr_type(then), get_expr_type(else_), allow_same=True), cond, then, else_]
def while_(cond, body, finally_):
return ["while", VOID, cond, body, finally_]
def for_each(var, collection, body):
return ["foreach", VOID, var, collection, body]
def arithmetic_op_type(tp1, tp2, allow_same=False):
if allow_same and (tp1 == tp2 or tp1 == OBJECT or tp2 == OBJECT): # TODO: check that we are not comparing object and value type
return tp1
for pr in [REAL, INT, CHAR, BOOL]:
if tp1 == pr or tp2 == pr:
return pr
raise UASTNotImplementedException("Arithmetic op on %s and %s" % (tp1, tp2))
def convert_binary_expression(arg1, arg2, operator):
if get_expr_type(arg1) == BOOL and get_expr_type(arg2) == BOOL \
and operator in ['|', '&', '^']:
operator += operator
if operator in ['&&', '||', '==', '!=', '<', '<=', '>', '>=', '^^']:
return func_call(operator if operator != '^^' else '^', [arg1, arg2], BOOL)
if get_expr_type(arg1) == STRING or get_expr_type(arg2) == STRING:
if get_expr_type(arg1) in [STRING, CHAR] and get_expr_type(arg2) in [STRING, CHAR]:
assert operator == '+', operator
return func_call('concat', [arg1, arg2], STRING)
elif get_expr_type(arg1) in [STRING, VOID] and get_expr_type(arg2) in [STRING, VOID]:
assert operator in ['==', '!='], operator
return func_call(operator, [arg1, arg2], BOOL)
elif get_expr_type(arg1) == STRING:
assert operator == '+', operator
return func_call('concat', [arg1, func_call('str', [arg2], STRING)], STRING)
elif get_expr_type(arg2) == STRING:
assert operator == '+', operator
return func_call('concat', [func_call('str', [arg1], STRING), arg2], STRING)
assert False, "%s %s %s" % (get_expr_type(arg1), operator, get_expr_type(arg2))
if operator in ['+', '*', '%', '&', '|', '^', '-', '/', '>>', '<<']:
tp_ = arithmetic_op_type(get_expr_type(arg1), get_expr_type(arg2))
return func_call(operator, [arg1, arg2], tp_)
else:
raise UASTNotImplementedException("operator %s" % operator)
def is_assigneable(expr):
return expr[0] in ['var', 'field'] or expr[0] == 'invoke' and expr[2] == 'array_index'
def assert_val_matches_type(val, tp):
if tp == '?':
return
if not val_matches_type(val, tp):
if isinstance(val, float) and is_int_type(tp):
raise UASTNotImplementedException("Implicit cast from REAL to INT")
if val is None and is_int_type(tp):
raise UASTNotImplementedException("Implicit cast from NULL to INT")
assert False, "Type mismatch.\n Type: %s;\n Val: %s\n Val type: %s\n" % (tp, val, type(val))
def val_matches_type(val, tp, verbose=False):
if is_int_type(tp) or tp == CHAR:
# allow implicit conversion from float to int
return isinstance(val, float) or isinstance(val, int) or isinstance(val, long)
elif tp in [REAL]:
return isinstance(val, float) or isinstance(val, int)
elif tp in [STRING]:
return isinstance(val, six.string_types) or val is None
elif tp in [BOOL]:
return isinstance(val, bool)
elif tp[-1] in ["*"]:
return isinstance(val, list) or val is None
elif tp[-1] in ['#']:
return isinstance(val, dict) or val is None
elif tp[-1] in ['>']:
return isinstance(val, SortedDict) or val is None
elif tp[-1] in ['%']:
return isinstance(val, SortedSet) or val is None
elif tp == 'void':
return val is None
elif tp == 'func':
return isinstance(val, six.string_types)
elif tp in 'object':
return not isinstance(val, int) and not isinstance(val, long) and not isinstance(val, float) and not isinstance(val, bool)
elif tp in [IO.SCANNER, IO.PRINTER]:
return val is None
else:
assert False, tp
def can_cast(to_, from_):
if from_ == '?':
return True
if (to_[-1] in ['*', '#', '>', '%'] or to_ == OBJECT) and \
(from_[-1] in ['*', '#', '>', '%'] or from_ == OBJECT):
return True
return to_ in [INT, REAL, CHAR] and from_ in [INT, REAL, CHAR, STRING]
def get_block_statements(block):
assert isinstance(block, list)
return block
def default_value(ret_type):
if ret_type in [INT, REAL, CHAR]:
return 0
elif ret_type in [STRING]:
return ""
elif ret_type == BOOL:
return False
return None
# parse_context is either JContext or CContext
def prepare_global_var_and_func(parse_context):
gi_fname = GLOBALS_NAME + ".__init__"
globals_ = record(GLOBALS_NAME)
parse_context.register_type(GLOBALS_NAME, type_record(GLOBALS_NAME))
parse_context.program['types'].append(globals_)
parse_context.globals_record = globals_
parse_context.globals_init_var = var(GLOBALS_NAME, type_record(GLOBALS_NAME))
parse_context.globals_init_func = func(gi_fname, VOID)
return gi_fname
class InputSchemaExtractorNotSupportedException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class InputSchemaExtractor(object):
def __init__(self, data, attempt_multi_test=False):
super(InputSchemaExtractor, self).__init__()
self.uast = data
self.is_multi_test = attempt_multi_test
self.multi_test_var = None
self.multi_test_iter = None
self.multi_test_loop = None
self.inside_multi_test_loop = False
self.funcs = {get_func_name(func): func for func in data['funcs']}
self.types = {get_record_name(record): record for record in data['types']}
self.schema = []
self.cur_schema = self.schema
self.var_map = {}
self.arr_map = {}
self.init_vals = {}
self.var_map_assigns = {}
self.arr_map_assigns = {}
self.arr_map_inits = {}
self.bypassed_arrays = set()
self.remove_vars = set()
self.not_impl_stack = []
self.loop_stack = []
self.func_stack = []
self.cur_branch_out_len = 0
self.max_branch_out_len = 0
self.output_type = None
self.funcs_visited = {}
self.funcs_with_reads = set()
self.funcs_with_writes = set()
self.func_returns = {}
self.num_args = 0
# datastructures to postprocess code
self.replace_with_noops = []
self.process_input_blocks = []
self.process_output_blocks = []
self.process_invokes_with_reads = []
self.process_invokes_with_writes = []
def push_not_impl(self, s):
self.not_impl_stack.append(s)
def pop_not_impl(self):
self.not_impl_stack.pop()
def check_not_impl(self):
if self.not_impl_stack:
raise InputSchemaExtractorNotSupportedException(self.not_impl_stack[-1])
def next_arg_name(self):
if self.num_args >= 26:
raise InputSchemaExtractorNotSupportedException("More than 26 arguments")
self.num_args += 1
return chr(ord('a') + self.num_args - 1)
def crawl_stmt(self, stmt):
# TODO: presently not supporting reading lines
def hack_fix_me(s):
if s == 'line': return 'word'
return s
if stmt[0] == 'if': # TODO: properly handle this
self.push_not_impl("IO inside if")
self.crawl_stmt(stmt[2])
out_depth_before = self.cur_branch_out_len
self.crawl_stmt_list(stmt[3])
out_depth_after = self.cur_branch_out_len
self.cur_branch_out_len = out_depth_before
self.crawl_stmt_list(stmt[4])
self.cur_branch_out_len = max(self.cur_branch_out_len, out_depth_after)
self.pop_not_impl()
elif stmt[0] == 'foreach':
self.push_not_impl("IO inside foreach")
self.loop_stack.append(None)
self.crawl_stmt_list(stmt[4])
self.loop_stack.pop()
self.pop_not_impl()
elif stmt[0] == 'while':
cond = stmt[2]
body = stmt[3]
is_up_to_t = self.is_multi_test and self.multi_test_var is not None
if is_up_to_t:
is_up_to_t = cond[0] == 'invoke' and cond[2] in ('<', '<=') and cond[3][1][0] == 'var' and cond[3][1][2] == self.multi_test_var[0][2] and cond[3][0][0] == 'var' and cond[3][0][2] in self.init_vals
init_val = self.init_vals[cond[3][0][2]] if is_up_to_t else None
is_up_to_t = is_up_to_t and init_val is not None and (cond[2] == '<' and init_val == 0) #or cond[2] == '<=' and init_val == 1) # TODO: add support for 1-based indexing. Presently turned off because we use the variable to index into the tests
if is_up_to_t:
self.multi_test_iter = cond[3][0]
self.multi_test_loop = stmt
assert len(self.schema) == 1 and self.schema[0] == self.multi_test_var[1], "%s <> %s" % (self.schema, self.multi_test_var[1])
self.process_input_blocks.append([self.schema[0], cond[3][1]])
#print(('T', is_up_to_t, cond, init_val, cond[3][0][2], self.multi_test_var))
else: # try while(t-->0)
if cond[0] == 'invoke' and cond[2] in ('>', '!='):
#print("Step1...")
cond_lhs = cond[3][0]
cond_rhs = cond[3][1]
common_cond = cond_rhs[0] == 'val' and cond_rhs[2] == 0
while_t_minus_minus = common_cond and cond_lhs[0] == 'invoke' and cond_lhs[2] == '+' and cond_lhs[3][1][0] == 'val' and cond_lhs[3][1][2] == 1
if while_t_minus_minus:
#print("Step2...")
while_t_minus_minus = cond_lhs[3][0][0] == 'assign' and cond_lhs[3][0][2][0] == 'var' and cond_lhs[3][0][2][2] == self.multi_test_var[0][2]
while_t = False
if not while_t_minus_minus:
while_t = common_cond and cond_lhs[0] == 'var' and cond_lhs[2] == self.multi_test_var[0][2]
while_t = while_t and body and body[-1][0] == 'assign' and body[-1][2][0] == 'var' and body[-1][2][2] == cond_lhs[2]
if while_t:
assign_rhs = body[-1][3]
#print("ASSIGN_RHS", assign_rhs)
while_t = assign_rhs[0] == 'invoke' and assign_rhs[2] == '-' and assign_rhs[3][0][0] == 'var' and assign_rhs[3][0][2] == cond_lhs[2]
#print(body[-1])
if while_t_minus_minus or while_t:
if while_t:
body.pop()
if body and body[-1][0] == 'var' and body[-1][2] == self.multi_test_var[0][2]:
body.pop()
                        # TODO: would make sense to check if assign is correct, but probabilistically it probably is :)
#print("Step3...")
self.multi_test_iter = ["var", INT, "ti"] #TODO: ti should be available
self.multi_test_loop = stmt
is_up_to_t = True
assert len(self.schema) == 1 and self.schema[0] == self.multi_test_var[1], "%s <> %s" % (self.schema, self.multi_test_var[1])
new_lhs = self.multi_test_iter
new_rhs = cond_lhs[3][0][2] if while_t_minus_minus else cond_lhs
stmt[2] = cond = ["invoke", BOOL, "<", [new_lhs, new_rhs]]
stmt[4] = [["assign", INT, self.multi_test_iter, ['invoke', INT, '+', [self.multi_test_iter, ['val', INT, 1]]]]]
self.process_input_blocks.append([self.schema[0], new_rhs])
is_up_to_n = cond[0] == 'invoke' and cond[2] in ('<', '<=') and cond[3][1][0] == 'var' and cond[3][1][2] in self.var_map and cond[3][0][0] == 'var' and cond[3][0][2] in self.init_vals
init_val = self.init_vals[cond[3][0][2]] if is_up_to_n else None
is_up_to_n = is_up_to_n and init_val is not None and (cond[2] == '<' and init_val == 0 or cond[2] == '<=' and init_val == 1)
assert not is_up_to_n or not is_up_to_t
#print(('N', is_up_to_n, cond, init_val, cond[3][0][2]))
if is_up_to_t:
if self.inside_multi_test_loop:
raise InputSchemaExtractorNotSupportedException("Iterating over the `t` inside iterating over `t` for multitest case")
self.inside_multi_test_loop = True
self.crawl_stmt_list(body)
self.inside_multi_test_loop = False
elif is_up_to_n:
self.loop_stack.append(self.var_map[cond[3][1][2]])
old_cur_schema = self.cur_schema
self.cur_schema.append(['loop', VOID, []])
self.cur_schema = self.schema[-1][2]
self.crawl_stmt_list(body)
if not self.cur_schema:
old_cur_schema.pop()
self.cur_schema = old_cur_schema
self.loop_stack.pop()
else:
self.push_not_impl("IO inside for other than range for on an input")
self.loop_stack.append(None)
self.crawl_stmt_list(body)
self.loop_stack.pop()
self.pop_not_impl()
elif stmt[0] in ['break', 'continue', 'noop']:
pass
elif stmt[0] == 'return':
func_name = get_func_name(self.func_stack[-1])
if func_name not in self.func_returns:
self.func_returns[func_name] = []
self.func_returns[func_name].append(stmt)
self.crawl_stmt(stmt[2])
# Expressions
elif stmt[0] == 'assign':
if stmt[2][1] in [IO.SCANNER, IO.PRINTER]:
self.replace_with_noops.append(stmt)
else:
ret = self.crawl_stmt(stmt[3])
if ret is not None and stmt[2][0] == 'var':
if self.is_multi_test and self.multi_test_var is None:
self.multi_test_var = (stmt[2], ret)
self.replace_with_noops.append(stmt)
else:
self.var_map[stmt[2][2]] = ret
if stmt[3][0] != 'var':
self.var_map_assigns[stmt[2][2]] = stmt
if ret is not None and stmt[2][0] == 'invoke' and stmt[2][2] == 'array_index' and stmt[2][3][0][0] == 'var' and stmt[2][3][1][0] == 'var':
self.arr_map[stmt[2][3][0][2]] = ret
self.arr_map_assigns[stmt[2][3][0][2]] = stmt
if stmt[3][0] == 'val' and stmt[2][0] == 'var':
#print("Assigning %s to %s" % (stmt[2][2], stmt[3][2]))
self.init_vals[stmt[2][2]] = stmt[3][2]
if stmt[2][0] == 'var':
if stmt[2][2] not in self.arr_map_inits:
self.arr_map_inits[stmt[2][2]] = stmt
else:
self.arr_map_inits[stmt[2][2]] = False
elif stmt[0] == 'var':
if stmt[2] in self.var_map:
return self.var_map[stmt[2]]
elif stmt[0] == 'field':
self.crawl_stmt(stmt[2])
elif stmt[0] == 'val':
pass
elif stmt[0] == 'invoke':
if stmt[2].startswith('_io_next_'):
if self.is_multi_test and self.multi_test_var is not None and not self.inside_multi_test_loop:
raise InputSchemaExtractorNotSupportedException("Multitest schema with input outside of multitest while loop: %s" % stmt)
self.funcs_with_reads.add(get_func_name(self.func_stack[-1]))
self.check_not_impl()
if len(self.loop_stack) > 1:
raise InputSchemaExtractorNotSupportedException("Nested loops")
if not self.loop_stack:
new_entry = ['in', IO.func_to_type[stmt[2]], self.next_arg_name(), hack_fix_me(stmt[2].split('_')[-1])]
else:
new_entry = ['in', type_array(IO.func_to_type[stmt[2]]), self.next_arg_name(), stmt[2].split('_')[-1]]
if self.loop_stack[-1][0] == 'in':
self.loop_stack[-1][0] = 'size'
self.loop_stack[-1][1] = INT
self.loop_stack[-1][2] = [new_entry[2]]
else:
assert self.loop_stack[-1][0] == 'size'
if new_entry[2] not in self.loop_stack[-1][2]:
self.loop_stack[-1][2].append(new_entry[2])
self.process_input_blocks.append([new_entry, stmt])
self.cur_schema.append(new_entry)
return new_entry
elif stmt[2].startswith('_io_print'):
self.funcs_with_writes.add(get_func_name(self.func_stack[-1]))
assert len(stmt[3]) in [0, 1], stmt
if len(stmt[3]):
#if self.is_multi_test and not self.inside_multi_test_loop:
# raise InputSchemaExtractorNotSupportedException("Multitest schema with output outside of multitest while loop")
if self.loop_stack or self.inside_multi_test_loop:
self.cur_branch_out_len = 2 # >1 means return a list
else:
self.cur_branch_out_len += 1
self.max_branch_out_len = max(self.max_branch_out_len, self.cur_branch_out_len)
new_output_type = get_expr_type(stmt[3][0])
if self.output_type is not None and self.output_type != new_output_type:
if self.output_type == 'char*' and not new_output_type.endswith('*'):
pass
elif not self.output_type.endswith('*') and new_output_type == 'char*':
self.output_type = 'char*'
else:
raise InputSchemaExtractorNotSupportedException("Mixing different output types: %s and %s" % (self.output_type, new_output_type))
else:
self.output_type = new_output_type
self.process_output_blocks.append(stmt)
else:
self.replace_with_noops.append(stmt)
else:
assert not stmt[2].startswith('_io_')
# TODO: invoke the function if it's a user-defined function
for arg in stmt[3]:
if len(arg) <= 1:
assert False, "argument doesn't have two elements. Stmt: %s; arg: %s" % (stmt, arg)
if arg[1] in [IO.PRINTER, IO.SCANNER]:
arg[:] = ['val', VOID, None]
self.crawl_stmt(arg)
if stmt[2] in self.funcs:
snapshot_var_map = self.var_map
self.var_map = {}
assert get_func_name(self.funcs[stmt[2]]) == stmt[2], "%s <> %s" % (self.funcs[stmt[2]], stmt[2])
self.crawl_func(self.funcs[stmt[2]])
# TODO: this won't work if a function that reads stuff is called twice, but it doesn't appear to be a common case
if stmt[2] in self.funcs_with_reads:
self.funcs_with_reads.add(get_func_name(self.func_stack[-1]))
self.process_invokes_with_reads.append(stmt)
if stmt[2] in self.funcs_with_writes:
self.funcs_with_writes.add(get_func_name(self.func_stack[-1]))
self.process_invokes_with_writes.append(stmt)
self.var_map = snapshot_var_map
elif stmt[0] == '?:':
self.push_not_impl("IO inside ternary op")
self.crawl_stmt(stmt[2])
self.crawl_stmt(stmt[3])
self.crawl_stmt(stmt[4])
self.pop_not_impl()
elif stmt[0] == 'cast':
ret = self.crawl_stmt(stmt[2])
if ret is not None:
if get_expr_type(stmt) not in (INT, REAL):
raise InputSchemaExtractorNotSupportedException("CAST of input to %s" % get_expr_type(stmt))
if not ret[1].startswith('char*'):
return None
#print("replacing %s / %s with %s" % (ret[1], ret[3], get_expr_type(stmt)))
ret[1] = ret[1].replace('char*', get_expr_type(stmt))
ret[3] = get_expr_type(stmt)
return ret
else:
assert False, stmt[0]
def crawl_stmt_list(self, l):
for s in l:
self.crawl_stmt(s)
def crawl_func(self, func):
self.func_stack.append(func)
func_name = get_func_name(func)
if func_name not in self.funcs_visited:
self.funcs_visited[func_name] = 1
else:
self.funcs_visited[func_name] += 1
if self.funcs_visited[func_name] > 10:
self.func_stack.pop()
return # to prevent recursion / exponential blow up
self.crawl_stmt_list(get_func_body(func))
self.func_stack.pop()
def extract_schema(self, lang):
entry_point = None
for func_name, func in self.funcs.items():
if lang == 'c++':
if func_name == 'main':
if entry_point is not None:
raise InputSchemaExtractorNotSupportedException("Multiple entry points")
entry_point = func
elif lang == 'java':
if func_name.endswith(".main"):
args = get_func_args(func)
if len(args) == 1 and get_var_name(args[0]) != 'this':
if entry_point is not None:
raise InputSchemaExtractorNotSupportedException("Multiple entry points")
entry_point = func
else:
assert False
if entry_point is None:
raise InputSchemaExtractorNotSupportedException("Entry point not found")
self.entry_point = entry_point
self.push_not_impl("I/O in global initializer")
self.crawl_func(self.funcs[GLOBALS_NAME + ".__init__"])
self.pop_not_impl()
self.crawl_func(entry_point)
if not self.schema or (self.is_multi_test and len(self.schema) == 1):
raise InputSchemaExtractorNotSupportedException("Input schema is not derived")
if self.output_type is not None:
if self.max_branch_out_len > 1:
self.output_type = type_array(self.output_type)
self.schema.append(['out', self.output_type])
else:
raise InputSchemaExtractorNotSupportedException("Output type is not derived")
if self.is_multi_test:
self.schema[0][0] = 'testN'
# BFS to remove empty loops
while True:
found = False
x = [(x, self.schema, i) for (i, x) in enumerate(self.schema)]
for el, parent, idx in x:
if el[0] == 'loop':
if not el[2]:
del parent[idx]
found = True
break
else:
x += [(x, el[2], i) for (i, x) in enumerate(el[2])]
if not found:
break
if not self.is_multi_test:
for k, v in self.var_map.items():
if v[0] == 'in' and (v[1] == 'char*' or not v[1].endswith('*')) and k in self.var_map_assigns:
self.remove_vars.add(k)
self.replace_with_noops.append(self.var_map_assigns[k])
v[2] = k
for k, v in self.arr_map.items():
if v[0] == 'in' and v[1].endswith('*') and v[1] != 'char*':
if k in self.arr_map_inits:
if self.arr_map_inits[k] == False:
continue
self.replace_with_noops.append(self.arr_map_inits[k])
self.remove_vars.add(k)
self.replace_with_noops.append(self.arr_map_assigns[k])
self.bypassed_arrays.add(k)
for sz in self.schema:
if sz[0] == 'size':
for i, x in enumerate(sz[2]):
if x == v[2]:
sz[2][i] = k
v[2] = k
#print(self.arr_map)
#print(self.arr_map, self.arr_map_assigns)
return self.schema
def matches_schema(self, other_schema):
if len(self.schema) != len(other_schema):
return False
for our, their in zip(self.schema, other_schema):
if our != their:
if our[0] == 'out' and their[0] == 'out':
if our[1] + '*' == their[1]:
continue
if our[1] == their[1] + '*':
continue
return False
return True
def postprocess_uast(self, desired_schema):
assert self.matches_schema(desired_schema)
if self.is_multi_test and (not self.multi_test_iter or not self.multi_test_var):
raise InputSchemaExtractorNotSupportedException("Multitest schema extractor hasn't found the multitest iter or multitest var")
for x in desired_schema:
if x[0] == 'out':
# it is common for schemas to be different only in whether the output is array or not
# hence allow the caller to choose the output type
self.output_type = x[1]
entry_point = self.entry_point
original_vars = (set([x[2] for x in get_func_vars(entry_point) if len(x) > 2]) | \
set([x[2] for x in get_func_args(entry_point) if len(x) > 2]))
for func_name in self.funcs_with_reads:
func = self.funcs[func_name]
original_vars |= (set([x[2] for x in get_func_vars(func) if len(x) > 2]) | \
set([x[2] for x in get_func_args(func) if len(x) > 2]))
original_vars -= set(self.remove_vars)
def arg_name(s):
if s == 'testN': return s
ord_ = 0
orig = s
while s in original_vars:
ord_ += 1
s = orig + str(ord_)
return s
def idx_name(s):
s = "%s_i" % arg_name(s)
ord_ = 0
orig = s
while s in original_vars:
ord_ += 1
s = orig + str(ord_)
return s
for block in self.replace_with_noops:
del block[:]
block.append("noop")
set_func_name(entry_point, '__main__')
args = []
vars_ = []
body = []
idx_reset = []
body_after = []
args_map = {}
args_idx_map = {}
for entry, block in self.process_input_blocks:
del block[:]
if entry[0] == 'size':
arg_var = ["var", OBJECT, arg_name(entry[2][0])] # TODO: OBJECT should be the actual type
replace_with = arg_var
if self.is_multi_test and entry != self.multi_test_var[1]:
arg_var[1] += '*'
arg_var = ["invoke", OBJECT, 'array_index', [arg_var, self.multi_test_iter]]
block.append("invoke")
block.append(INT)
block.append("len")
block.append([arg_var])
elif entry[0] in ['testN', 'in']:
arg_var = ["var", entry[1], arg_name(entry[2])]
tp = entry[1]
replace_with = arg_var
if self.is_multi_test and entry != self.multi_test_var[1]:
arg_var[1] += '*'
entry[1] += '*'
replace_with = ["invoke", tp, 'array_index', [arg_var, self.multi_test_iter]]
if entry[2] not in args_map:
args.append(arg_var)
args_map[entry[2]] = args[-1]
if entry[2] in self.bypassed_arrays:
continue
if tp.endswith("*") and tp != 'char*':
if entry[2] not in args_idx_map:
vars_.append(["var", INT, idx_name(entry[2])])
args_idx_map[entry[2]] = vars_[-1]
idx_reset.insert(0, ["assign", INT, vars_[-1], constant(INT, 0)])
block.append("invoke")
block.append(tp[:-1])
block.append("array_index")
inc_idx = ["var", INT, idx_name(entry[2])]
inc_idx = ["assign", INT, inc_idx, ['invoke', INT, '+', [inc_idx, constant(INT, 1)]]]
inc_idx = ["invoke", INT, '-', [inc_idx, constant(INT, 1)]]
block.append([replace_with, inc_idx])
else:
block[:] = replace_with
out_type = self.output_type
if out_type.endswith('*') and out_type != 'char*':
vars_.append(["var", out_type, '__ret'])
out_var = vars_[-1]
body = [["assign", out_type, out_var, ["invoke", out_type, "_ctor", []]]] + body
body_after += [["return", out_type, out_var]]
for block in self.process_output_blocks:
if block[0] == 'return': # has been processed already
continue
if out_type.endswith('*') and out_type != 'char*':
block_val = block[3][0]
del block[:]
if out_type == 'char**':
if get_expr_type(block_val) == 'char*':
block.append('assign')
block.append('char**')
block.append(out_var)
if block_val[0] == "val" and '\t' not in block_val[2] and ' ' not in block_val[3]:
block.append(['invoke', 'char**', 'array_concat', [out_var, block_val]])
else:
block.append(['invoke', 'char**', 'array_concat', [out_var, ['invoke', 'char**', 'string_split', [block_val, ['val', 'char*', ' \\t']]]]])
else:
block.append('invoke')
block.append('char**')
block.append('array_push')
block.append([out_var, ['invoke', STRING, 'str', [block_val]]])
else:
block.append('invoke')
block.append('void')
block.append('array_push')
block.append([out_var, block_val])
else:
assert len(block) == 4, block
block[0] = 'return'
if get_expr_type(block[3][0]) != 'char*' and self.output_type == 'char*':
block[2] = block[3][0]
block[2] = ['invoke', 'char*', 'str', [block[3][0]]]
else:
block[2] = block[3][0]
block.pop()
if not self.is_multi_test:
body = body + idx_reset
else:
assert self.multi_test_loop
self.multi_test_loop[3] = idx_reset + self.multi_test_loop[3]
misses_multi_test_iter_in_vars = self.multi_test_iter and all([x[2] != self.multi_test_iter[2] for x in get_func_vars(entry_point)])
if misses_multi_test_iter_in_vars:
vars_.append(self.multi_test_iter)
body.append(["assign", INT, self.multi_test_iter, ["val", INT, 0]])
get_func_args(entry_point)[:] = [x for x in args]
if self.multi_test_var:
self.multi_test_var[0][2] = arg_name(self.multi_test_var[1][2])
get_func_vars(entry_point)[:] = [x for x in get_func_vars(entry_point) if (not self.multi_test_var or x[2] != self.multi_test_var[0][2]) and not x[2] in self.remove_vars] + vars_
get_func_body(entry_point)[:] = body + [x for x in get_func_body(entry_point) if not (x[0] == 'while' and (len(x[3]) == 0 or (len(x[3]) == 1 and x[3][0][0] == 'noop')))] + body_after
for func in self.funcs_with_reads:
if get_func_name(self.funcs[func]) != '__main__':
get_func_args(self.funcs[func])[:] = get_func_args(self.funcs[func])[:] + args + vars_
get_func_vars(self.funcs[func])[:] = [x for x in get_func_vars(self.funcs[func]) if not x[2] in self.remove_vars]
for func in self.funcs_with_writes:
self.funcs[func][1] = out_type
if self.funcs[func][0] == 'ctor':
self.funcs[func][0] = 'func'
self.funcs[func][2] = self.funcs[func][2].replace('.__init__', '_')
get_func_body(self.funcs[func]).pop() # drop the return statement
if func in self.func_returns:
for stmt in self.func_returns[func]:
stmt[1] = out_type
stmt[2] = ["var", out_type, "__ret"]
for invoke in self.process_invokes_with_reads:
invoke[3] += args + vars_
for invoke in self.process_invokes_with_writes:
if invoke[0] == 'return': # already processed
continue
try:
invoke[2] = invoke[2].replace('.__init__', '_')
except:
print(invoke)
raise
invoke[1] = out_type
invoke[:] = ['return', VOID, [x for x in invoke]]
return self.uast
class ExecutorContext(object):
def __init__(self):
super(ExecutorContext, self).__init__()
self._registered_vars = set()
self._vals = {}
self._return_value = None
self._flow_control = None
self._instructions_count = 0
def register_var(self, var):
assert var[2] not in self._registered_vars, var[2]
self._registered_vars.add(var[2])
def set_val(self, var, val):
assert var[2] in self._registered_vars, var
self._vals[var[2]] = val
def get_val(self, var):
if var[2] not in self._vals:
assert False, var
return self._vals[var[2]]
def array_fill(a, b):
for idx in range(len(a)):
if isinstance(a, six.string_types):
raise UASTNotImplementedException("Mutable strings")
a[idx] = b
def map_put(a, b, c):
a[b] = c
def map_remove_key(a, y):
del a[y]
def array_map_clear(a):
if isinstance(a, list):
del a[:]
elif isinstance(a, SortedDict):
a.clear()
elif isinstance(a, SortedSet):
a.clear()
else:
assert False, type(a)
def array_remove_idx(a, y):
ret = a[y]
del a[y]
return ret
def array_remove_value(a, y):
y = a.index(y)
ret = a[y]
del a[y]
return ret
def magic_escape(x):
return x if x not in ['|', '\\', '+', '(', ')', ',', '[', ']'] else '\\' + x
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
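# Usage sketch (this is the standard functools.cmp_to_key recipe, kept local for the
# interpreter's sort_cmp builtin below):
#
#   sorted([3, 1, 2], key=cmp_to_key(lambda a, b: b - a))
#   # -> [3, 2, 1]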
DEFAULT_TYPE_FUNCS = {
'+': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'-': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'*': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'/': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'%': lambda x, y: 'int',
'>': lambda x, y: 'bool',
'<': lambda x, y: 'bool',
'>=': lambda x, y: 'bool',
'<=': lambda x, y: 'bool',
'==': lambda x, y: 'bool',
'!=': lambda x, y: 'bool',
'||': lambda x, y: 'bool',
'&&': lambda x, y: 'bool',
'sin': lambda x: 'real',
'cos': lambda x: 'real',
"str": lambda x: 'char*',
"len": lambda x: 'int',
"sqrt": lambda x: 'real',
"log": lambda x: 'real',
"ceil": lambda x: 'int',
"sort": lambda x: x,
"array_push": lambda x, y: 'void',
"array_index": lambda x, y: get_array_subtype(x),
"reverse": lambda x: x,
"sort_cmp": lambda x, y: x,
"concat": lambda x, y: 'char*',
"string_find": lambda x, y: 'int',
"string_find_last": lambda x, y: 'int',
"string_split": lambda x, y: type_array(x),
"map_get": lambda x, y: get_map_value_type(x),
"map_keys": lambda x: type_array(get_map_key_type(x)),
"map_values": lambda x: type_array(get_map_value_type(x)),
"map_put": lambda x, y, z: 'void',
"map_has_key": lambda x, y: 'bool',
'!': lambda x: x,
'~': lambda x: x,
'&': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'|': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'^': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'>>': lambda x, y: x,
'<<': lambda x, y: x,
'atan2': lambda x, y: 'real',
'pow': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'round': lambda x: 'int',
'floor': lambda x: 'int',
'clear': lambda x: 'void',
'min': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'max': lambda x, y: arithmetic_op_type(x, y, allow_same=True),
'abs': lambda x: x,
'lower': lambda x: 'char*',
'upper': lambda x: 'char*',
'fill': lambda x, y: 'void',
'copy_range': lambda x, y, z: x,
'array_index': lambda x, y: get_array_subtype(x),
'contains': lambda x, y: 'bool',
'string_replace_one': lambda x, y, z: x,
'string_replace_all': lambda x, y, z: x,
'array_concat': lambda x, y: x,
'string_insert': lambda x, y, z: x,
'string_trim': lambda x: x,
'substring': lambda x, y, z: x,
'substring_end': lambda x, y: x,
'array_push': lambda x, y: 'void',
'array_pop': lambda x: get_array_subtype(x),
'array_insert': lambda x, y, z: 'void',
'array_remove_idx': lambda x, y: get_array_subtype(x),
'array_remove_value': lambda x, y: get_array_subtype(x),
'array_find': lambda x, y: 'int',
'array_find_next': lambda x, y: 'int',
'set_push': lambda x, y: 'void',
'set_remove': lambda x, y: 'void',
'map_remove_key': lambda x, y: 'void',
'array_initializer': lambda *args: type_array(args[0]),
}
# TODO: for now passing the executor for sort_cmp, might want to find a different solution later
def get_default_funcs(executor):
funcs = {}
funcs['=='] = lambda x, y: x == y
funcs['!='] = lambda x, y: x != y
funcs['&&'] = lambda x, y: x and y
funcs['||'] = lambda x, y: x or y
funcs['!'] = lambda x: not x
funcs['~'] = lambda x: ~ x
funcs['<'] = lambda x, y: x < y
funcs['>'] = lambda x, y: x > y
funcs['<='] = lambda x, y: x <= y
funcs['>='] = lambda x, y: x >= y
funcs['*'] = lambda x, y: x * y
funcs['/'] = lambda x, y: x // y if not isinstance(x, float) and not isinstance(y, float) else x / y
funcs['%'] = lambda x, y: x % y
funcs['+'] = lambda x, y: x + y
funcs['-'] = lambda x, y: x - y
funcs['&'] = lambda x, y: x & y
funcs['|'] = lambda x, y: x | y
funcs['^'] = lambda x, y: x ^ y
funcs['>>'] = lambda x, y: x >> y
funcs['<<'] = lambda x, y: x << y
funcs['str'] = lambda x: str(x)
funcs['len'] = lambda x: len(x)
funcs['sqrt'] = lambda x: math.sqrt(x)
funcs['log'] = lambda x: math.log(x)
funcs['atan2'] = lambda x, y: math.atan2(x, y)
funcs['sin'] = lambda x: math.sin(x)
funcs['cos'] = lambda x: math.cos(x)
funcs['pow'] = lambda x, y: x ** y if y < 100 else pow(x, y, 1 << 64)
funcs['round'] = lambda x: math.floor(x + 0.5)
funcs['floor'] = lambda x: math.floor(x)
funcs['ceil'] = lambda x: math.ceil(x)
funcs['clear'] = array_map_clear
funcs['min'] = lambda a, b: min(a, b)
funcs['max'] = lambda a, b: max(a, b)
funcs['abs'] = lambda a: abs(a)
funcs['reverse'] = lambda a: list(reversed(a)) if not isinstance(a, six.string_types) else ''.join(reversed(a))
funcs['lower'] = lambda a: a.lower()
funcs['upper'] = lambda a: a.upper()
funcs['sort'] = lambda a: list(sorted(a)) if not a or not isinstance(a[0], dict) else list(sorted(a, key=lambda x: tuple(x.items())))
funcs['sort_cmp'] = lambda a, b: list(sorted(a, key=cmp_to_key(lambda x,y: executor.execute_func(b, [x,y]))))
funcs['fill'] = array_fill
funcs['copy_range'] = lambda arr, fr, to: [x for x in arr[fr:to]]
funcs['array_index'] = lambda x, y: x[y] if not isinstance(x, six.string_types) else ord(x[y])
funcs['contains'] = lambda x, y: y in x
funcs['string_find'] = lambda x, y: x.find(y if isinstance(y, six.string_types) else chr(y))
funcs['string_find_last'] = lambda x, y: x.rfind(y if isinstance(y, six.string_types) else chr(y))
funcs['string_replace_one'] = lambda x, y, z: x.replace(y if isinstance(y, six.string_types) else chr(y), z if isinstance(z, six.string_types) else chr(z), 1)
funcs['string_replace_all'] = lambda x, y, z: x.replace(y if isinstance(y, six.string_types) else chr(y), z if isinstance(z, six.string_types) else chr(z))
funcs['concat'] = lambda x, y: (x if isinstance(x, six.string_types) else chr(x)) + (y if isinstance(y, six.string_types) else chr(y))
funcs['array_concat'] = lambda x, y: x + y
funcs['string_insert'] = lambda x, pos, y: x[:pos] + (y if isinstance(y, six.string_types) else chr(y)) + x[pos:]
funcs['string_split'] = lambda x, y: [z for z in re.split('|'.join([magic_escape(_) for _ in y]), x) if z] if y != '' else [z for z in x]
funcs['string_trim'] = lambda x: x.strip()
funcs['substring'] = lambda x, y, z: x[y:z]
funcs['substring_end'] = lambda x, y: x[y:]
funcs['array_push'] = lambda x, y: x.append(y)
funcs['array_pop'] = lambda x: x.pop()
funcs['array_insert'] = lambda x, pos, y: x.insert(pos, y)
funcs['array_remove_idx'] = array_remove_idx
funcs['array_remove_value'] = array_remove_value
funcs['array_find'] = lambda x, y: x.index(y) if y in x else -1
funcs['array_find_next'] = lambda x, y, z: x.index(y, z) if y in x[z:] else -1
funcs['set_push'] = lambda x, y: x.add(y)
funcs['set_remove'] = lambda x, y: x.remove(y)
funcs['map_has_key'] = lambda x, y: y in x
funcs['map_put'] = map_put
funcs['map_get'] = lambda x, y: x[y] if y in x else None
funcs['map_keys'] = lambda x: list(x.keys())
funcs['map_values'] = lambda x: list(x.values())
funcs['map_remove_key'] = map_remove_key
funcs['array_initializer'] = lambda *x: list(x)
return funcs
class Executor(object):
def __init__(self, data, timeout=600):
super(Executor, self).__init__()
self.funcs = {get_func_name(func): func for func in data['funcs']}
self.types = {get_record_name(record): record for record in data['types']}
self.watchers = []
self.timeout = timeout
self.start_time = time.time()
self.funcs.update(get_default_funcs(self))
self.globals_ = {}
global_init_func = GLOBALS_NAME + ".__init__"
if global_init_func in self.funcs:
self.execute_func(global_init_func, [])
elif GLOBALS_NAME in self.types and len(self.types[GLOBALS_NAME][2]) > 2:
raise ValueError("Must have %s if %s struct is present and non empty (%s)." % (
global_init_func, GLOBALS_NAME, self.types[GLOBALS_NAME]
))
def _observe_read(self, context, read_store, args):
if self.watchers:
args[-1] = (args[-1][0], tuplify(args[-1][1])) # tuplify the new value
if read_store is not None:
read_store[0] = args
else:
evt = WatcherEvent("read", self, context, args)
self._watch(evt)
elif read_store is not None:
read_store[0] = []
def _observe_write(self, context, args):
if self.watchers:
args[-1] = (args[-1][0], tuplify(args[-1][1])) # tuplify the new value
evt = WatcherEvent("write", self, context, args)
self._watch(evt)
def register_watcher(self, watcher):
self.watchers.append(watcher)
def _watch(self, event):
for watcher in self.watchers:
watcher.watch(event)
def compute_lhs(self, context, expr, read_store):
assert read_store is not None and read_store[1]
return self.compute_expression(context, expr, read_store=read_store)
@watchable("expression")
def compute_expression(self, context, expr, read_store=None):
is_lhs = read_store is not None and read_store[1]
if is_lhs and not is_assigneable(expr):
raise UASTNotImplementedException("Non-lhs expression as argument while computing lhs")
if expr[0] == 'assign':
rhs = self.compute_expression(context, expr[3])
assert is_assigneable(expr[2]), expr
if expr[2][0] == 'var':
# Fail if integer values are too big.
if isinstance(rhs, int) and abs(rhs) > LARGEST_INT:
raise OverflowError()
context.set_val(expr[2], rhs)
# Same as with the field.
inner_read_store = [None, is_lhs]
                # Call compute_expression so that before_expression and after_expression events are observed.
                # compute_expression would also call _observe_read, which we prefer to skip; we effectively
                # ignore it here by never using the contents of inner_read_store.
self.compute_expression(context, expr[2], read_store=inner_read_store)
self._observe_write(context, [(expr[2][2], rhs)])
elif expr[2][0] == 'field':
field = expr[2]
inner_read_store = [None, True]
record = self.compute_lhs(context, field[2], read_store=inner_read_store)
record[field[3]] = rhs
assert inner_read_store[0] is not None
dependants = inner_read_store[0]
self._observe_write(context, dependants + [(field[3], rhs)])
elif expr[2][0] == 'invoke' and expr[2][2] == 'array_index':
args = expr[2][3]
deref = args[0]
inner_read_store = [None, True]
array = self.compute_lhs(context, args[0], read_store=inner_read_store)
assert inner_read_store[0] is not None
array_index = int(self.compute_expression(context, args[1]))
assert_val_matches_type(array_index, INT)
if isinstance(array, six.string_types):
                    # a hacky way to achieve some sort of mutability in strings
new_val = array[:array_index] + (rhs if isinstance(rhs, six.string_types) else chr(rhs)) + array[array_index+1:]
self.compute_expression(context, ["assign", STRING, args[0], constant(STRING, new_val)])
else:
array[array_index] = rhs
assert inner_read_store[0] is not None
dependants = inner_read_store[0]
self._observe_write(context, dependants + [(array_index, rhs)])
else:
assert False, expr
ret = rhs
elif expr[0] == 'var':
ret = context.get_val(expr)
self._observe_read(context, read_store, [(expr[2], ret)])
elif expr[0] == 'field':
inner_read_store = [None, is_lhs]
obj = self.compute_expression(context, expr[2], read_store=inner_read_store)
ret = obj[expr[3]]
dependants = inner_read_store[0]
if dependants is not None:
self._observe_read(context, read_store, dependants + [(expr[3], ret)])
elif expr[0] == 'val':
assert_val_matches_type(expr[2], expr[1])
ret = expr[2]
if isinstance(ret, six.string_types):
ret = ret.replace("\\n", "\n").replace("\\t", "\t") # TODO: proper unescaping
elif expr[0] == 'invoke':
if expr[2] in ['&&', '||']: # short circuiting
larg = self.compute_expression(context, expr[3][0])
assert type(larg) == bool
if (larg and expr[2] == '||') or (not larg and expr[2] == '&&'):
ret = larg
else:
ret = self.compute_expression(context, expr[3][1])
else:
if expr[2] == 'array_index':
inner_read_store = [None, is_lhs]
arg_vals = [self.compute_expression(context, x, read_store=inner_read_store) for x in expr[3][:1]]
arg_vals += [self.compute_expression(context, x) for x in expr[3][1:]]
else:
try:
arg_vals = [self.compute_expression(context, x) for x in expr[3]]
except:
#print expr
raise
if expr[2] == 'str' and expr[3][0][1] == CHAR: # TODO: fix it by replacing "str" with cast
ret = chr(arg_vals[0])
elif expr[2] == '_ctor':
ret = self.execute_ctor(expr[1], arg_vals, expressions=expr[3])
else:
try:
ret = self.execute_func(expr[2], arg_vals, expressions=expr[3])
except Exception:
raise
if expr[2] == 'array_index':
dependants = inner_read_store[0]
if dependants is not None:
self._observe_read(context, read_store, dependants + [(arg_vals[1], ret)])
if expr[2] == 'array_initializer' and expr[1] == STRING: # TODO: fix somehow
ret = ''.join([chr(x) for x in ret])
elif get_expr_type(expr) == STRING and type(ret) == list:
assert len(ret) == 0 or isinstance(ret[0], six.string_types), ret
ret = ''.join(ret)
elif expr[0] == '?:':
cond = self.compute_expression(context, expr[2])
if cond:
ret = self.compute_ternary_expression(context, expr[2], expr[3])
else:
ret = self.compute_ternary_expression(context, expr[2], expr[4])
elif expr[0] == 'cast':
assert can_cast(expr[1], expr[2][1]), expr
ret = self.compute_expression(context, expr[2])
if is_int_type(expr[1]):
ret = int(float(ret))
elif expr[1] == REAL:
ret = float(ret)
return ret
else:
raise UASTNotImplementedException("Execution of expressoin %s" % expr)
assert False, expr
try:
assert_val_matches_type(ret, expr[1])
except Exception as e:
#print("Type mismatch between %s and %s while evaluating: %s (%s: %s)" % (
# str(ret)[:100], expr[1], expr, type(e), e), file=sys.stderr)
#val_matches_type(ret, expr[1], True)
raise
if expr[1] in [REAL]:
ret = float(ret)
elif is_int_type(expr[1]):
ret = int(ret)
return ret
@watchable("block")
def execute_block(self, context, block):
for stmt in block:
if self.execute_statement(context, stmt):
return True
if context._flow_control in ['break', 'continue']:
break
assert context._flow_control is None
return False
@watchable("if_block")
def execute_if_block(self, context, expr, block):
# expr can be used by the watchers, e.g. for constructing the control-flow.
return self.execute_block(context, block)
@watchable("foreach_block")
def execute_foreach_block(self, context, expr, block):
# expr can be used by the watchers, e.g. for constructing the control-flow.
return self.execute_block(context, block)
@watchable("while_block")
def execute_while_block(self, context, expr, block):
# expr can be used by the watchers, e.g. for constructing the control-flow.
return self.execute_block(context, block)
@watchable("ternary_expression")
def compute_ternary_expression(self, context, pred_expr, expr):
# pred_expr can be used by the watchers, e.g. for constructing the control-flow.
return self.compute_expression(context, expr)
@watchable("statement")
def execute_statement(self, context, stmt):
if time.time() - self.start_time > self.timeout:
raise UASTTimeLimitExceeded()
context._instructions_count += 1
if DEBUG_INFO and context._instructions_count >= 10000 and hasattr(stmt, 'position'):
context._instructions_count = 0
print("DEBUG INFO: pos:", stmt.position, 'vars:', context._vals, file=sys.stderr)
if stmt[0] == 'if':
cond = self.compute_expression(context, stmt[2])
assert isinstance(cond, bool), (cond, stmt[2])
if cond:
return self.execute_if_block(context, stmt[2], stmt[3])
else:
return self.execute_if_block(context, stmt[2], stmt[4])
elif stmt[0] == 'foreach':
lst = self.compute_expression(context, stmt[3])
need_ord = isinstance(lst, six.string_types)
for x in lst:
context.set_val(stmt[2], x if not need_ord else ord(x))
if self.execute_foreach_block(context, stmt[3], stmt[4]):
return True
if context._flow_control == 'break':
context._flow_control = None
break
elif context._flow_control == 'continue':
context._flow_control = None
elif stmt[0] == 'while':
while True:
cond = self.compute_expression(context, stmt[2])
assert isinstance(cond, bool)
if not cond:
break
if self.execute_while_block(context, stmt[2], stmt[3]):
return True
if context._flow_control == 'break':
context._flow_control = None
break
elif context._flow_control == 'continue':
context._flow_control = None
assert not self.execute_while_block(context, stmt[2], stmt[4])
elif stmt[0] == 'break':
context._flow_control = 'break'
return False
elif stmt[0] == 'continue':
context._flow_control = 'continue'
return False
elif stmt[0] == 'return':
context._return_value = self.compute_expression(context, stmt[2])
return True
elif stmt[0] == 'noop':
return False
else:
self.compute_expression(context, stmt)
def execute_ctor(self, ret_type, args, expressions):
if ret_type.endswith("*"):
if len(args) == 0:
return [] if ret_type != 'char*' else ""
elif len(args) == 1 and not val_matches_type(args[0], INT):
# initialize with the first argument
return list(args[0])
else:
assert len(ret_type) > len(args) and all([x == '*' for x in ret_type[-len(args):]]), "TYPE: %s, ARGS: %s" % (ret_type, args)
subtype = ret_type
for arg in args:
assert_val_matches_type(arg, INT)
subtype = get_array_subtype(subtype)
        # We measured the size of an N-dimensional array initialized with default values of each type and
        # estimated the approximate number of bytes used per element. Based on this we cap the maximum
        # array size that a constructor is allowed to initialize.
approx_memory_overhead = {
INT: 8,
REAL: 8,
CHAR: 4,
STRING: 4,
BOOL: 1
}
memory_cutoff = 10*2**20 # Allocate no more than 10MiB during array initialization.
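        # For example, a 1000 x 1000 INT array costs roughly 1000 * 1000 * 8 bytes = 8 MB, which fits
        # under the 10 MiB cutoff, while a 2000 x 2000 INT array (~32 MB) fails the assert below.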
assert functools.reduce(mul, args) * approx_memory_overhead[subtype] <= memory_cutoff, (
"CTOR allocates too much memory %s %s, %s" % (ret_type, args, expressions))
return np.full(tuple(args), default_value(subtype)).tolist()
elif ret_type.endswith("%"):
return SortedSet() if len(args) == 0 else SortedSet(args[0])
elif ret_type.endswith('>'):
return SortedDict()
elif ret_type == INT:
assert len(args) == 0
return 0
elif ret_type.endswith('#'):
return self.execute_func(ret_type[:-1] + ".__init__", args, expressions=expressions)
else:
assert False, ret_type
@watchable('func_block')
def execute_func_block(self, context, func_name, func_vars, func_args, args_vals, expressions, block):
# func_name, func_vars, func_args, args_vals and expressions can be used by the watchers, e.g. for constructing
# the data-flow.
assert len(func_args) == len(args_vals)
assert expressions is None or len(expressions) == len(func_args)
self.execute_block(context, block)
def execute_func(self, func_name, args, tolerate_missing_this=False, expressions=None):
context = ExecutorContext()
if func_name not in self.funcs:
raise UASTNotImplementedException("Interpreter function %s" % func_name)
func = self.funcs[func_name]
if callable(func):
try:
return func(*args)
except Exception:
# print(func_name, args)
raise
if self.watchers:
self._watch(WatcherEvent("before_func", self, context, func, args))
globals_var = var(GLOBALS_NAME, func[1])
context.register_var(globals_var)
context.set_val(globals_var, self.globals_)
if func[0] == 'ctor':
ctor_type_name = type_to_record_name(func[1])
ctor_type = self.types[ctor_type_name]
ret_var = var("this", func[1])
context.register_var(ret_var)
context.set_val(ret_var, {})
if tolerate_missing_this and len(args) == len(get_func_args(func)) + 1:
args = args[1:]
if len(args) != len(get_func_args(func)):
#print >> sys.stderr, func
#print >> sys.stderr, args
#print >> sys.stderr, get_func_args(func)
raise UASTNotImplementedException("Polymorphism (len(%s) <> len(%s) when calling %s)" % (args, get_func_args(func), get_func_name(func)))
for arg, arg_def in zip(args, get_func_args(func)):
assert_val_matches_type(arg, get_expr_type(arg_def))
context.register_var(arg_def)
context.set_val(arg_def, arg)
for var_ in get_func_vars(func):
context.register_var(var_)
self.execute_func_block(context, func_name, get_func_vars(func), get_func_args(func), args, expressions, get_func_body(func))
assert_val_matches_type(context._return_value, get_func_return_type(func))
if self.watchers:
self._watch(WatcherEvent("after_func", self, context, context._return_value, func, args))
return context._return_value
| apache-2.0 |
jni/networkx | networkx/algorithms/components/connected.py | 10 | 4068 | # -*- coding: utf-8 -*-
"""
Connected components.
"""
# Copyright (C) 2004-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils.decorators import not_implemented_for
from networkx.algorithms.shortest_paths \
import single_source_shortest_path_length as sp_length
__authors__ = "\n".join(['Eben Kenah',
                         'Aric Hagberg <[email protected]>',
'Christopher Ellison'])
__all__ = ['number_connected_components', 'connected_components',
'connected_component_subgraphs','is_connected',
'node_connected_component']
@not_implemented_for('directed')
def connected_components(G):
"""Generate connected components.
Parameters
----------
G : NetworkX graph
An undirected graph
Returns
-------
comp : generator of lists
A list of nodes for each component of G.
Examples
--------
Generate a sorted list of connected components, largest first.
>>> G = nx.path_graph(4)
>>> G.add_path([10, 11, 12])
>>> sorted(nx.connected_components(G), key = len, reverse=True)
[[0, 1, 2, 3], [10, 11, 12]]
See Also
--------
strongly_connected_components
Notes
-----
For undirected graphs only.
"""
seen={}
for v in G:
if v not in seen:
c = sp_length(G, v)
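            # sp_length does a BFS from v, so c maps every node in v's connected component
            # to its hop distance from v.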
yield list(c)
seen.update(c)
@not_implemented_for('directed')
def connected_component_subgraphs(G, copy=True):
"""Generate connected components as subgraphs.
Parameters
----------
G : NetworkX graph
An undirected graph.
copy: bool (default=True)
If True make a copy of the graph attributes
Returns
-------
comp : generator
A generator of graphs, one for each connected component of G.
Examples
--------
>>> G = nx.path_graph(4)
>>> G.add_edge(5,6)
>>> graphs = list(nx.connected_component_subgraphs(G))
See Also
--------
connected_components
Notes
-----
For undirected graphs only.
Graph, node, and edge attributes are copied to the subgraphs by default.
"""
for c in connected_components(G):
if copy:
yield G.subgraph(c).copy()
else:
yield G.subgraph(c)
def number_connected_components(G):
"""Return the number of connected components.
Parameters
----------
G : NetworkX graph
An undirected graph.
Returns
-------
n : integer
Number of connected components
See Also
--------
connected_components
Notes
-----
For undirected graphs only.
"""
return len(list(connected_components(G)))
@not_implemented_for('directed')
def is_connected(G):
"""Return True if the graph is connected, false otherwise.
Parameters
----------
G : NetworkX Graph
An undirected graph.
Returns
-------
connected : bool
True if the graph is connected, false otherwise.
Examples
--------
>>> G = nx.path_graph(4)
>>> print(nx.is_connected(G))
True
See Also
--------
connected_components
Notes
-----
For undirected graphs only.
"""
if len(G) == 0:
raise nx.NetworkXPointlessConcept('Connectivity is undefined ',
'for the null graph.')
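    # A BFS from an arbitrary node reaches every node exactly when the graph is connected.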
return len(sp_length(G, next(G.nodes_iter()))) == len(G)
@not_implemented_for('directed')
def node_connected_component(G, n):
"""Return the nodes in the component of graph containing node n.
Parameters
----------
G : NetworkX Graph
An undirected graph.
n : node label
A node in G
Returns
-------
comp : lists
A list of nodes in component of G containing node n.
See Also
--------
connected_components
Notes
-----
For undirected graphs only.
"""
return list(sp_length(G, n))
| bsd-3-clause |
NickShaffner/rhea | rhea/build/boards/xilinx/_xula.py | 2 | 7089 | #
# Copyright (c) 2014-2015 Christopher Felton
#
from rhea.build import FPGA
from rhea.build.extintf import Port
# @todo: get SDRAM interface from rhea.cores.sdram
# from ...extintf._sdram import SDRAM
from rhea.build.toolflow import ISE
class Xula(FPGA):
vendor = 'xilinx'
family = 'spartan3A'
device = 'XC3S200A'
package = 'VQ100'
speed = '-4'
_name = 'xula'
default_clocks = {
'clock': dict(frequency=12e6, pins=(43,)),
'chan_clk': dict(frequency=1e6, pins=(44,))
}
default_ports = {
'chan': dict(pins=(36, 37, 39, 50, 52, 56, 57, 61, # 0-7
62, 68, 72, 73, 82, 83, 84, 35, # 8-15
                       34, 33, 32, 21, 20, 19, 13, 12, # 16-23
7, 4, 3, 97, 94, 93, 89, 88)) # 24-31
}
def get_flow(self, top=None):
return ISE(brd=self, top=top)
class XulaStickItMB(Xula):
def __init__(self):
""" StickIt board port definitions
This class defines the port to pin mapping for the Xess StickIt
board. The Xula module can be plugged into the StickIt board.
The StickIt board provides connections to many common physical
interfaces: pmod, shields, etc. Many of the pins are redefined
to match the names of the connector connections
"""
chan_pins = self.default_ports['chan']['pins']
chan_pins = chan_pins + self.default_clocks['chan_clk']['pins']
assert len(chan_pins) == 33
self.default_ports['chan']['pins'] = chan_pins
# the following are the bit-selects (chan[idx]) and not
# the pins.
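        # e.g. 'pm1' below maps PMOD A to chan bit-selects 15, 32, 16, 0 and PMOD B to 11, 28, 13, 14;
        # bit-select 32 refers to chan_clk, which was appended to the channel pins above.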
self.add_port_name('pm1', 'chan', (15, 32, 16, 0, # pmod A
11, 28, 13, 14)) # pmod B
self.add_port_name('pm2', 'chan', (17, 1, 18, 3, # pmod A
15, 32, 16, 0)) # pmod B
self.add_port_name('pm3', 'chan', (20, 4, 21, 5, # pmod A
17, 1, 18, 3)) # pmod B
self.add_port_name('pm4', 'chan', (22, 6, 23, 7, # pmod A
20, 4, 21, 5)) # pmod B
self.add_port_name('pm5', 'chan', (8, 25, 26, 10, # pmod A
22, 6, 23, 7)) # pmod B
self.add_port_name('pm6', 'chan', (11, 28, 13, 14, # pmod A
8, 25, 26, 10)) # pmod B
        # @todo: add the wing definitions
class Xula2(FPGA):
vendor = 'xilinx'
family = 'spartan6'
device = 'XC6SLX25'
package = 'FTG256'
speed = '-2'
_name = 'xula2'
default_clocks = {
'clock': dict(frequency=12e6, pins=('A9',)),
'chan_clk': dict(frequency=1e6, pins=('T7',))
}
default_ports = {
'chan': dict(pins=('R7','R15','R16','M15','M16','K15', #0-5
'K16','J16','J14','F15','F16','C16', #6-11
'C15','B16','B15','T4','R2','R1', #12-17
'M2','M1','K3','J4','H1','H2', #18-23
'F1','F2','E1','E2','C1','B1', #24-29
'B2','A2',) )
}
default_extintf = {
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# VGA:
'vga': None,
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# SDRAM: the Xula2 has a 256Mbit WINBond SDRAM,
# http://www.winbond.com/hq/enu/ProductAndSales/ProductLines/SpecialtyDRAM/SDRAM/W9825G6JH.htm
        # @todo: merge into rhea.system/rhea.cores interfaces
# 'sdram': SDRAM(
# Port('addr', pins=('E4', 'E3', 'D3', 'C3', # 0-3
# 'B12', 'A12', 'D12', 'E12', # 4-7
# 'G16', 'G12', 'F4', 'G11', # 8-11
# 'H13',) # 12
# ),
# Port('data', pins=('P6', 'T6', 'T5', 'P5', # 0-3
# 'R5', 'N5', 'P4', 'N4', # 4-7
# 'P12', 'R12', 'T13', 'T14', # 8-11
# 'R14', 'T15', 'T12', 'P11',) # 12-15
# ),
# Port('bs', pins=('H3', 'G3',) ),
# Port('cas', pins=('L3',) ),
# Port('ras', pins=('L4',) ),
# Port('ldqm', pins=('M4',) ),
# Port('udqm', pins=('L13',) ),
# Port('clk', pins=('K12',) ),
# Port('clkfb', pins=('K11',) ),
# Port('cs', pins=('H4',) ),
# Port('we', pins=('M3',) ),
# Port('cke', pins=('J12',)),
#
# # timing information, all in ns
# timing = dict(
# init = 200000.0,
# ras = 45.0,
# rcd = 20.0,
# ref = 64000000.0,
# rfc = 65.0,
# rp = 20.0,
# xsr = 75.0
# ),
# ddr = 0 # single data rate
# ),
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# SPI and MicroSD
#'flash': _extintf(
# Port('sclk', pins=()),
# Port('sdi', pins=()),
# Port('sdo', pins=()),
# port('cs', pins=()),
# ),
#
#'microsd' : None,
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
}
def get_flow(self, top=None):
return ISE(brd=self, top=top)
class Xula2StickItMB(Xula2):
def __init__(self):
""" """
# to simplify the connector mapping append chan_clk to the
# end of the channel pins. Note overlapping ports cannot
# be simultaneously used.
chan_pins = self.default_ports['chan']['pins']
chan_pins = chan_pins + self.default_clocks['chan_clk']['pins']
# assert len(chan_pins) == 33, "len == {}".format(len(chan_pins))
self.default_ports['chan']['pins'] = chan_pins
super(Xula2StickItMB, self).__init__()
self.add_port_name('pm1', 'chan', (0, 2, 4, 5,
32, 1, 3, 5))
self.add_port_name('pm2', 'chan', (15, 17, 19, 21,
16, 18, 20, 22))
self.add_port_name('pm3', 'chan', (23, 25, 27, 29,
24, 26, 28, 30))
# @todo: add grove board connectors
        # RPi GPIO connector; each port below is named after its BCM pin number and, where applicable, its function
self.add_port_name('bcm2_sda', 'chan', 31)
self.add_port_name('bcm3_scl', 'chan', 30)
self.add_port_name('bcm4_gpclk0', 'chan', 29)
self.add_port_name('bcm17', 'chan', 28)
self.add_port_name('bcm27_pcm_d', 'chan', 27)
self.add_port_name('bcm22', 'chan', 26)
# ...
self.add_port_name('bcm14_txd', 'chan', 14)
self.add_port_name('bcm15_rxd', 'chan', 13)
# @todo: finish ...
| mit |
insertnamehere1/maraschino | lib/sqlalchemy/dialects/sybase/base.py | 22 | 15166 | # sybase/base.py
# Copyright (C) 2010-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for Sybase Adaptive Server Enterprise (ASE).
Note that this dialect is no longer specific to Sybase iAnywhere.
ASE is the primary support platform.
"""
import operator
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT,DATE,DATETIME, FLOAT, NUMERIC,\
BIGINT,INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "as", "asc", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index", "index_lparen",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"iq", "is", "isolation", "join",
"key", "lateral", "left", "like",
"lock", "login", "long", "match",
"membership", "message", "mode", "modify",
"natural", "new", "no", "noholdlock",
"not", "notify", "null", "numeric",
"of", "off", "on", "open",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "syntax_error", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "unknown", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varchar", "variable",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) #.decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_):
return "UNITEXT"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
ischema_names = {
'integer' : INTEGER,
'unsigned int' : INTEGER, # TODO: unsigned flags
'unsigned smallint' : SMALLINT, # TODO: unsigned flags
'unsigned bigint' : BIGINT, # TODO: unsigned flags
'bigint': BIGINT,
'smallint' : SMALLINT,
'tinyint' : TINYINT,
'varchar' : VARCHAR,
'long varchar' : TEXT, # TODO
'char' : CHAR,
'decimal' : DECIMAL,
'numeric' : NUMERIC,
'float' : FLOAT,
'double' : NUMERIC, # TODO
'binary' : BINARY,
'varbinary' : VARBINARY,
'bit': BIT,
'image' : IMAGE,
'timestamp': TIMESTAMP,
'money': MONEY,
'smallmoney': MONEY,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
is True or False. when True, the connection should be configured
such that a DDL can take place subsequently. when False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
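            # e.g. this emits "SET IDENTITY_INSERT <table> ON" so the INSERT may supply an explicit
            # value for the IDENTITY column; post_exec() switches it back OFF afterwards.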
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
if select._limit:
#if select._limit == 1:
#s += "FIRST "
#else:
#s += "TOP %s " % (select._limit,)
s += "TOP %s " % (select._limit,)
if select._offset:
if not select._limit:
# FIXME: sybase doesn't allow an offset without a limit
# so use a huge value for TOP here
s += "TOP 1000000 "
s += "START AT %s " % (select._offset+1,)
return s
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
if column.table is None:
raise exc.InvalidRequestError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self.preparer.quote(
self._index_identifier(index.name), index.quote)
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = 'sybase'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name",
typemap={'user_name':Unicode})
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
result = connection.execute(
text("select sysobjects.name from sysobjects, sysusers "
"where sysobjects.uid=sysusers.uid and "
"sysusers.name=:schemaname and "
"sysobjects.type='U'",
bindparams=[
bindparam('schemaname', schema)
])
)
return [r[0] for r in result]
def has_table(self, connection, tablename, schema=None):
if schema is None:
schema = self.default_schema_name
result = connection.execute(
text("select sysobjects.name from sysobjects, sysusers "
"where sysobjects.uid=sysusers.uid and "
"sysobjects.name=:tablename and "
"sysusers.name=:schemaname and "
"sysobjects.type='U'",
bindparams=[
bindparam('tablename', tablename),
bindparam('schemaname', schema)
])
)
return result.scalar() is not None
def reflecttable(self, connection, table, include_columns):
raise NotImplementedError()
| mit |
raghavs1108/DataPlotter | examples/GLVolumeItem.py | 28 | 1968 | # -*- coding: utf-8 -*-
"""
Demonstrates GLVolumeItem for displaying volumetric data.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 200
w.show()
w.setWindowTitle('pyqtgraph example: GLVolumeItem')
#b = gl.GLBoxItem()
#w.addItem(b)
g = gl.GLGridItem()
g.scale(10, 10, 1)
w.addItem(g)
import numpy as np
## Hydrogen electron probability density
def psi(i, j, k, offset=(50,50,100)):
x = i-offset[0]
y = j-offset[1]
z = k-offset[2]
th = np.arctan2(z, (x**2+y**2)**0.5)
phi = np.arctan2(y, x)
r = (x**2 + y**2 + z **2)**0.5
a0 = 2
#ps = (1./81.) * (2./np.pi)**0.5 * (1./a0)**(3/2) * (6 - r/a0) * (r/a0) * np.exp(-r/(3*a0)) * np.cos(th)
ps = (1./81.) * 1./(6.*np.pi)**0.5 * (1./a0)**(3/2) * (r/a0)**2 * np.exp(-r/(3*a0)) * (3 * np.cos(th)**2 - 1)
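    # This appears to be the real-valued 3d_z^2 hydrogen orbital psi(n=3, l=2, m=0); its magnitude
    # is squared further below when the RGBA volume is built from `data`.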
return ps
#return ((1./81.) * (1./np.pi)**0.5 * (1./a0)**(3/2) * (r/a0)**2 * (r/a0) * np.exp(-r/(3*a0)) * np.sin(th) * np.cos(th) * np.exp(2 * 1j * phi))**2
data = np.fromfunction(psi, (100,100,200))
positive = np.log(np.clip(data, 0, data.max())**2)
negative = np.log(np.clip(-data, 0, -data.min())**2)
d2 = np.empty(data.shape + (4,), dtype=np.ubyte)
d2[..., 0] = positive * (255./positive.max())
d2[..., 1] = negative * (255./negative.max())
d2[..., 2] = d2[...,1]
d2[..., 3] = d2[..., 0]*0.3 + d2[..., 1]*0.3
d2[..., 3] = (d2[..., 3].astype(float) / 255.) **2 * 255
d2[:, 0, 0] = [255,0,0,100]
d2[0, :, 0] = [0,255,0,100]
d2[0, 0, :] = [0,0,255,100]
v = gl.GLVolumeItem(d2)
v.translate(-50,-50,-100)
w.addItem(v)
ax = gl.GLAxisItem()
w.addItem(ax)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
ktan2020/legacy-automation | win/Lib/hotshot/log.py | 20 | 6433 | import _hotshot
import os.path
import parser
import symbol
from _hotshot import \
WHAT_ENTER, \
WHAT_EXIT, \
WHAT_LINENO, \
WHAT_DEFINE_FILE, \
WHAT_DEFINE_FUNC, \
WHAT_ADD_INFO
__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]
ENTER = WHAT_ENTER
EXIT = WHAT_EXIT
LINE = WHAT_LINENO
class LogReader:
def __init__(self, logfn):
# fileno -> filename
self._filemap = {}
# (fileno, lineno) -> filename, funcname
self._funcmap = {}
self._reader = _hotshot.logreader(logfn)
self._nextitem = self._reader.next
self._info = self._reader.info
if 'current-directory' in self._info:
self.cwd = self._info['current-directory']
else:
self.cwd = None
# This mirrors the call stack of the profiled code as the log
# is read back in. It contains tuples of the form:
#
# (file name, line number of function def, function name)
#
self._stack = []
self._append = self._stack.append
self._pop = self._stack.pop
def close(self):
self._reader.close()
def fileno(self):
"""Return the file descriptor of the log reader's log file."""
return self._reader.fileno()
def addinfo(self, key, value):
"""This method is called for each additional ADD_INFO record.
This can be overridden by applications that want to receive
these events. The default implementation does not need to be
called by alternate implementations.
The initial set of ADD_INFO records do not pass through this
mechanism; this is only needed to receive notification when
new values are added. Subclasses can inspect self._info after
calling LogReader.__init__().
"""
pass
def get_filename(self, fileno):
try:
return self._filemap[fileno]
except KeyError:
raise ValueError, "unknown fileno"
def get_filenames(self):
return self._filemap.values()
def get_fileno(self, filename):
filename = os.path.normcase(os.path.normpath(filename))
for fileno, name in self._filemap.items():
if name == filename:
return fileno
raise ValueError, "unknown filename"
def get_funcname(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
raise ValueError, "unknown function location"
# Iteration support:
# This adds an optional (& ignored) parameter to next() so that the
# same bound method can be used as the __getitem__() method -- this
# avoids using an additional method call which kills the performance.
def next(self, index=0):
while 1:
# This call may raise StopIteration:
what, tdelta, fileno, lineno = self._nextitem()
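            # Note: for DEFINE_FILE / DEFINE_FUNC / ADD_INFO records the "tdelta" slot carries a
            # string (filename, function name, or info key) and "lineno" may carry the associated value.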
# handle the most common cases first
if what == WHAT_ENTER:
filename, funcname = self._decode_location(fileno, lineno)
t = (filename, lineno, funcname)
self._append(t)
return what, t, tdelta
if what == WHAT_EXIT:
try:
return what, self._pop(), tdelta
except IndexError:
raise StopIteration
if what == WHAT_LINENO:
filename, firstlineno, funcname = self._stack[-1]
return what, (filename, lineno, funcname), tdelta
if what == WHAT_DEFINE_FILE:
filename = os.path.normcase(os.path.normpath(tdelta))
self._filemap[fileno] = filename
elif what == WHAT_DEFINE_FUNC:
filename = self._filemap[fileno]
self._funcmap[(fileno, lineno)] = (filename, tdelta)
elif what == WHAT_ADD_INFO:
# value already loaded into self.info; call the
# overridable addinfo() handler so higher-level code
# can pick up the new value
if tdelta == 'current-directory':
self.cwd = lineno
self.addinfo(tdelta, lineno)
else:
raise ValueError, "unknown event type"
def __iter__(self):
return self
#
# helpers
#
def _decode_location(self, fileno, lineno):
try:
return self._funcmap[(fileno, lineno)]
except KeyError:
#
# This should only be needed when the log file does not
# contain all the DEFINE_FUNC records needed to allow the
# function name to be retrieved from the log file.
#
if self._loadfile(fileno):
filename = funcname = None
try:
filename, funcname = self._funcmap[(fileno, lineno)]
except KeyError:
filename = self._filemap.get(fileno)
funcname = None
self._funcmap[(fileno, lineno)] = (filename, funcname)
return filename, funcname
def _loadfile(self, fileno):
try:
filename = self._filemap[fileno]
except KeyError:
print "Could not identify fileId", fileno
return 1
if filename is None:
return 1
absname = os.path.normcase(os.path.join(self.cwd, filename))
try:
fp = open(absname)
except IOError:
return
st = parser.suite(fp.read())
fp.close()
# Scan the tree looking for def and lambda nodes, filling in
# self._funcmap with all the available information.
funcdef = symbol.funcdef
lambdef = symbol.lambdef
stack = [st.totuple(1)]
while stack:
tree = stack.pop()
try:
sym = tree[0]
except (IndexError, TypeError):
continue
if sym == funcdef:
self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
elif sym == lambdef:
self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
stack.extend(list(tree[1:]))
| mit |
e-dorigatti/pyspider | pyspider/libs/multiprocessing_queue.py | 14 | 2808 | import six
import platform
import multiprocessing
from multiprocessing.queues import Queue as BaseQueue
# The SharedCounter and Queue classes come from:
# https://github.com/vterron/lemon/commit/9ca6b4b
class SharedCounter(object):
""" A synchronized shared counter.
The locking done by multiprocessing.Value ensures that only a single
process or thread may read or write the in-memory ctypes object. However,
in order to do n += 1, Python performs a read followed by a write, so a
second process may read the old value before the new one is written by the
first process. The solution is to use a multiprocessing.Lock to guarantee
the atomicity of the modifications to Value.
This class comes almost entirely from Eli Bendersky's blog:
http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/
"""
def __init__(self, n=0):
self.count = multiprocessing.Value('i', n)
def increment(self, n=1):
""" Increment the counter by n (default = 1) """
with self.count.get_lock():
self.count.value += n
@property
def value(self):
""" Return the value of the counter """
return self.count.value
class MultiProcessingQueue(BaseQueue):
""" A portable implementation of multiprocessing.Queue.
Because of multithreading / multiprocessing semantics, Queue.qsize() may
raise the NotImplementedError exception on Unix platforms like Mac OS X
where sem_getvalue() is not implemented. This subclass addresses this
problem by using a synchronized shared counter (initialized to zero) and
increasing / decreasing its value every time the put() and get() methods
are called, respectively. This not only prevents NotImplementedError from
being raised, but also allows us to implement a reliable version of both
qsize() and empty().
"""
def __init__(self, *args, **kwargs):
super(MultiProcessingQueue, self).__init__(*args, **kwargs)
self.size = SharedCounter(0)
def put(self, *args, **kwargs):
self.size.increment(1)
super(MultiProcessingQueue, self).put(*args, **kwargs)
def get(self, *args, **kwargs):
v = super(MultiProcessingQueue, self).get(*args, **kwargs)
self.size.increment(-1)
return v
def qsize(self):
""" Reliable implementation of multiprocessing.Queue.qsize() """
return self.size.value
if platform.system() == 'Darwin':
if hasattr(multiprocessing, 'get_context'): # for py34
def Queue(maxsize=0):
return MultiProcessingQueue(maxsize, ctx=multiprocessing.get_context())
else:
def Queue(maxsize=0):
return MultiProcessingQueue(maxsize)
else:
from multiprocessing import Queue # flake8: noqa
| apache-2.0 |
zoeyangyy/event-extraction | tf_test/lstm-pos.py | 1 | 7622 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gensim
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from tensorflow.contrib import rnn
import numpy as np
'''
For Chinese word segmentation.
https://github.com/yongyehuang/Tensorflow-Tutorial/blob/master/Tutorial_6%20-%20Bi-directional%20LSTM%20for%20sequence%20labeling%20(Chinese%20segmentation).ipynb
'''
# ##################### config ######################
decay = 0.85
max_epoch = 5
max_max_epoch = 10
timestep_size = max_len = 32 # sentence length
vocab_size = 5159 # number of distinct characters in the samples, obtained during data preprocessing
input_size = embedding_size = 100 # character embedding length
class_num = 5
hidden_size = 128 # number of hidden units
layer_num = 2 # number of bi-LSTM layers
max_grad_norm = 5.0 # maximum gradient norm (gradients exceeding this value are clipped)
lr = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32)
batch_size = 128 # note: the type must be tf.int32
model_save_path = 'my_net/bi-lstm.ckpt' # where the model checkpoint is saved
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
X_inputs = tf.placeholder(tf.int32, [None, timestep_size], name='X_input')
y_inputs = tf.placeholder(tf.int32, [None, timestep_size], name='y_input')
model = gensim.models.Word2Vec.load("../raw_file/text100.model")
def bi_lstm(X_inputs):
"""build the bi-LSTMs network. Return the y_pred"""
    # ** 0. char embedding -- make sure you understand how embeddings work; it is essential for NLP
embedding = tf.get_variable("embedding", [vocab_size, embedding_size], dtype=tf.float32)
# X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]
inputs = tf.nn.embedding_lookup(embedding, X_inputs)
    # ** 1. LSTM layer
# lstm_fw_cell = rnn.BasicLSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
# lstm_bw_cell = rnn.BasicLSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
# # ** 2.dropout
# lstm_fw_cell = rnn.DropoutWrapper(cell=lstm_fw_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)
# lstm_bw_cell = rnn.DropoutWrapper(cell=lstm_bw_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)
    # ** 3. multi-layer LSTM
stacked_fw = []
for i in range(layer_num):
lstm_fw_cell = rnn.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)
stacked_fw.append(rnn.DropoutWrapper(cell=lstm_fw_cell, input_keep_prob=1.0, output_keep_prob=keep_prob))
stacked_bw = []
for i in range(layer_num):
lstm_bw_cell = rnn.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)
stacked_bw.append(rnn.DropoutWrapper(cell=lstm_bw_cell, input_keep_prob=1.0, output_keep_prob=keep_prob))
cell_fw = rnn.MultiRNNCell(cells=stacked_fw, state_is_tuple=True)
cell_bw = rnn.MultiRNNCell(cells=stacked_bw, state_is_tuple=True)
    # ** 4. initial states
initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)
initial_state_bw = cell_bw.zero_state(batch_size, tf.float32)
    # The two implementations below are equivalent
# **************************************************************
    # ** reshape inputs into the form required by rnn.static_bidirectional_rnn
    # ** from the documentation:
# inputs: A length T list of inputs, each a tensor of shape
# [batch_size, input_size], or a nested tuple of such elements.
# *************************************************************
# Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
# inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]
# inputs = tf.unstack(inputs, timestep_size, 1)
    # ** 5. bi-LSTM computation (TF wrapper): normally one would just call the static_bidirectional_rnn function below.
    # To expose the details of the computation, the code below unrolls the loop and implements it by hand instead.
# try:
# outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,
# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)
# except Exception: # Old TensorFlow version only returns outputs not states
# outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,
# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)
# output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])
# ***********************************************************
# ***********************************************************
    # ** 5. bi-LSTM computation (unrolled)
with tf.variable_scope('bidirectional_rnn'):
        # *** the two networks below compute output and state separately
# Forward direction
outputs_fw = list()
state_fw = initial_state_fw
with tf.variable_scope('fw'):
for timestep in range(timestep_size):
if timestep > 0:
tf.get_variable_scope().reuse_variables()
(output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)
outputs_fw.append(output_fw)
# backward direction
outputs_bw = list()
state_bw = initial_state_bw
with tf.variable_scope('bw') as bw_scope:
inputs = tf.reverse(inputs, [1])
for timestep in range(timestep_size):
if timestep > 0:
tf.get_variable_scope().reuse_variables()
(output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)
outputs_bw.append(output_bw)
        # *** then reverse output_bw along the timestep dimension
# outputs_bw.shape = [timestep_size, batch_size, hidden_size]
outputs_bw = tf.reverse(outputs_bw, [0])
        # concatenate the two outputs into [timestep_size, batch_size, hidden_size*2]
output = tf.concat([outputs_fw, outputs_bw], 2)
    # output.shape must line up with y_input.shape = [batch_size, timestep_size]
output = tf.transpose(output, perm=[1,0,2])
output = tf.reshape(output, [-1, hidden_size*2])
# ***********************************************************
softmax_w = weight_variable([hidden_size * 2, class_num])
softmax_b = bias_variable([class_num])
logits = tf.matmul(output, softmax_w) + softmax_b
return logits
y_pred = bi_lstm(X_inputs)
# adding extra statistics to monitor
# y_inputs.shape = [batch_size, timestep_size]
correct_prediction = tf.equal(tf.cast(tf.argmax(y_pred, 1), tf.int32), tf.reshape(y_inputs, [-1]))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels = tf.reshape(y_inputs, [-1]), logits = y_pred))
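# Shapes: y_pred is [batch_size*timestep_size, class_num] and the labels are flattened to the same
# length, so the loss averages the per-character cross-entropy over the whole batch.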
# ***** optimization *****
# collect all trainable variables of the model
tvars = tf.trainable_variables()
# gradients of the loss with respect to each variable (clipped by global norm)
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
# apply the gradient updates
train_op = optimizer.apply_gradients( zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
print('Finished creating the bi-lstm model.') | mit |
longman694/youtube-dl | youtube_dl/extractor/tass.py | 64 | 2016 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
js_to_json,
qualities,
)
class TassIE(InfoExtractor):
_VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://tass.ru/obschestvo/1586870',
'md5': '3b4cdd011bc59174596b6145cda474a4',
'info_dict': {
'id': '1586870',
'ext': 'mp4',
'title': 'Посетителям московского зоопарка показали красную панду',
'description': 'Приехавшую из Дублина Зейну можно увидеть в павильоне "Кошки тропиков"',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://itar-tass.com/obschestvo/1600009',
'only_matching': True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
sources = json.loads(js_to_json(self._search_regex(
r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources')))
quality = qualities(['sd', 'hd'])
formats = []
for source in sources:
video_url = source.get('file')
if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'):
continue
label = source.get('label')
formats.append({
'url': video_url,
'format_id': label,
'quality': quality(label),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'formats': formats,
}
| unlicense |
ryfeus/lambda-packs | Tensorflow_OpenCV_Nightly/source/tensorflow/contrib/keras/python/keras/applications/vgg16.py | 30 | 9077 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image
Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import preprocess_input # pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.engine.topology import get_source_inputs
from tensorflow.contrib.keras.python.keras.layers import Conv2D
from tensorflow.contrib.keras.python.keras.layers import Dense
from tensorflow.contrib.keras.python.keras.layers import Flatten
from tensorflow.contrib.keras.python.keras.layers import GlobalAveragePooling2D
from tensorflow.contrib.keras.python.keras.layers import GlobalMaxPooling2D
from tensorflow.contrib.keras.python.keras.layers import Input
from tensorflow.contrib.keras.python.keras.layers import MaxPooling2D
from tensorflow.contrib.keras.python.keras.models import Model
from tensorflow.contrib.keras.python.keras.utils import layer_utils
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Arguments:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
          or `(3, 224, 224)` (with `channels_first` data format).
          It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
# Block 1
x = Conv2D(
64, (3, 3), activation='relu', padding='same',
name='block1_conv1')(img_input)
x = Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape,
'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model
| mit |
iivic/BoiseStateX | common/test/acceptance/tests/studio/test_studio_general.py | 105 | 5669 | """
Acceptance tests for Studio.
"""
from unittest import skip
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.asset_index import AssetIndexPage
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.checklists import ChecklistsPage
from ...pages.studio.course_info import CourseUpdatesPage
from ...pages.studio.edit_tabs import PagesPage
from ...pages.studio.import_export import ExportCoursePage, ImportCoursePage
from ...pages.studio.howitworks import HowitworksPage
from ...pages.studio.index import DashboardPage
from ...pages.studio.login import LoginPage
from ...pages.studio.users import CourseTeamPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_graders import GradingPage
from ...pages.studio.signup import SignupPage
from ...pages.studio.textbooks import TextbooksPage
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
class LoggedOutTest(WebAppTest):
"""
Smoke test for pages in Studio that are visible when logged out.
"""
def setUp(self):
super(LoggedOutTest, self).setUp()
self.pages = [LoginPage(self.browser), HowitworksPage(self.browser), SignupPage(self.browser)]
def test_page_existence(self):
"""
Make sure that all the pages are accessible.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
for page in self.pages:
page.visit()
class LoggedInPagesTest(WebAppTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and do not have a course yet.
"""
def setUp(self):
super(LoggedInPagesTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_dashboard_no_courses(self):
"""
Make sure that you can get to the dashboard page without a course.
"""
self.auth_page.visit()
self.dashboard_page.visit()
class CoursePagesTest(StudioCourseTest):
"""
Tests that verify the pages in Studio that you can get to when logged
in and have a course.
"""
COURSE_ID_SEPARATOR = "."
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CoursePagesTest, self).setUp()
self.pages = [
clz(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'])
for clz in [
AssetIndexPage, ChecklistsPage, CourseUpdatesPage,
PagesPage, ExportCoursePage, ImportCoursePage, CourseTeamPage, CourseOutlinePage, SettingsPage,
AdvancedSettingsPage, GradingPage, TextbooksPage
]
]
def test_page_redirect(self):
"""
/course/ is the base URL for all courses, but by itself, it should
redirect to /home/.
"""
self.dashboard_page = DashboardPage(self.browser) # pylint: disable=attribute-defined-outside-init
self.dashboard_page.visit()
self.assertEqual(self.browser.current_url.strip('/').rsplit('/')[-1], 'home')
@skip('Intermittently failing with Page not found error for Assets. TE-418')
def test_page_existence(self):
"""
Make sure that all these pages are accessible once you have a course.
Rather than fire up the browser just to check each url,
do them all sequentially in this testcase.
"""
# In the real workflow you will be at the dashboard page
# after you log in. This test was intermittently failing on the
# first (asset) page load with a 404.
# Not exactly sure why, so adding in a visit
# to the dashboard page here to replicate the usual flow.
self.dashboard_page = DashboardPage(self.browser)
self.dashboard_page.visit()
# Verify that each page is available
for page in self.pages:
page.visit()
class DiscussionPreviewTest(StudioCourseTest):
"""
Tests that Inline Discussions are rendered with a custom preview in Studio
"""
def setUp(self):
super(DiscussionPreviewTest, self).setUp()
cop = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
cop.visit()
self.unit = cop.section('Test Section').subsection('Test Subsection').expand_subsection().unit('Test Unit')
self.unit.go_to()
def populate_course_fixture(self, course_fixture):
"""
Return a test course fixture containing a discussion component.
"""
course_fixture.add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
)
)
)
)
)
def test_is_preview(self):
"""
Ensure that the preview version of the discussion is rendered.
"""
self.assertTrue(self.unit.q(css=".discussion-preview").present)
self.assertFalse(self.unit.q(css=".discussion-show").present)
| agpl-3.0 |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/test/test_posixpath.py | 71 | 17716 | import unittest
from test import test_support, test_genericpath
import posixpath, os
from posixpath import realpath, abspath, dirname, basename
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(test_support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
"""
On Windows, posixpath.abspath still returns paths with backslashes
instead of posix forward slashes. If this is the case, several tests
fail, so skip them.
"""
found_backslash = '\\' in ABSTFN
msg = "ABSTFN is not a posix path - tests fail"
return [test, unittest.skip(msg)(test)][found_backslash]
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class PosixPathTest(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
test_support.unlink(test_support.TESTFN + suffix)
safe_rmdir(test_support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"), "/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"), "/foo/bar/baz/")
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path), ("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path), ("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path), ("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"), (filename + ext + "/", ""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
def test_islink(self):
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
os.remove(test_support.TESTFN + "1")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(test_support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(test_support.TESTFN + "2"), True)
finally:
            if not f.closed:
                f.close()
def test_samefile(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "1"
),
True
)
# If we don't have links, assume that os.stat doesn't return
# reasonable inode information and thus, that samefile() doesn't
# work.
if hasattr(os, "symlink"):
os.symlink(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
)
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
False
)
finally:
            if not f.closed:
                f.close()
def test_samestat(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "1")
),
True
)
# If we don't have links, assume that os.stat() doesn't return
# reasonable inode information and thus, that samestat() doesn't
# work.
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
False
)
finally:
            if not f.closed:
                f.close()
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
try:
import pwd
except ImportError:
pass
else:
self.assertIsInstance(posixpath.expanduser("~/"), basestring)
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assertIsInstance(posixpath.expanduser("~root/"), basestring)
self.assertIsInstance(posixpath.expanduser("~foo/"), basestring)
with test_support.EnvironmentVarGuard() as env:
env['HOME'] = '/'
self.assertEqual(posixpath.expanduser("~"), "/")
self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
@skip_if_ABSTFN_contains_backslash
def test_realpath_curdir(self):
self.assertEqual(realpath('.'), os.getcwd())
self.assertEqual(realpath('./.'), os.getcwd())
self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
@skip_if_ABSTFN_contains_backslash
def test_realpath_pardir(self):
self.assertEqual(realpath('..'), dirname(os.getcwd()))
self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
if hasattr(os, "symlink"):
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
test_support.unlink(ABSTFN)
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
old_path = abspath('.')
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
self.assertEqual(realpath(ABSTFN+"1/x"), ABSTFN+"1/x")
self.assertEqual(realpath(ABSTFN+"1/.."), dirname(ABSTFN))
self.assertEqual(realpath(ABSTFN+"1/../x"), dirname(ABSTFN) + "/x")
os.symlink(ABSTFN+"x", ABSTFN+"y")
self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "y"),
ABSTFN + "y")
self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "1"),
ABSTFN + "1")
os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
self.assertEqual(realpath(ABSTFN+"a"), ABSTFN+"a/b")
os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
basename(ABSTFN) + "c", ABSTFN+"c")
self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c")
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN)
test_support.unlink(ABSTFN+"1")
test_support.unlink(ABSTFN+"2")
test_support.unlink(ABSTFN+"y")
test_support.unlink(ABSTFN+"c")
test_support.unlink(ABSTFN+"a")
def test_realpath_repeated_indirect_symlinks(self):
# Issue #6975.
try:
os.mkdir(ABSTFN)
os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
os.symlink('self/self/self', ABSTFN + '/link')
self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
finally:
test_support.unlink(ABSTFN + '/self')
test_support.unlink(ABSTFN + '/link')
safe_rmdir(ABSTFN)
def test_realpath_deep_recursion(self):
depth = 10
old_path = abspath('.')
try:
os.mkdir(ABSTFN)
for i in range(depth):
os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
os.symlink('.', ABSTFN + '/0')
self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
# Test using relative path as well.
os.chdir(ABSTFN)
self.assertEqual(realpath('%d' % depth), ABSTFN)
finally:
os.chdir(old_path)
for i in range(depth + 1):
test_support.unlink(ABSTFN + '/%d' % i)
safe_rmdir(ABSTFN)
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
# realpath("a"). This should return /usr/share/doc/a/.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/y")
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
os.chdir(ABSTFN + "/k")
self.assertEqual(realpath("a"), ABSTFN + "/y/a")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "/k")
safe_rmdir(ABSTFN + "/y")
safe_rmdir(ABSTFN)
def test_realpath_resolve_before_normalizing(self):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
# a/k/y
#
# and a symbolic link 'link-y' pointing to 'y' in directory 'a',
# then realpath("link-y/..") should return 'k', not 'a'.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.mkdir(ABSTFN + "/k/y")
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
# Relative path.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
ABSTFN + "/k")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "/link-y")
safe_rmdir(ABSTFN + "/k/y")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_realpath_resolve_first(self):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.symlink(ABSTFN, ABSTFN + "link")
os.chdir(dirname(ABSTFN))
base = basename(ABSTFN)
self.assertEqual(realpath(base + "link"), ABSTFN)
self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "link")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"), "../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
class PosixCommonTest(test_genericpath.CommonTest):
pathmodule = posixpath
attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
def test_main():
test_support.run_unittest(PosixPathTest, PosixCommonTest)
if __name__=="__main__":
test_main()
| mit |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/test/test_cfgparser.py | 71 | 27744 | import ConfigParser
import StringIO
import os
import unittest
import UserDict
from test import test_support
class SortedDict(UserDict.UserDict):
def items(self):
result = self.data.items()
result.sort()
return result
def keys(self):
result = self.data.keys()
result.sort()
return result
def values(self):
# XXX never used?
result = self.items()
return [i[1] for i in result]
def iteritems(self): return iter(self.items())
def iterkeys(self): return iter(self.keys())
__iter__ = iterkeys
def itervalues(self): return iter(self.values())
class TestCaseBase(unittest.TestCase):
allow_no_value = False
def newconfig(self, defaults=None):
if defaults is None:
self.cf = self.config_class(allow_no_value=self.allow_no_value)
else:
self.cf = self.config_class(defaults,
allow_no_value=self.allow_no_value)
return self.cf
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
sio = StringIO.StringIO(string)
cf.readfp(sio)
return cf
def test_basic(self):
config_string = (
"[Foo Bar]\n"
"foo=bar\n"
"[Spacey Bar]\n"
"foo = bar\n"
"[Commented Bar]\n"
"foo: bar ; comment\n"
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[Section\\with$weird%characters[\t]\n"
"[Internationalized Stuff]\n"
"foo[bg]: Bulgarian\n"
"foo=Default\n"
"foo[en]=English\n"
"foo[de]=Deutsch\n"
"[Spaces]\n"
"key with spaces : value\n"
"another with spaces = splat!\n"
)
if self.allow_no_value:
config_string += (
"[NoValue]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
L = cf.sections()
L.sort()
E = [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Long Line',
r'Section\with$weird%characters[' '\t',
r'Spaces',
r'Spacey Bar',
]
if self.allow_no_value:
E.append(r'NoValue')
E.sort()
eq = self.assertEqual
eq(L, E)
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
eq(cf.get('Foo Bar', 'foo'), 'bar')
eq(cf.get('Spacey Bar', 'foo'), 'bar')
eq(cf.get('Commented Bar', 'foo'), 'bar')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value'), None)
self.assertNotIn('__name__', cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
self.assertTrue(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existence of option")
self.assertFalse(cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
self.assertFalse(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existence of option"
" that was removed")
self.assertRaises(ConfigParser.NoSectionError,
cf.remove_option, 'No Such Section', 'foo')
eq(cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.')
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(cf.get("a", "b"), "value",
"could not locate option, expecting case-insensitive option names")
self.assertTrue(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
cf.has_option("A", opt),
"has_option() returned false for option which should exist")
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption: first line\n\tsecond line\n")
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring("[section]\nnekey=nevalue\n",
defaults={"key":"value"})
self.assertTrue(cf.has_option("section", "Key"))
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive option names")
cf = self.newconfig({"Foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive defaults")
def test_parse_errors(self):
self.newconfig()
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces: splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces= splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n:value-without-option-name\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n=value-without-option-name\n")
self.parse_error(ConfigParser.MissingSectionHeaderError,
"No Section!\n")
def parse_error(self, exc, src):
sio = StringIO.StringIO(src)
self.assertRaises(exc, self.cf.readfp, sio)
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(cf.sections(), [],
"new ConfigParser should have no defined sections")
self.assertFalse(cf.has_section("Foo"),
"new ConfigParser should have no acknowledged "
"sections")
self.assertRaises(ConfigParser.NoSectionError,
cf.options, "Foo")
self.assertRaises(ConfigParser.NoSectionError,
cf.set, "foo", "bar", "value")
self.get_error(ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
self.get_error(ConfigParser.NoOptionError, "foo", "bar")
def get_error(self, exc, section, option):
try:
self.cf.get(section, option)
except exc, e:
return e
else:
self.fail("expected exception type %s.%s"
% (exc.__module__, exc.__name__))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1=1\n"
"T2=TRUE\n"
"T3=True\n"
"T4=oN\n"
"T5=yes\n"
"F1=0\n"
"F2=FALSE\n"
"F3=False\n"
"F4=oFF\n"
"F5=nO\n"
"E1=2\n"
"E2=foo\n"
"E3=-1\n"
"E4=0.1\n"
"E5=FALSE AND MORE"
)
for x in range(1, 5):
self.assertTrue(cf.getboolean('BOOLTEST', 't%d' % x))
self.assertFalse(cf.getboolean('BOOLTEST', 'f%d' % x))
self.assertRaises(ValueError,
cf.getboolean, 'BOOLTEST', 'e%d' % x)
def test_weird_errors(self):
cf = self.newconfig()
cf.add_section("Foo")
self.assertRaises(ConfigParser.DuplicateSectionError,
cf.add_section, "Foo")
def test_write(self):
config_string = (
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[DEFAULT]\n"
"foo: another very\n"
" long line\n"
)
if self.allow_no_value:
config_string += (
"[Valueless]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
output = StringIO.StringIO()
cf.write(output)
expect_string = (
"[DEFAULT]\n"
"foo = another very\n"
"\tlong line\n"
"\n"
"[Long Line]\n"
"foo = this line is much, much longer than my editor\n"
"\tlikes it.\n"
"\n"
)
if self.allow_no_value:
expect_string += (
"[Valueless]\n"
"option-without-value\n"
"\n"
)
self.assertEqual(output.getvalue(), expect_string)
def test_set_string_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we don't get an exception when setting values in
# an existing section using strings:
class mystr(str):
pass
cf.set("sect", "option1", "splat")
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
try:
unicode
except NameError:
pass
else:
cf.set("sect", "option1", unicode("splat"))
cf.set("sect", "option2", unicode("splat"))
def test_read_returns_file_list(self):
file1 = test_support.findfile("cfgparser.1")
# check when we pass a mix of readable and non-readable files:
cf = self.newconfig()
parsed_files = cf.read([file1, "nonexistent-file"])
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only a filename:
cf = self.newconfig()
parsed_files = cf.read(file1)
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only missing files:
cf = self.newconfig()
parsed_files = cf.read(["nonexistent-file"])
self.assertEqual(parsed_files, [])
# check when we pass no files:
cf = self.newconfig()
parsed_files = cf.read([])
self.assertEqual(parsed_files, [])
# shared by subclasses
def get_interpolation_config(self):
return self.fromstring(
"[Foo]\n"
"bar=something %(with1)s interpolation (1 step)\n"
"bar9=something %(with9)s lots of interpolation (9 steps)\n"
"bar10=something %(with10)s lots of interpolation (10 steps)\n"
"bar11=something %(with11)s lots of interpolation (11 steps)\n"
"with11=%(with10)s\n"
"with10=%(with9)s\n"
"with9=%(with8)s\n"
"with8=%(With7)s\n"
"with7=%(WITH6)s\n"
"with6=%(with5)s\n"
"With5=%(with4)s\n"
"WITH4=%(with3)s\n"
"with3=%(with2)s\n"
"with2=%(with1)s\n"
"with1=with\n"
"\n"
"[Mutual Recursion]\n"
"foo=%(bar)s\n"
"bar=%(foo)s\n"
"\n"
"[Interpolation Error]\n"
"name=%(reference)s\n",
# no definition for 'reference'
defaults={"getname": "%(__name__)s"})
def check_items_config(self, expected):
cf = self.fromstring(
"[section]\n"
"name = value\n"
"key: |%(name)s| \n"
"getdefault: |%(default)s|\n"
"getname: |%(__name__)s|",
defaults={"default": "<default>"})
L = list(cf.items("section"))
L.sort()
self.assertEqual(L, expected)
class ConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.ConfigParser
allow_no_value = True
def test_interpolation(self):
rawval = {
ConfigParser.ConfigParser: ("something %(with11)s "
"lots of interpolation (11 steps)"),
ConfigParser.SafeConfigParser: "%(with1)s",
}
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "Foo")
eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something with lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something with lots of interpolation (10 steps)")
self.get_error(ConfigParser.InterpolationDepthError, "Foo", "bar11")
def test_interpolation_missing_value(self):
self.get_interpolation_config()
e = self.get_error(ConfigParser.InterpolationError,
"Interpolation Error", "name")
self.assertEqual(e.reference, "reference")
self.assertEqual(e.section, "Interpolation Error")
self.assertEqual(e.option, "name")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|<default>|'),
('getname', '|section|'),
('key', '|value|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13, '%('])
cf.set('non-string', 'dict', {'pi': 3.14159, '%(': 1,
'%(list)': '%(list)'})
cf.set('non-string', 'string_with_interpolation', '%(list)s')
cf.set('non-string', 'no-value')
self.assertEqual(cf.get('non-string', 'int', raw=True), 1)
self.assertRaises(TypeError, cf.get, 'non-string', 'int')
self.assertEqual(cf.get('non-string', 'list', raw=True),
[0, 1, 1, 2, 3, 5, 8, 13, '%('])
self.assertRaises(TypeError, cf.get, 'non-string', 'list')
self.assertEqual(cf.get('non-string', 'dict', raw=True),
{'pi': 3.14159, '%(': 1, '%(list)': '%(list)'})
self.assertRaises(TypeError, cf.get, 'non-string', 'dict')
self.assertEqual(cf.get('non-string', 'string_with_interpolation',
raw=True), '%(list)s')
self.assertRaises(ValueError, cf.get, 'non-string',
'string_with_interpolation', raw=False)
self.assertEqual(cf.get('non-string', 'no-value'), None)
class MultilineValuesTestCase(TestCaseBase):
config_class = ConfigParser.ConfigParser
wonderful_spam = ("I'm having spam spam spam spam "
"spam spam spam beaked beans spam "
"spam spam and spam!").replace(' ', '\t\n')
def setUp(self):
cf = self.newconfig()
for i in range(100):
s = 'section{}'.format(i)
cf.add_section(s)
for j in range(10):
cf.set(s, 'lovely_spam{}'.format(j), self.wonderful_spam)
with open(test_support.TESTFN, 'w') as f:
cf.write(f)
def tearDown(self):
os.unlink(test_support.TESTFN)
def test_dominating_multiline_values(self):
# we're reading from file because this is where the code changed
# during performance updates in Python 3.2
cf_from_file = self.newconfig()
with open(test_support.TESTFN) as f:
cf_from_file.readfp(f)
self.assertEqual(cf_from_file.get('section8', 'lovely_spam4'),
self.wonderful_spam.replace('\t\n', '\n'))
class RawConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.RawConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "%(__name__)s")
eq(cf.get("Foo", "bar"),
"something %(with1)s interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something %(with9)s lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something %(with10)s lots of interpolation (10 steps)")
eq(cf.get("Foo", "bar11"),
"something %(with11)s lots of interpolation (11 steps)")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|%(default)s|'),
('getname', '|%(__name__)s|'),
('key', '|%(name)s|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13])
cf.set('non-string', 'dict', {'pi': 3.14159})
self.assertEqual(cf.get('non-string', 'int'), 1)
self.assertEqual(cf.get('non-string', 'list'),
[0, 1, 1, 2, 3, 5, 8, 13])
self.assertEqual(cf.get('non-string', 'dict'), {'pi': 3.14159})
class SafeConfigParserTestCase(ConfigParserTestCase):
config_class = ConfigParser.SafeConfigParser
def test_safe_interpolation(self):
# See http://www.python.org/sf/511737
cf = self.fromstring("[section]\n"
"option1=xxx\n"
"option2=%(option1)s/xxx\n"
"ok=%(option1)s/%%s\n"
"not_ok=%(option2)s/%%s")
self.assertEqual(cf.get("section", "ok"), "xxx/%s")
self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")
def test_set_malformatted_interpolation(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
self.assertEqual(cf.get('sect', "option1"), "foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "%foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "foo%")
self.assertRaises(ValueError, cf.set, "sect", "option1", "f%oo")
self.assertEqual(cf.get('sect', "option1"), "foo")
# bug #5741: double percents are *not* malformed
cf.set("sect", "option2", "foo%%bar")
self.assertEqual(cf.get("sect", "option2"), "foo%bar")
def test_set_nonstring_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we get a TypeError when setting non-string values
# in an existing section:
self.assertRaises(TypeError, cf.set, "sect", "option1", 1)
self.assertRaises(TypeError, cf.set, "sect", "option1", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option1", object())
self.assertRaises(TypeError, cf.set, "sect", "option2", 1)
self.assertRaises(TypeError, cf.set, "sect", "option2", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option2", object())
def test_add_section_default_1(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "default")
def test_add_section_default_2(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "DEFAULT")
class SafeConfigParserTestCaseNoValue(SafeConfigParserTestCase):
allow_no_value = True
class TestChainMap(unittest.TestCase):
def test_issue_12717(self):
d1 = dict(red=1, green=2)
d2 = dict(green=3, blue=4)
dcomb = d2.copy()
dcomb.update(d1)
cm = ConfigParser._Chainmap(d1, d2)
self.assertIsInstance(cm.keys(), list)
self.assertEqual(set(cm.keys()), set(dcomb.keys())) # keys()
self.assertEqual(set(cm.values()), set(dcomb.values())) # values()
self.assertEqual(set(cm.items()), set(dcomb.items())) # items()
self.assertEqual(set(cm), set(dcomb)) # __iter__ ()
self.assertEqual(cm, dcomb) # __eq__()
self.assertEqual([cm[k] for k in dcomb], dcomb.values()) # __getitem__()
klist = 'red green blue black brown'.split()
self.assertEqual([cm.get(k, 10) for k in klist],
[dcomb.get(k, 10) for k in klist]) # get()
self.assertEqual([k in cm for k in klist],
[k in dcomb for k in klist]) # __contains__()
with test_support.check_py3k_warnings():
self.assertEqual([cm.has_key(k) for k in klist],
[dcomb.has_key(k) for k in klist]) # has_key()
class Issue7005TestCase(unittest.TestCase):
"""Test output when None is set() as a value and allow_no_value == False.
http://bugs.python.org/issue7005
"""
expected_output = "[section]\noption = None\n\n"
def prepare(self, config_class):
# This is the default, but that's the point.
cp = config_class(allow_no_value=False)
cp.add_section("section")
cp.set("section", "option", None)
sio = StringIO.StringIO()
cp.write(sio)
return sio.getvalue()
def test_none_as_value_stringified(self):
output = self.prepare(ConfigParser.ConfigParser)
self.assertEqual(output, self.expected_output)
def test_none_as_value_stringified_raw(self):
output = self.prepare(ConfigParser.RawConfigParser)
self.assertEqual(output, self.expected_output)
class SortedTestCase(RawConfigParserTestCase):
def newconfig(self, defaults=None):
self.cf = self.config_class(defaults=defaults, dict_type=SortedDict)
return self.cf
def test_sorted(self):
self.fromstring("[b]\n"
"o4=1\n"
"o3=2\n"
"o2=3\n"
"o1=4\n"
"[a]\n"
"k=v\n")
output = StringIO.StringIO()
self.cf.write(output)
self.assertEqual(output.getvalue(),
"[a]\n"
"k = v\n\n"
"[b]\n"
"o1 = 4\n"
"o2 = 3\n"
"o3 = 2\n"
"o4 = 1\n\n")
class ExceptionPicklingTestCase(unittest.TestCase):
"""Tests for issue #13760: ConfigParser exceptions are not picklable."""
def test_error(self):
import pickle
e1 = ConfigParser.Error('value')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(repr(e1), repr(e2))
def test_nosectionerror(self):
import pickle
e1 = ConfigParser.NoSectionError('section')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(repr(e1), repr(e2))
def test_nooptionerror(self):
import pickle
e1 = ConfigParser.NoOptionError('option', 'section')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_duplicatesectionerror(self):
import pickle
e1 = ConfigParser.DuplicateSectionError('section')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationerror(self):
import pickle
e1 = ConfigParser.InterpolationError('option', 'section', 'msg')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationmissingoptionerror(self):
import pickle
e1 = ConfigParser.InterpolationMissingOptionError('option', 'section',
'rawval', 'reference')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(e1.reference, e2.reference)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationsyntaxerror(self):
import pickle
e1 = ConfigParser.InterpolationSyntaxError('option', 'section', 'msg')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_interpolationdeptherror(self):
import pickle
e1 = ConfigParser.InterpolationDepthError('option', 'section',
'rawval')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.section, e2.section)
self.assertEqual(e1.option, e2.option)
self.assertEqual(repr(e1), repr(e2))
def test_parsingerror(self):
import pickle
e1 = ConfigParser.ParsingError('source')
e1.append(1, 'line1')
e1.append(2, 'line2')
e1.append(3, 'line3')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.filename, e2.filename)
self.assertEqual(e1.errors, e2.errors)
self.assertEqual(repr(e1), repr(e2))
def test_missingsectionheadererror(self):
import pickle
e1 = ConfigParser.MissingSectionHeaderError('filename', 123, 'line')
pickled = pickle.dumps(e1)
e2 = pickle.loads(pickled)
self.assertEqual(e1.message, e2.message)
self.assertEqual(e1.args, e2.args)
self.assertEqual(e1.line, e2.line)
self.assertEqual(e1.filename, e2.filename)
self.assertEqual(e1.lineno, e2.lineno)
self.assertEqual(repr(e1), repr(e2))
def test_main():
test_support.run_unittest(
ConfigParserTestCase,
MultilineValuesTestCase,
RawConfigParserTestCase,
SafeConfigParserTestCase,
SafeConfigParserTestCaseNoValue,
SortedTestCase,
Issue7005TestCase,
TestChainMap,
ExceptionPicklingTestCase,
)
if __name__ == "__main__":
test_main()
| mit |
ondrejmular/pcs | pcs/utils.py | 3 | 87422 | # pylint: disable=too-many-lines
import os
import sys
import subprocess
import xml.dom.minidom
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
import re
import json
import tempfile
import signal
import time
from io import BytesIO
import tarfile
import getpass
import base64
import threading
import logging
from functools import lru_cache
from urllib.parse import urlencode
from typing import (
Any,
Dict,
Sequence,
Tuple,
)
from pcs import settings, usage
from pcs.common import (
file as pcs_file,
file_type_codes,
pcs_pycurl as pycurl,
)
from pcs.common.host import PcsKnownHost
from pcs.common.reports import ReportProcessor
from pcs.common.reports.item import ReportItemList
from pcs.common.reports.messages import CibUpgradeFailedToMinimalRequiredVersion
from pcs.common.services.interfaces import ServiceManagerInterface
from pcs.common.services.errors import ManageServiceError
from pcs.cli.common import middleware
from pcs.cli.common.env_cli import Env
from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.common.lib_wrapper import Library
from pcs.cli.common.parse_args import InputModifiers
from pcs.cli.reports import (
output as reports_output,
process_library_reports,
ReportProcessorToConsole,
)
import pcs.cli.booth.env
from pcs.cli.file import metadata as cli_file_metadata
import pcs.lib.corosync.config_parser as corosync_conf_parser
from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
from pcs.lib.env import LibraryEnvironment
from pcs.lib.errors import LibraryError
from pcs.lib.external import (
CommandRunner,
is_proxy_set,
)
from pcs.lib.file.instance import FileInstance as LibFileInstance
from pcs.lib.interface.config import ParserErrorException
from pcs.lib.pacemaker.live import get_cluster_status_dom
from pcs.lib.pacemaker.state import ClusterState
from pcs.lib.pacemaker.values import (
is_boolean,
is_score as is_score_value,
timeout_to_seconds as get_timeout_seconds,
validate_id,
)
from pcs.lib.services import (
get_service_manager as _get_service_manager,
service_exception_to_report,
)
# pylint: disable=invalid-name
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# usefile & filename variables are set in pcs module
usefile = False
filename = ""
# Note: not properly typed
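# Keys are raw command line option strings, e.g. "--debug" or
# "--request-timeout", as parsed by the pcs entry point.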
pcs_options: Dict[Any, Any] = {}
class UnknownPropertyException(Exception):
pass
def getValidateWithVersion(dom):
"""
Commandline options: no options
"""
cib = dom.getElementsByTagName("cib")
if len(cib) != 1:
err("Bad cib")
cib = cib[0]
version = cib.getAttribute("validate-with")
r = re.compile(r"pacemaker-(\d+)\.(\d+)\.?(\d+)?")
m = r.match(version)
major = int(m.group(1))
minor = int(m.group(2))
rev = int(m.group(3) or 0)
return (major, minor, rev)
# Check the current pacemaker schema version in the CIB and upgrade it if
# necessary. Returns False if not upgraded and True if upgraded.
def checkAndUpgradeCIB(major, minor, rev):
"""
Commandline options:
* -f - CIB file
"""
cmajor, cminor, crev = getValidateWithVersion(get_cib_dom())
# pylint: disable=too-many-boolean-expressions
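    # Lexicographic comparison: skip the upgrade when the current schema
    # version (cmajor, cminor, crev) is already >= required (major, minor, rev).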
if (
cmajor > major
or (cmajor == major and cminor > minor)
or (cmajor == major and cminor == minor and crev >= rev)
):
return False
cluster_upgrade()
return True
def cluster_upgrade():
"""
Commandline options:
* -f - CIB file
"""
output, retval = run(["cibadmin", "--upgrade", "--force"])
if retval != 0:
err("unable to upgrade cluster: %s" % output)
if (
output.strip()
== "Upgrade unnecessary: Schema is already the latest available"
):
return
print("Cluster CIB has been upgraded to latest version")
def cluster_upgrade_to_version(required_version):
"""
Commandline options:
* -f - CIB file
"""
checkAndUpgradeCIB(*required_version)
dom = get_cib_dom()
current_version = getValidateWithVersion(dom)
if current_version < required_version:
err(
CibUpgradeFailedToMinimalRequiredVersion(
".".join([str(x) for x in current_version]),
".".join([str(x) for x in required_version]),
).message
)
return dom
# Check status of node
def checkStatus(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(node, "remote/status", None, False, False)
# Check and see if we're authorized (faster than a status check)
def checkAuthorization(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(node, "remote/check_auth", None, False, False)
def get_uid_gid_file_name(uid, gid):
"""
Commandline options: no options
"""
return "pcs-uidgid-%s-%s" % (uid, gid)
# Reads in uid file and returns dict of values {'uid':'theuid', 'gid':'thegid'}
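# The parsed file uses the corosync uidgid format; illustrative sketch
# (placeholder values):
#   uidgid {
#     uid: someuser
#     gid: somegroup
#   }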
def read_uid_gid_file(uidgid_filename):
"""
Commandline options: no options
"""
uidgid = {}
with open(
os.path.join(settings.corosync_uidgid_dir, uidgid_filename), "r"
) as myfile:
data = myfile.read().split("\n")
in_uidgid = False
for line in data:
line = re.sub(r"#.*", "", line)
if not in_uidgid:
if re.search(r"uidgid.*{", line):
in_uidgid = True
else:
continue
matches = re.search(r"uid:\s*(\S+)", line)
if matches:
uidgid["uid"] = matches.group(1)
matches = re.search(r"gid:\s*(\S+)", line)
if matches:
uidgid["gid"] = matches.group(1)
return uidgid
def write_uid_gid_file(uid, gid):
"""
Commandline options: no options
"""
orig_filename = get_uid_gid_file_name(uid, gid)
uidgid_filename = orig_filename
counter = 0
if find_uid_gid_files(uid, gid):
err("uidgid file with uid=%s and gid=%s already exists" % (uid, gid))
while os.path.exists(
os.path.join(settings.corosync_uidgid_dir, uidgid_filename)
):
counter = counter + 1
uidgid_filename = orig_filename + "-" + str(counter)
data = "uidgid {\n uid: %s\ngid: %s\n}\n" % (uid, gid)
with open(
os.path.join(settings.corosync_uidgid_dir, uidgid_filename), "w"
) as uidgid_file:
uidgid_file.write(data)
def find_uid_gid_files(uid, gid):
"""
Commandline options: no options
"""
if uid == "" and gid == "":
return []
found_files = []
uid_gid_files = os.listdir(settings.corosync_uidgid_dir)
for uidgid_file in uid_gid_files:
uid_gid_dict = read_uid_gid_file(uidgid_file)
if ("uid" in uid_gid_dict and uid == "") or (
"uid" not in uid_gid_dict and uid != ""
):
continue
if ("gid" in uid_gid_dict and gid == "") or (
"gid" not in uid_gid_dict and gid != ""
):
continue
if "uid" in uid_gid_dict and uid != uid_gid_dict["uid"]:
continue
if "gid" in uid_gid_dict and gid != uid_gid_dict["gid"]:
continue
found_files.append(uidgid_file)
return found_files
# Removes all uid/gid files with the specified uid/gid, returns false if we
# couldn't find one
def remove_uid_gid_file(uid, gid):
"""
Commandline options: no options
"""
if uid == "" and gid == "":
return False
file_removed = False
for uidgid_file in find_uid_gid_files(uid, gid):
os.remove(os.path.join(settings.corosync_uidgid_dir, uidgid_file))
file_removed = True
return file_removed
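# read_known_hosts_file is cached with lru_cache, so the known-hosts file is
# read and parsed at most once per process.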
@lru_cache()
def read_known_hosts_file():
"""
Commandline options: no options
"""
data = {}
try:
if os.getuid() != 0:
known_hosts_raw_file = pcs_file.RawFile(
cli_file_metadata.for_file_type(file_type_codes.PCS_KNOWN_HOSTS)
)
# json.loads handles bytes, it expects utf-8, 16 or 32 encoding
known_hosts_struct = json.loads(known_hosts_raw_file.read())
else:
# TODO remove
# This is here to provide known-hosts to functions not yet
# overhauled to pcs.lib. Cli should never read known hosts from
# /var/lib/pcsd/.
known_hosts_instance = LibFileInstance.for_known_hosts()
known_hosts_struct = known_hosts_instance.read_to_structure()
# TODO use known hosts facade for getting info from json struct once the
# facade exists
data = {
name: PcsKnownHost.from_known_host_file_dict(name, host)
for name, host in known_hosts_struct["known_hosts"].items()
}
except LibraryError as e:
# TODO remove
# This is here to provide known-hosts to functions not yet
# overhauled to pcs.lib. Cli should never read known hosts from
# /var/lib/pcsd/.
process_library_reports(e.args)
except ParserErrorException as e:
# TODO remove
# This is here to provide known-hosts to functions not yet
# overhauled to pcs.lib. Cli should never read known hosts from
# /var/lib/pcsd/.
process_library_reports(
known_hosts_instance.parser_exception_to_report_list(e)
)
except pcs_file.RawFileError as e:
reports_output.warn("Unable to read the known-hosts file: " + e.reason)
except json.JSONDecodeError as e:
reports_output.warn(f"Unable to parse the known-hosts file: {e}")
except (TypeError, KeyError):
reports_output.warn("Warning: Unable to parse the known-hosts file.")
return data
def repeat_if_timeout(send_http_request_function, repeat_count=15):
"""
Commandline options: no options
NOTE: callback send_http_request_function may use --request-timeout
"""
def repeater(node, *args, **kwargs):
repeats_left = repeat_count
while True:
retval, output = send_http_request_function(node, *args, **kwargs)
if (
retval != 2
or "Operation timed out" not in output
or repeats_left < 1
):
# did not timed out OR repeat limit exceeded
return retval, output
repeats_left = repeats_left - 1
if "--debug" in pcs_options:
print("{0}: {1}, trying again...".format(node, output))
return repeater
# Set the corosync.conf file on the specified node
def getCorosyncConfig(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(node, "remote/get_corosync_conf", None, False, False)
def setCorosyncConfig(node, config):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
data = urlencode({"corosync_conf": config})
(status, data) = sendHTTPRequest(node, "remote/set_corosync_conf", data)
if status != 0:
err("Unable to set corosync config: {0}".format(data))
def getPacemakerNodeStatus(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(
node, "remote/pacemaker_node_status", None, False, False
)
def startCluster(node, quiet=False, timeout=None):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(
node,
"remote/cluster_start",
printResult=False,
printSuccess=not quiet,
timeout=timeout,
)
def stopPacemaker(node, quiet=False, force=True):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return stopCluster(
node, pacemaker=True, corosync=False, quiet=quiet, force=force
)
def stopCorosync(node, quiet=False, force=True):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return stopCluster(
node, pacemaker=False, corosync=True, quiet=quiet, force=force
)
def stopCluster(node, quiet=False, pacemaker=True, corosync=True, force=True):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
data = dict()
timeout = None
if pacemaker and not corosync:
data["component"] = "pacemaker"
timeout = 2 * 60
elif corosync and not pacemaker:
data["component"] = "corosync"
if force:
data["force"] = 1
data = urlencode(data)
return sendHTTPRequest(
node,
"remote/cluster_stop",
data,
printResult=False,
printSuccess=not quiet,
timeout=timeout,
)
def enableCluster(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(node, "remote/cluster_enable", None, False, True)
def disableCluster(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(node, "remote/cluster_disable", None, False, True)
def destroyCluster(node, quiet=False):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(
node, "remote/cluster_destroy", None, not quiet, not quiet
)
def restoreConfig(node, tarball_data):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
data = urlencode({"tarball": tarball_data})
return sendHTTPRequest(node, "remote/config_restore", data, False, True)
def pauseConfigSyncing(node, delay_seconds=300):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
data = urlencode({"sync_thread_pause": delay_seconds})
return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
def resumeConfigSyncing(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
data = urlencode({"sync_thread_resume": 1})
return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
# Send an HTTP request to a node and return a tuple (status, data).
# If status is 0, data contains the server response;
# otherwise data contains an error message.
# Status values:
# 0 = Success
# 1 = HTTP Error
# 2 = No response
# 3 = Auth Error
# 4 = Permission denied
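# Usage sketch (illustrative): callers unpack the tuple and treat a non-zero
# status as an error, e.g.
#   retval, output = sendHTTPRequest(node, "remote/status", None, False, False)
#   if retval != 0:
#       err(output)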
def sendHTTPRequest(
host, request, data=None, printResult=True, printSuccess=True, timeout=None
):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
* --debug
"""
port = None
addr = host
token = None
known_host = read_known_hosts_file().get(host, None)
# TODO: do not allow communication with unknown host
if known_host:
port = known_host.dest.port
addr = known_host.dest.addr
token = known_host.token
if port is None:
port = settings.pcsd_default_port
url = "https://{host}:{port}/{request}".format(
host="[{0}]".format(addr) if ":" in addr else addr,
request=request,
port=port,
)
if "--debug" in pcs_options:
print("Sending HTTP Request to: " + url)
print("Data: {0}".format(data))
def __debug_callback(data_type, debug_data):
prefixes = {
# pylint: disable=no-member
pycurl.DEBUG_TEXT: b"* ",
pycurl.DEBUG_HEADER_IN: b"< ",
pycurl.DEBUG_HEADER_OUT: b"> ",
pycurl.DEBUG_DATA_IN: b"<< ",
pycurl.DEBUG_DATA_OUT: b">> ",
}
if data_type in prefixes:
debug_output.write(prefixes[data_type])
debug_output.write(debug_data)
if not debug_data.endswith(b"\n"):
debug_output.write(b"\n")
output = BytesIO()
debug_output = BytesIO()
cookies = __get_cookie_list(token)
if not timeout:
timeout = settings.default_request_timeout
timeout = pcs_options.get("--request-timeout", timeout)
handler = pycurl.Curl()
handler.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)
handler.setopt(pycurl.URL, url.encode("utf-8"))
handler.setopt(pycurl.WRITEFUNCTION, output.write)
handler.setopt(pycurl.VERBOSE, 1)
handler.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
handler.setopt(pycurl.DEBUGFUNCTION, __debug_callback)
handler.setopt(pycurl.TIMEOUT_MS, int(timeout * 1000))
handler.setopt(pycurl.SSL_VERIFYHOST, 0)
handler.setopt(pycurl.SSL_VERIFYPEER, 0)
handler.setopt(pycurl.HTTPHEADER, ["Expect: "])
if cookies:
handler.setopt(pycurl.COOKIE, ";".join(cookies).encode("utf-8"))
if data:
handler.setopt(pycurl.COPYPOSTFIELDS, data.encode("utf-8"))
try:
handler.perform()
response_data = output.getvalue().decode("utf-8")
response_code = handler.getinfo(pycurl.RESPONSE_CODE)
if printResult or printSuccess:
print(host + ": " + response_data.strip())
if "--debug" in pcs_options:
print("Response Code: {0}".format(response_code))
print("--Debug Response Start--\n{0}".format(response_data))
print("--Debug Response End--")
print("Communication debug info for calling: {0}".format(url))
print("--Debug Communication Output Start--")
print(debug_output.getvalue().decode("utf-8", "ignore"))
print("--Debug Communication Output End--")
print()
if response_code == 401:
output = (
3,
(
"Unable to authenticate to {node} - (HTTP error: {code}), "
"try running 'pcs host auth {node}'"
).format(node=host, code=response_code),
)
elif response_code == 403:
output = (
4,
"{node}: Permission denied - (HTTP error: {code})".format(
node=host, code=response_code
),
)
elif response_code >= 400:
output = (
1,
"Error connecting to {node} - (HTTP error: {code})".format(
node=host, code=response_code
),
)
else:
output = (0, response_data)
if printResult and output[0] != 0:
print(output[1])
return output
except pycurl.error as e:
if is_proxy_set(os.environ):
print(
"Warning: Proxy is set in environment variables, try "
"disabling it"
)
# pylint: disable=unbalanced-tuple-unpacking
dummy_errno, reason = e.args
if "--debug" in pcs_options:
print("Response Reason: {0}".format(reason))
msg = (
"Unable to connect to {host}, check if pcsd is running there or try "
"setting higher timeout with --request-timeout option ({reason})"
).format(host=host, reason=reason)
if printResult:
print(msg)
return (2, msg)
def __get_cookie_list(token):
"""
Commandline options: no options
"""
cookies = []
if token:
cookies.append("token=" + token)
if os.geteuid() == 0:
for name in ("CIB_user", "CIB_user_groups"):
if name in os.environ and os.environ[name].strip():
value = os.environ[name].strip()
# Let's be safe about characters in env variables and do base64.
# We cannot do it for CIB_user however to be backward compatible
# so we at least remove disallowed characters.
if name == "CIB_user":
value = re.sub(r"[^!-~]", "", value).replace(";", "")
else:
# python3 requires the value to be bytes not str
value = base64.b64encode(value.encode("utf8")).decode(
"utf-8"
)
cookies.append("{0}={1}".format(name, value))
return cookies
def get_corosync_conf_facade(conf_text=None):
"""
Commandline options:
* --corosync_conf - path to a mocked corosync.conf is set directly to
settings
"""
try:
return corosync_conf_facade(
corosync_conf_parser.Parser.parse(
(getCorosyncConf() if conf_text is None else conf_text).encode(
"utf-8"
)
)
)
except corosync_conf_parser.CorosyncConfParserException as e:
return err("Unable to parse corosync.conf: %s" % e)
def getNodeAttributesFromPacemaker():
"""
Commandline options: no options
"""
try:
return [
node.attrs
for node in ClusterState(
get_cluster_status_dom(cmd_runner())
).node_section.nodes
]
except LibraryError as e:
return process_library_reports(e.args)
def hasCorosyncConf():
"""
Commandline options:
* --corosync_conf - path to a mocked corosync.conf is set directly to
settings
"""
return os.path.isfile(settings.corosync_conf_file)
def getCorosyncConf():
"""
Commandline options:
* --corosync_conf - path to a mocked corosync.conf is set directly to
settings
"""
try:
out = open(settings.corosync_conf_file, "r", encoding="utf-8").read()
except IOError as e:
err("Unable to read %s: %s" % (settings.corosync_conf_file, e.strerror))
return out
def reloadCorosync():
"""
Commandline options: no options
"""
output, retval = run(["corosync-cfgtool", "-R"])
return output, retval
def getCorosyncActiveNodes():
"""
Commandline options: no options
"""
output, retval = run(["corosync-cmapctl"])
if retval != 0:
return []
nodename_re = re.compile(r"^nodelist\.node\.(\d+)\.name .*= (.*)", re.M)
nodestatus_re = re.compile(
r"^runtime\.members\.(\d+).status .*= (.*)", re.M
)
nodenameid_mapping_re = re.compile(
r"nodelist\.node\.(\d+)\.nodeid .*= (\d+)", re.M
)
node_names = nodename_re.findall(output)
index_to_id = dict(nodenameid_mapping_re.findall(output))
id_to_status = dict(nodestatus_re.findall(output))
node_status = {}
for index, node_name in node_names:
if index in index_to_id:
nodeid = index_to_id[index]
if nodeid in id_to_status:
node_status[node_name] = id_to_status[nodeid]
else:
print("Error mapping %s" % node_name)
nodes_active = []
for node, status in node_status.items():
if status == "joined":
nodes_active.append(node)
return nodes_active
# Decide whether the corosync-qdevice service needs to be handled when managing
# cluster services
def need_to_handle_qdevice_service():
"""
Commandline options: no options
* --corosync_conf - path to a mocked corosync.conf is set directly to
settings but it doesn't make sense for contexts in which this function
is used
"""
try:
cfg = corosync_conf_facade(
corosync_conf_parser.Parser.parse(
open(settings.corosync_conf_file, "rb").read()
)
)
return cfg.has_quorum_device()
except (EnvironmentError, corosync_conf_parser.CorosyncConfParserException):
# corosync.conf not present or not valid => no qdevice specified
return False
# Restore default behavior before starting subprocesses
def subprocess_setup():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def touch_cib_file(cib_filename):
if not os.path.isfile(cib_filename):
try:
write_empty_cib(cib_filename)
except EnvironmentError as e:
err(
"Unable to write to file: '{0}': '{1}'".format(
cib_filename, str(e)
)
)
# Run command, with environment and return (output, retval)
# DEPRECATED, please use lib.external.CommandRunner via utils.cmd_runner()
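# Example (sketch): output, retval = run(["crm_mon", "--one-shot"])
# Commands starting with "crm" or "corosync" are resolved against the
# configured pacemaker/corosync binary directories below.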
def run(
args,
ignore_stderr=False,
string_for_stdin=None,
env_extend=None,
binary_output=False,
):
"""
Commandline options:
* -f - CIB file (effective only for some pacemaker tools)
* --debug
"""
if not env_extend:
env_extend = dict()
env_var = env_extend
env_var.update(dict(os.environ))
env_var["LC_ALL"] = "C"
if usefile:
env_var["CIB_file"] = filename
touch_cib_file(filename)
command = args[0]
if command[0:3] == "crm" or command in [
"cibadmin",
"iso8601",
"stonith_admin",
]:
args[0] = os.path.join(settings.pacemaker_binaries, command)
elif command[0:8] == "corosync":
args[0] = os.path.join(settings.corosync_binaries, command)
try:
if "--debug" in pcs_options:
print("Running: " + " ".join(args))
if string_for_stdin:
print("--Debug Input Start--\n" + string_for_stdin)
print("--Debug Input End--")
# Some commands react differently if you give them anything via stdin
if string_for_stdin is not None:
stdin_pipe = subprocess.PIPE
else:
stdin_pipe = subprocess.DEVNULL
# pylint: disable=subprocess-popen-preexec-fn, consider-using-with
p = subprocess.Popen(
args,
stdin=stdin_pipe,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if ignore_stderr else subprocess.STDOUT),
preexec_fn=subprocess_setup,
close_fds=True,
env=env_var,
# decodes newlines and in python3 also converts bytes to str
universal_newlines=(not binary_output),
)
output, dummy_stderror = p.communicate(string_for_stdin)
returnVal = p.returncode
if "--debug" in pcs_options:
print("Return Value: {0}".format(returnVal))
print(("--Debug Output Start--\n{0}".format(output)).rstrip())
print("--Debug Output End--")
print()
except OSError as e:
print(e.strerror)
err("unable to locate command: " + args[0])
return output, returnVal
@lru_cache()
def cmd_runner():
"""
Commandline options:
* -f - CIB file
"""
env_vars = dict()
if usefile:
env_vars["CIB_file"] = filename
env_vars.update(os.environ)
env_vars["LC_ALL"] = "C"
return CommandRunner(
logging.getLogger("pcs"), get_report_processor(), env_vars
)
def run_pcsdcli(command, data=None):
"""
Commandline options:
* --request-timeout - timeout for HTTP request, applicable for commands:
* remove_known_hosts - only when running on cluster node (sync will
be initiated)
* auth
* send_local_configs
"""
if not data:
data = dict()
env_var = dict()
if "--debug" in pcs_options:
env_var["PCSD_DEBUG"] = "true"
if "--request-timeout" in pcs_options:
env_var["PCSD_NETWORK_TIMEOUT"] = str(pcs_options["--request-timeout"])
else:
env_var["PCSD_NETWORK_TIMEOUT"] = str(settings.default_request_timeout)
pcsd_dir_path = settings.pcsd_exec_location
pcsdcli_path = os.path.join(pcsd_dir_path, "pcsd-cli.rb")
if settings.pcsd_gem_path is not None:
env_var["GEM_HOME"] = settings.pcsd_gem_path
stdout, dummy_stderr, retval = cmd_runner().run(
[settings.ruby_executable, "-I" + pcsd_dir_path, pcsdcli_path, command],
json.dumps(data),
env_var,
)
try:
output_json = json.loads(stdout)
for key in ["status", "text", "data"]:
if key not in output_json:
output_json[key] = None
output = "".join(output_json["log"])
# check if some requests timed out, if so print message about it
if "error: operation_timedout" in output:
print("Error: Operation timed out")
# check if there are any connection failures due to proxy in pcsd and
# print warning if so
proxy_msg = "Proxy is set in environment variables, try disabling it"
if proxy_msg in output:
print("Warning: {0}".format(proxy_msg))
except ValueError:
output_json = {
"status": "bad_json_output",
"text": stdout,
"data": None,
}
return output_json, retval
def set_token_to_accept(token):
output, retval = run_pcsdcli("set_token_to_accept", dict(token=token))
if retval == 0:
if output["status"] == "access_denied":
err("Access denied")
if output["status"] != "ok":
err("Unable to communicate with pcsd")
else:
err("Unable to communicate with pcsd")
def auth_hosts_token(host_dict):
output, retval = run_pcsdcli("auth_with_token", dict(nodes=host_dict))
if retval == 0:
if output["status"] == "access_denied":
err("Access denied")
if output["status"] != "ok":
err("Unable to communicate with pcsd")
else:
err("Unable to communicate with pcsd")
def auth_hosts(host_dict):
"""
Commandline options:
* --request-timeout - timeout for HTTP request
"""
output, retval = run_pcsdcli("auth", dict(nodes=host_dict))
if retval == 0 and output["status"] == "access_denied":
err("Access denied")
if retval == 0 and output["status"] == "ok" and output["data"]:
failed = False
try:
if not output["data"]["sync_successful"]:
err(
"Some nodes had a newer known-hosts than the local node. "
+ "Local node's known-hosts were updated. "
+ "Please repeat the authentication if needed."
)
for node, result in output["data"]["auth_responses"].items():
if result["status"] == "ok":
print("{0}: Authorized".format(node))
elif result["status"] == "bad_password":
err(f"{node}: Username and/or password is incorrect", False)
failed = True
elif result["status"] in ("noresponse", "error"):
err("Unable to communicate with {0}".format(node), False)
failed = True
else:
err("Unexpected response from {0}".format(node), False)
failed = True
if output["data"]["sync_nodes_err"]:
err(
(
"Unable to synchronize and save known-hosts on nodes: "
+ "{0}. Run 'pcs host auth {1}' to make sure the nodes "
+ "are authorized."
).format(
", ".join(output["data"]["sync_nodes_err"]),
" ".join(output["data"]["sync_nodes_err"]),
)
)
except (ValueError, KeyError):
err("Unable to communicate with pcsd")
if failed:
sys.exit(1)
return
err("Unable to communicate with pcsd")
def call_local_pcsd(argv, std_in=None):
"""
Commandline options:
* --request-timeout - timeout of call to local pcsd
"""
# some commands cannot be run under a non-root account
# so we pass those commands to locally running pcsd to execute them
# returns [list_of_errors, exit_code, stdout, stderr]
data = {
"command": json.dumps(argv),
}
if std_in:
data["stdin"] = std_in
data_send = urlencode(data)
code, output = sendHTTPRequest(
"localhost", "run_pcs", data_send, False, False
)
if code == 3: # not authenticated
return [
[
"Unable to authenticate against the local pcsd. Run the same "
"command as root or authenticate yourself to the local pcsd "
"using command 'pcs client local-auth'"
],
1,
"",
"",
]
if code != 0: # http error connecting to localhost
return [[output], 1, "", ""]
try:
output_json = json.loads(output)
for key in ["status", "data"]:
if key not in output_json:
output_json[key] = None
except ValueError:
return [["Unable to communicate with pcsd"], 1, "", ""]
if output_json["status"] == "bad_command":
return [["Command not allowed"], 1, "", ""]
if output_json["status"] == "access_denied":
return [["Access denied"], 1, "", ""]
if output_json["status"] != "ok" or not output_json["data"]:
return [["Unable to communicate with pcsd"], 1, "", ""]
try:
exitcode = output_json["data"]["code"]
std_out = output_json["data"]["stdout"]
std_err = output_json["data"]["stderr"]
return [[], exitcode, std_out, std_err]
except KeyError:
return [["Unable to communicate with pcsd"], 1, "", ""]
def map_for_error_list(callab, iterab):
"""
Commandline options: no options
NOTE: callback 'callab' may use some options
"""
error_list = []
for item in iterab:
retval, error = callab(item)
if retval != 0:
error_list.append(error)
return error_list
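# Illustrative use (hypothetical callback): the callable must return a
# (retval, error) pair, e.g.
#   errors = map_for_error_list(lambda item: (0, None), item_list)
# and the error of every item whose retval is non-zero is collected.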
def run_parallel(worker_list, wait_seconds=1):
"""
Commandline options: no options
"""
thread_list = []
for worker in worker_list:
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
thread_list.append(thread)
while thread_list:
for thread in thread_list:
thread.join(wait_seconds)
if not thread.is_alive():
thread_list.remove(thread)
def create_task(report, action, node, *args, **kwargs):
"""
Commandline options: no options
"""
def worker():
returncode, output = action(node, *args, **kwargs)
report(node, returncode, output)
return worker
def create_task_list(report, action, node_list, *args, **kwargs):
"""
Commandline options: no options
"""
return [
create_task(report, action, node, *args, **kwargs) for node in node_list
]
def parallel_for_nodes(action, node_list, *args, **kwargs):
"""
Commandline options: no options
NOTE: callback 'action' may use some cmd options
"""
node_errors = dict()
def report(node, returncode, output):
message = "{0}: {1}".format(node, output.strip())
print(message)
if returncode != 0:
node_errors[node] = message
run_parallel(create_task_list(report, action, node_list, *args, **kwargs))
return node_errors
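# Illustrative use (hypothetical action): an action such as
#   def check(node): return (0, "node is alive")
# run via parallel_for_nodes(check, ["node1", "node2"]) prints one
# "<node>: <output>" line per node and returns a dict of failed nodes only.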
# Check if something exists in the CIB
def does_exist(xpath_query):
"""
Commandline options:
* -f - CIB file
"""
args = ["cibadmin", "-Q", "--xpath", xpath_query]
dummy_output, retval = run(args)
if retval != 0:
return False
return True
def get_group_children(group_id):
"""
Commandline options: no options
"""
child_resources = []
dom = get_cib_dom()
groups = dom.getElementsByTagName("group")
for g in groups:
if g.getAttribute("id") == group_id:
for child in g.childNodes:
if child.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
continue
if child.tagName == "primitive":
child_resources.append(child.getAttribute("id"))
return child_resources
def dom_get_clone_ms_resource(dom, clone_ms_id):
"""
Commandline options: no options
"""
clone_ms = dom_get_clone(dom, clone_ms_id) or dom_get_master(
dom, clone_ms_id
)
if clone_ms:
return dom_elem_get_clone_ms_resource(clone_ms)
return None
def dom_elem_get_clone_ms_resource(clone_ms):
"""
Commandline options: no options
"""
for child in clone_ms.childNodes:
if (
child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
and child.tagName in ["group", "primitive"]
):
return child
return None
def dom_get_resource_clone_ms_parent(dom, resource_id):
"""
Commandline options: no options
"""
resource = dom_get_resource(dom, resource_id) or dom_get_group(
dom, resource_id
)
if resource:
return dom_get_parent_by_tag_names(resource, ["clone", "master"])
return None
def dom_get_resource_bundle_parent(dom, resource_id):
"""
Commandline options: no options
"""
resource = dom_get_resource(dom, resource_id)
if resource:
return dom_get_parent_by_tag_names(resource, ["bundle"])
return None
def dom_get_master(dom, master_id):
"""
Commandline options: no options
"""
for master in dom.getElementsByTagName("master"):
if master.getAttribute("id") == master_id:
return master
return None
def dom_get_clone(dom, clone_id):
"""
Commandline options: no options
"""
for clone in dom.getElementsByTagName("clone"):
if clone.getAttribute("id") == clone_id:
return clone
return None
def dom_get_group(dom, group_id):
"""
Commandline options: no options
"""
for group in dom.getElementsByTagName("group"):
if group.getAttribute("id") == group_id:
return group
return None
def dom_get_bundle(dom, bundle_id):
"""
Commandline options: no options
"""
for bundle in dom.getElementsByTagName("bundle"):
if bundle.getAttribute("id") == bundle_id:
return bundle
return None
def dom_get_resource_bundle(bundle_el):
"""
Commandline options: no options
"""
for child in bundle_el.childNodes:
if (
child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
and child.tagName == "primitive"
):
return child
return None
def dom_get_group_clone(dom, group_id):
"""
Commandline options: no options
"""
for clone in dom.getElementsByTagName("clone"):
group = dom_get_group(clone, group_id)
if group:
return group
return None
def dom_get_group_masterslave(dom, group_id):
"""
Commandline options: no options
"""
for master in dom.getElementsByTagName("master"):
group = dom_get_group(master, group_id)
if group:
return group
return None
def dom_get_resource(dom, resource_id):
"""
Commandline options: no options
"""
for primitive in dom.getElementsByTagName("primitive"):
if primitive.getAttribute("id") == resource_id:
return primitive
return None
def dom_get_any_resource(dom, resource_id):
"""
Commandline options: no options
"""
return (
dom_get_resource(dom, resource_id)
or dom_get_group(dom, resource_id)
or dom_get_clone(dom, resource_id)
or dom_get_master(dom, resource_id)
)
def is_stonith_resource(resource_id):
"""
Commandline options:
* -f - CIB file
"""
return does_exist(
"//primitive[@id='" + resource_id + "' and @class='stonith']"
)
def dom_get_resource_clone(dom, resource_id):
"""
Commandline options: no options
"""
for clone in dom.getElementsByTagName("clone"):
resource = dom_get_resource(clone, resource_id)
if resource:
return resource
return None
def dom_get_resource_masterslave(dom, resource_id):
"""
Commandline options: no options
"""
for master in dom.getElementsByTagName("master"):
resource = dom_get_resource(master, resource_id)
if resource:
return resource
return None
# returns tuple (is_valid, error_message, correct_resource_id_if_exists)
# there is duplicate code in pcs/lib/cib/constraint/constraint.py
# please use the function in pcs/lib/cib/constraint/constraint.py
def validate_constraint_resource(dom, resource_id):
"""
Commandline options:
* --force - allow constraint on any resource
"""
resource_el = (
dom_get_clone(dom, resource_id)
or dom_get_master(dom, resource_id)
or dom_get_bundle(dom, resource_id)
)
if resource_el:
# clones, masters and bundles are always valid
return True, "", resource_id
resource_el = dom_get_resource(dom, resource_id) or dom_get_group(
dom, resource_id
)
if not resource_el:
return False, "Resource '%s' does not exist" % resource_id, None
clone_el = dom_get_resource_clone_ms_parent(
dom, resource_id
) or dom_get_resource_bundle_parent(dom, resource_id)
if not clone_el:
# a primitive or a group is valid if it is not in a clone, a master or a
# bundle
return True, "", resource_id
if "--force" in pcs_options:
return True, "", clone_el.getAttribute("id")
if clone_el.tagName in ["clone", "master"]:
return (
False,
"%s is a clone resource, you should use the clone id: %s "
"when adding constraints. Use --force to override."
% (resource_id, clone_el.getAttribute("id")),
clone_el.getAttribute("id"),
)
if clone_el.tagName == "bundle":
return (
False,
"%s is a bundle resource, you should use the bundle id: %s "
"when adding constraints. Use --force to override."
% (resource_id, clone_el.getAttribute("id")),
clone_el.getAttribute("id"),
)
return True, "", resource_id
def dom_get_resource_remote_node_name(dom_resource):
"""
Commandline options: no options
"""
if dom_resource.tagName != "primitive":
return None
if (
dom_resource.getAttribute("class").lower() == "ocf"
and dom_resource.getAttribute("provider").lower() == "pacemaker"
and dom_resource.getAttribute("type").lower() == "remote"
):
return dom_resource.getAttribute("id")
return dom_get_meta_attr_value(dom_resource, "remote-node")
def dom_get_meta_attr_value(dom_resource, meta_name):
"""
Commandline options: no options
"""
for meta in dom_resource.getElementsByTagName("meta_attributes"):
for nvpair in meta.getElementsByTagName("nvpair"):
if nvpair.getAttribute("name") == meta_name:
return nvpair.getAttribute("value")
return None
def dom_get_element_with_id(dom, tag_name, element_id):
"""
Commandline options: no options
"""
for elem in dom.getElementsByTagName(tag_name):
if elem.hasAttribute("id") and elem.getAttribute("id") == element_id:
return elem
return None
def dom_get_node(dom, node_name):
"""
Commandline options: no options
"""
for e in dom.getElementsByTagName("node"):
if e.hasAttribute("uname") and e.getAttribute("uname") == node_name:
return e
return None
def dom_get_children_by_tag_name(dom_el, tag_name):
"""
Commandline options: no options
"""
return [
node
for node in dom_el.childNodes
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
and node.tagName == tag_name
]
def dom_get_parent_by_tag_names(dom_el, tag_names):
"""
Commandline options: no options
"""
parent = dom_el.parentNode
while parent:
if not isinstance(parent, xml.dom.minidom.Element):
return None
if parent.tagName in tag_names:
return parent
parent = parent.parentNode
return None
def dom_attrs_to_list(dom_el, with_id=False):
"""
Commandline options: no options
"""
attributes = [
"%s=%s" % (name, value)
for name, value in sorted(dom_el.attributes.items())
if name != "id"
]
if with_id:
attributes.append("(id:%s)" % (dom_el.getAttribute("id")))
return attributes
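# Illustrative output (hypothetical element): for
# <op name="monitor" interval="10s" id="r-monitor"/> with with_id=True this
# returns ['interval=10s', 'name=monitor', '(id:r-monitor)'].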
# moved to pcs.lib.pacemaker.state
def get_resource_for_running_check(cluster_state, resource_id, stopped=False):
"""
Commandline options: no options
"""
for clone in cluster_state.getElementsByTagName("clone"):
if clone.getAttribute("id") == resource_id:
for child in clone.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.tagName in [
"resource",
"group",
]:
resource_id = child.getAttribute("id")
# in a clone a resource can have an id of '<name>:N'
if ":" in resource_id:
parts = resource_id.rsplit(":", 1)
if parts[1].isdigit():
resource_id = parts[0]
break
for group in cluster_state.getElementsByTagName("group"):
# If resource is a clone it can have an id of '<resource name>:N'
if group.getAttribute("id") == resource_id or group.getAttribute(
"id"
).startswith(resource_id + ":"):
if stopped:
elem = group.getElementsByTagName("resource")[0]
else:
elem = group.getElementsByTagName("resource")[-1]
resource_id = elem.getAttribute("id")
return resource_id
# moved to pcs.lib.pacemaker.state
# see pcs.lib.commands.resource for usage
def resource_running_on(resource, passed_state=None, stopped=False):
"""
Commandline options:
* -f - has effect but doesn't make sense to check state of resource
"""
nodes_started = []
nodes_master = []
nodes_slave = []
state = passed_state if passed_state else getClusterState()
resource_original = resource
resource = get_resource_for_running_check(state, resource, stopped)
resources = state.getElementsByTagName("resource")
for res in resources:
# If resource is a clone it can have an id of '<resource name>:N'
# If resource is a clone it will be found more than once - cannot break
if (
res.getAttribute("id") == resource
or res.getAttribute("id").startswith(resource + ":")
) and res.getAttribute("failed") != "true":
for node in res.getElementsByTagName("node"):
node_name = node.getAttribute("name")
if res.getAttribute("role") == "Started":
nodes_started.append(node_name)
elif res.getAttribute("role") == "Master":
nodes_master.append(node_name)
elif res.getAttribute("role") == "Slave":
nodes_slave.append(node_name)
if not nodes_started and not nodes_master and not nodes_slave:
message = "Resource '%s' is not running on any node" % resource_original
else:
message_parts = []
for alist, label in (
(nodes_started, "running"),
(nodes_master, "master"),
(nodes_slave, "slave"),
):
if alist:
alist.sort()
message_parts.append(
"%s on node%s %s"
% (label, "s" if len(alist) > 1 else "", ", ".join(alist))
)
message = "Resource '%s' is %s." % (
resource_original,
"; ".join(message_parts),
)
return {
"message": message,
"is_running": bool(nodes_started or nodes_master or nodes_slave),
"nodes_started": nodes_started,
"nodes_master": nodes_master,
"nodes_slave": nodes_slave,
}
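# Illustrative result (hypothetical cluster state): for a resource started on
# node1 only, the returned dict looks like
# {"message": "Resource 'r' is running on node node1.", "is_running": True,
#  "nodes_started": ["node1"], "nodes_master": [], "nodes_slave": []}.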
def validate_wait_get_timeout(need_cib_support=True):
"""
Commandline options:
* --wait
* -f - to check if -f and --wait are not used simultaneously
"""
if need_cib_support and usefile:
err("Cannot use '-f' together with '--wait'")
wait_timeout = pcs_options["--wait"]
if wait_timeout is None:
return wait_timeout
wait_timeout = get_timeout_seconds(wait_timeout)
if wait_timeout is None:
err(
"%s is not a valid number of seconds to wait"
% pcs_options["--wait"]
)
return wait_timeout
# Return matches from the CIB with the xpath_query
def get_cib_xpath(xpath_query):
"""
Commandline options:
* -f - CIB file
"""
args = ["cibadmin", "-Q", "--xpath", xpath_query]
output, retval = run(args)
if retval != 0:
return ""
return output
def get_cib(scope=None):
"""
Commandline options:
* -f - CIB file
"""
command = ["cibadmin", "-l", "-Q"]
if scope:
command.append("--scope=%s" % scope)
output, retval = run(command)
if retval != 0:
if retval == 105 and scope:
err("unable to get cib, scope '%s' not present in cib" % scope)
else:
err("unable to get cib")
return output
def get_cib_dom(cib_xml=None):
"""
Commandline options:
* -f - CIB file
"""
# pylint: disable=bare-except
if cib_xml is None:
cib_xml = get_cib()
try:
dom = parseString(cib_xml)
return dom
except:
return err("unable to get cib")
def get_cib_etree(cib_xml=None):
"""
Commandline options:
* -f - CIB file
"""
# pylint: disable=bare-except
if cib_xml is None:
cib_xml = get_cib()
try:
root = ET.fromstring(cib_xml)
return root
except:
return err("unable to get cib")
def is_etree(var):
"""
Commandline options: no options
"""
return var.__class__ == xml.etree.ElementTree.Element
# Replace only configuration section of cib with dom passed
def replace_cib_configuration(dom):
"""
Commandline options:
* -f - CIB file
"""
if is_etree(dom):
# etree returns string in bytes: b'xml'
# python 3 removed .encode() from byte strings
# run(...) calls subprocess.Popen.communicate which calls encode...
# so there is bytes to str conversion
new_dom = ET.tostring(dom).decode()
elif hasattr(dom, "toxml"):
new_dom = dom.toxml()
else:
new_dom = dom
cmd = ["cibadmin", "--replace", "-V", "--xml-pipe", "-o", "configuration"]
output, retval = run(cmd, False, new_dom)
if retval != 0:
err("Unable to update cib\n" + output)
def is_valid_cib_scope(scope):
"""
Commandline options: no options
"""
return scope in [
"acls",
"alerts",
"configuration",
"constraints",
"crm_config",
"fencing-topology",
"nodes",
"op_defaults",
"resources",
"rsc_defaults",
"tags",
]
# Checks to see if id exists in the xml dom passed
# DEPRECATED use lxml version available in pcs.lib.cib.tools
def does_id_exist(dom, check_id):
"""
Commandline options: no options
"""
# do not search in /cib/status, it may contain references to previously
# existing and deleted resources, which would prevent creating them again
if is_etree(dom):
for elem in dom.findall(
str('(/cib/*[name()!="status"]|/*[name()!="cib"])/*')
):
if elem.get("id") == check_id:
return True
else:
document = (
dom
if isinstance(dom, xml.dom.minidom.Document)
else dom.ownerDocument
)
cib_found = False
for cib in dom_get_children_by_tag_name(document, "cib"):
cib_found = True
for section in cib.childNodes:
if section.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
continue
if section.tagName == "status":
continue
for elem in section.getElementsByTagName("*"):
if elem.getAttribute("id") == check_id:
return True
if not cib_found:
for elem in document.getElementsByTagName("*"):
if elem.getAttribute("id") == check_id:
return True
return False
# Returns check_id if it doesn't exist in the dom, otherwise it adds an integer
# to the end of the id and increments it until a unique id is found
# DEPRECATED use lxml version available in pcs.lib.cib.tools
def find_unique_id(dom, check_id):
"""
Commandline options: no options
"""
counter = 1
temp_id = check_id
while does_id_exist(dom, temp_id):
temp_id = check_id + "-" + str(counter)
counter += 1
return temp_id
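# Illustrative behaviour (hypothetical ids): if "vip" and "vip-1" already
# exist in the dom, find_unique_id(dom, "vip") returns "vip-2".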
# Checks to see if the specified operation already exists in passed set of
# operations
# pacemaker differentiates between operations only by name and interval
def operation_exists(operations_el, op_el):
"""
Commandline options: no options
"""
existing = []
op_name = op_el.getAttribute("name")
op_interval = get_timeout_seconds(op_el.getAttribute("interval"), True)
for op in operations_el.getElementsByTagName("op"):
if (
op.getAttribute("name") == op_name
and get_timeout_seconds(op.getAttribute("interval"), True)
== op_interval
):
existing.append(op)
return existing
def operation_exists_by_name(operations_el, op_el):
"""
Commandline options: no options
"""
existing = []
op_name = op_el.getAttribute("name")
op_role = op_el.getAttribute("role") or "Started"
ocf_check_level = None
if op_name == "monitor":
ocf_check_level = get_operation_ocf_check_level(op_el)
for op in operations_el.getElementsByTagName("op"):
if op.getAttribute("name") == op_name:
if op_name != "monitor":
existing.append(op)
elif (
op.getAttribute("role") or "Started"
) == op_role and ocf_check_level == get_operation_ocf_check_level(
op
):
existing.append(op)
return existing
def get_operation_ocf_check_level(operation_el):
"""
Commandline options: no options
"""
for attr_el in operation_el.getElementsByTagName("instance_attributes"):
for nvpair_el in attr_el.getElementsByTagName("nvpair"):
if nvpair_el.getAttribute("name") == "OCF_CHECK_LEVEL":
return nvpair_el.getAttribute("value")
return None
def get_node_attributes(filter_node=None, filter_attr=None):
"""
Commandline options:
* -f - CIB file
"""
node_config = get_cib_xpath("//nodes")
if node_config == "":
err("unable to get crm_config, is pacemaker running?")
dom = parseString(node_config).documentElement
nas = dict()
for node in dom.getElementsByTagName("node"):
nodename = node.getAttribute("uname")
if filter_node is not None and nodename != filter_node:
continue
for attributes in node.getElementsByTagName("instance_attributes"):
for nvp in attributes.getElementsByTagName("nvpair"):
attr_name = nvp.getAttribute("name")
if filter_attr is not None and attr_name != filter_attr:
continue
if nodename not in nas:
nas[nodename] = dict()
nas[nodename][attr_name] = nvp.getAttribute("value")
# Use just the first element of attributes. We don't support
# attributes with rules just yet.
break
return nas
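# Illustrative result (hypothetical attributes): something like
# {"node1": {"standby": "on"}, "node2": {"standby": "off"}}, optionally
# narrowed down by filter_node and/or filter_attr.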
def set_node_attribute(prop, value, node):
"""
Commandline options:
* -f - CIB file
* --force - no error if attribute to delete doesn't exist
"""
if value == "":
o, r = run(
[
"crm_attribute",
"-t",
"nodes",
"--node",
node,
"--name",
prop,
"--query",
]
)
if r != 0 and "--force" not in pcs_options:
err(
"attribute: '%s' doesn't exist for node: '%s'" % (prop, node),
False,
)
# This return code is used by pcsd
sys.exit(2)
o, r = run(
[
"crm_attribute",
"-t",
"nodes",
"--node",
node,
"--name",
prop,
"--delete",
]
)
else:
o, r = run(
[
"crm_attribute",
"-t",
"nodes",
"--node",
node,
"--name",
prop,
"--update",
value,
]
)
if r != 0:
err("unable to set attribute %s\n%s" % (prop, o))
# If the property exists, remove it and replace it with the new property
# If the value is blank, then we just remove it
def set_cib_property(prop, value, cib_dom=None):
"""
Commandline options:
* -f - CIB file
* --force - no error when removing non existing property
"""
update_cib = cib_dom is None
if update_cib:
crm_config = get_cib_xpath("//crm_config")
if crm_config == "":
err("unable to get crm_config, is pacemaker running?")
crm_config = parseString(crm_config).documentElement
else:
document = cib_dom.getElementsByTagName("crm_config")
if not document:
err("unable to get crm_config, is pacemaker running?")
crm_config = document[0]
property_found = False
cluster_property_set = dom_prepare_child_element(
crm_config, "cluster_property_set", "cib-bootstrap-options"
)
for child in cluster_property_set.getElementsByTagName("nvpair"):
if child.getAttribute("name") == prop:
property_found = True
break
if not property_found and value == "" and "--force" not in pcs_options:
err("can't remove property: '{0}' that doesn't exist".format(prop))
dom_update_nv_pair(
cluster_property_set, prop, value, "cib-bootstrap-options-"
)
if update_cib:
replace_cib_configuration(crm_config)
def getTerminalSize(fd=1):
"""
Returns height and width of current terminal. First tries to get
size via termios.TIOCGWINSZ, then from environment. Defaults to 25
lines x 80 columns if both methods fail.
:param fd: file descriptor (default: 1=stdout)
Commandline options: no options
"""
# pylint: disable=bare-except
try:
# pylint: disable=import-outside-toplevel
import fcntl
import termios
import struct
hw = struct.unpack(
str("hh"), fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234")
)
except:
try:
hw = (os.environ["LINES"], os.environ["COLUMNS"])
except:
hw = (25, 80)
return hw
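# Illustrative use: rows, columns = getTerminalSize(); e.g. (25, 80) when
# neither the ioctl nor the environment provides a size.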
def get_terminal_input(message=None):
"""
Commandline options: no options
"""
if message:
sys.stdout.write(message)
sys.stdout.flush()
try:
return input("")
except EOFError:
return ""
except KeyboardInterrupt:
print("Interrupted")
sys.exit(1)
def get_terminal_password(message="Password: "):
"""
Commandline options: no options
"""
if sys.stdin.isatty():
try:
return getpass.getpass(message)
except KeyboardInterrupt:
print("Interrupted")
sys.exit(1)
else:
return get_terminal_input(message)
# Returns an xml dom containing the current status of the cluster
# DEPRECATED, please use
# ClusterState(lib.pacemaker.live.get_cluster_status_dom()) instead
def getClusterState():
"""
Commandline options:
* -f - CIB file
"""
output, returncode = run(["crm_mon", "--help-all"])
format_option = (
"--output-as=xml" if "--output-as=" in output else "--as-xml"
)
xml_string, returncode = run(
["crm_mon", "--one-shot", format_option, "--inactive"],
ignore_stderr=True,
)
if returncode != 0:
err("error running crm_mon, is pacemaker running?")
return parseString(xml_string)
# DEPRECATED
# This should all be handled in pcs.lib. Currently, only pcs.config.config_show
# uses this, as it is still legacy architecture code.
def getClusterName():
"""
Commandline options:
* -f - CIB file if there is no corosync.conf
* --corosync_conf - path to a mocked corosync.conf is set directly to
settings
"""
try:
with open(settings.corosync_conf_file, "rb") as f:
conf = corosync_conf_facade(
corosync_conf_parser.Parser.parse(f.read())
)
cluster_name = conf.get_cluster_name()
if cluster_name:
return cluster_name
except (IOError, corosync_conf_parser.CorosyncConfParserException):
pass
# there is no corosync.conf on remote nodes, we can try to
# get cluster name from pacemaker
# pylint: disable=bare-except
try:
return get_set_properties("cluster-name")["cluster-name"]
except:
# we need to catch SystemExit (from utils.err), parse errors and so on
pass
return ""
def write_empty_cib(cibfile):
"""
Commandline options: no options
"""
empty_xml = """<?xml version="1.0" encoding="UTF-8"?>
<cib admin_epoch="0" epoch="1" num_updates="1" validate-with="pacemaker-1.2">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>"""
with open(cibfile, "w") as f:
f.write(empty_xml)
# Test if 'var' is a score or option (contains an '=')
def is_score_or_opt(var):
"""
Commandline options: no options
"""
if is_score(var):
return True
if var.find("=") != -1:
return True
return False
def is_score(var):
"""
Commandline options: no options
"""
return is_score_value(var)
def validate_xml_id(var: str, description: str = "id") -> Tuple[bool, str]:
"""
Commandline options: no options
"""
report_list: ReportItemList = []
validate_id(var, description, report_list)
if report_list:
return False, report_list[0].message.message
return True, ""
# deprecated, moved to pcs.lib.pacemaker.live
def is_iso8601_date(var):
"""
Commandline options: no options
"""
# using pacemaker tool to check if a value is a valid pacemaker iso8601 date
dummy_output, retVal = run(["iso8601", "-d", var])
return retVal == 0
def err(errorText, exit_after_error=True):
sys.stderr.write("Error: %s\n" % errorText)
if exit_after_error:
sys.exit(1)
@lru_cache(typed=True)
def get_service_manager() -> ServiceManagerInterface:
return _get_service_manager(cmd_runner(), get_report_processor())
def enableServices():
"""
Commandline options: no options
"""
# do NOT handle SBD in here, it is started by pacemaker not systemd or init
service_list = ["corosync", "pacemaker"]
if need_to_handle_qdevice_service():
service_list.append("corosync-qdevice")
service_manager = get_service_manager()
report_item_list = []
for service in service_list:
try:
service_manager.enable(service)
except ManageServiceError as e:
report_item_list.append(service_exception_to_report(e))
if report_item_list:
raise LibraryError(*report_item_list)
def disableServices():
"""
Commandline options: no options
"""
# do NOT handle SBD in here, it is started by pacemaker not systemd or init
service_list = ["corosync", "pacemaker"]
if need_to_handle_qdevice_service():
service_list.append("corosync-qdevice")
service_manager = get_service_manager()
report_item_list = []
for service in service_list:
try:
service_manager.disable(service)
except ManageServiceError as e:
report_item_list.append(service_exception_to_report(e))
if report_item_list:
raise LibraryError(*report_item_list)
def start_service(service):
"""
Commandline options: no options
"""
service_manager = get_service_manager()
try:
service_manager.start(service)
except ManageServiceError as e:
raise LibraryError(service_exception_to_report(e)) from e
def stop_service(service):
"""
Commandline options: no options
"""
service_manager = get_service_manager()
try:
service_manager.stop(service)
except ManageServiceError as e:
raise LibraryError(service_exception_to_report(e)) from e
def write_file(path, data, permissions=0o644, binary=False):
"""
Commandline options:
* --force - overwrite a file if it already exists
"""
if os.path.exists(path):
if "--force" not in pcs_options:
return False, "'%s' already exists, use --force to overwrite" % path
try:
os.remove(path)
except EnvironmentError as e:
return False, "unable to remove '%s': %s" % (path, e)
mode = "wb" if binary else "w"
try:
with os.fdopen(
os.open(path, os.O_WRONLY | os.O_CREAT, permissions), mode
) as outfile:
outfile.write(data)
except EnvironmentError as e:
return False, "unable to write to '%s': %s" % (path, e)
return True, ""
def tar_add_file_data(
tarball,
data,
name,
mode=None,
uid=None,
gid=None,
uname=None,
gname=None,
mtime=None,
):
# pylint: disable=too-many-arguments
"""
Commandline options: no options
"""
info = tarfile.TarInfo(name)
info.size = len(data)
info.type = tarfile.REGTYPE
info.mtime = int(time.time()) if mtime is None else mtime
if mode is not None:
info.mode = mode
if uid is not None:
info.uid = uid
if gid is not None:
info.gid = gid
if uname is not None:
info.uname = uname
if gname is not None:
info.gname = gname
data_io = BytesIO(data)
tarball.addfile(info, data_io)
data_io.close()
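# Illustrative use (hypothetical tarball and data):
#   with tarfile.open("backup.tar.bz2", "w|bz2") as tarball:
#       tar_add_file_data(tarball, b"file content", "version.txt", mode=0o644)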
# DEPRECATED, please use pcs.lib.pacemaker.live.simulate_cib
def simulate_cib(cib_dom):
"""
Commandline options: no options
"""
try:
with tempfile.NamedTemporaryFile(
mode="w+", suffix=".pcs"
) as new_cib_file, tempfile.NamedTemporaryFile(
mode="w+", suffix=".pcs"
) as transitions_file:
output, retval = run(
[
"crm_simulate",
"--simulate",
"--save-output",
new_cib_file.name,
"--save-graph",
transitions_file.name,
"--xml-pipe",
],
string_for_stdin=cib_dom.toxml(),
)
if retval != 0:
return err("Unable to run crm_simulate:\n%s" % output)
new_cib_file.seek(0)
transitions_file.seek(0)
return (
output,
parseString(transitions_file.read()),
parseString(new_cib_file.read()),
)
except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
return err("Unable to run crm_simulate:\n%s" % e)
except xml.etree.ElementTree.ParseError as e:
return err("Unable to run crm_simulate:\n%s" % e)
# DEPRECATED
# please use pcs.lib.pacemaker.simulate.get_operations_from_transitions
def get_operations_from_transitions(transitions_dom):
"""
Commandline options: no options
"""
operation_list = []
watched_operations = (
"start",
"stop",
"promote",
"demote",
"migrate_from",
"migrate_to",
)
for rsc_op in transitions_dom.getElementsByTagName("rsc_op"):
primitives = rsc_op.getElementsByTagName("primitive")
if not primitives:
continue
if rsc_op.getAttribute("operation").lower() not in watched_operations:
continue
for prim in primitives:
prim_id = prim.getAttribute("id")
operation_list.append(
(
int(rsc_op.getAttribute("id")),
{
"id": prim_id,
"long_id": prim.getAttribute("long-id") or prim_id,
"operation": rsc_op.getAttribute("operation").lower(),
"on_node": rsc_op.getAttribute("on_node"),
},
)
)
operation_list.sort(key=lambda x: x[0])
op_list = [op[1] for op in operation_list]
return op_list
def get_resources_location_from_operations(cib_dom, resources_operations):
"""
Commandline options:
* --force - allow constraints on any resource, may not have any effect as
an invalid constraint is ignored anyway
"""
locations = {}
for res_op in resources_operations:
operation = res_op["operation"]
if operation not in ("start", "promote", "migrate_from"):
continue
long_id = res_op["long_id"]
if long_id not in locations:
# Move clone instances as if they were non-cloned resources, it
# really works with current pacemaker (1.1.13-6). Otherwise there
# is probably no way to move them other than setting their
# stickiness to 0.
res_id = res_op["id"]
if ":" in res_id:
res_id = res_id.split(":")[0]
id_for_constraint = validate_constraint_resource(cib_dom, res_id)[2]
if not id_for_constraint:
continue
locations[long_id] = {
"id": res_op["id"],
"long_id": long_id,
"id_for_constraint": id_for_constraint,
}
if operation in ("start", "migrate_from"):
locations[long_id]["start_on_node"] = res_op["on_node"]
if operation == "promote":
locations[long_id]["promote_on_node"] = res_op["on_node"]
locations_clean = {
key: val
for key, val in locations.items()
if "start_on_node" in val or "promote_on_node" in val
}
return locations_clean
def get_remote_quorumtool_output(node):
"""
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
return sendHTTPRequest(node, "remote/get_quorum_info", None, False, False)
# return True if quorum_info is the string returned when the node is off
def is_node_offline_by_quorumtool_output(quorum_info):
"""
Commandline options: no options
"""
return quorum_info.strip() == "Cannot initialize CMAP service"
def dom_prepare_child_element(dom_element, tag_name, id_candidate):
"""
Commandline options: no options
"""
child_elements = []
for child in dom_element.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.tagName == tag_name:
child_elements.append(child)
if not child_elements:
dom = dom_element.ownerDocument
child_element = dom.createElement(tag_name)
child_element.setAttribute("id", find_unique_id(dom, id_candidate))
dom_element.appendChild(child_element)
else:
child_element = child_elements[0]
return child_element
def dom_update_nvset(dom_element, nvpair_tuples, tag_name, id_candidate):
"""
Commandline options: no options
"""
# Already ported to pcs.libcib.nvpair
# Do not ever remove the nvset element, even if it is empty. There may be
# ACLs set in pacemaker which allow "write" for nvpairs (adding, changing
# and removing) but not nvsets. In such a case, removing the nvset would
# cause the whole change to be rejected by pacemaker with a "permission
# denied" message.
# https://bugzilla.redhat.com/show_bug.cgi?id=1642514
if not nvpair_tuples:
return
only_removing = True
for name, value in nvpair_tuples:
if value != "":
only_removing = False
break
# Do not use dom.getElementsByTagName, that would get elements we do not
# want. For example, if dom_element is a clone, we would get the clone's
# attributes as well as the clone's primitive's attributes.
nvset_element_list = dom_get_children_by_tag_name(dom_element, tag_name)
# Do not create new nvset if we are only removing values from it.
if not nvset_element_list and only_removing:
return
if not nvset_element_list:
dom = dom_element.ownerDocument
nvset_element = dom.createElement(tag_name)
nvset_element.setAttribute("id", find_unique_id(dom, id_candidate))
dom_element.appendChild(nvset_element)
else:
nvset_element = nvset_element_list[0]
for name, value in nvpair_tuples:
dom_update_nv_pair(
nvset_element, name, value, nvset_element.getAttribute("id") + "-"
)
def dom_update_nv_pair(dom_element, name, value, id_prefix=""):
"""
Commandline options: no options
"""
# Do not ever remove the nvset element, even if it is empty. There may be
# ACLs set in pacemaker which allow "write" for nvpairs (adding, changing
# and removing) but not nvsets. In such a case, removing the nvset would
# cause the whole change to be rejected by pacemaker with a "permission
# denied" message.
# https://bugzilla.redhat.com/show_bug.cgi?id=1642514
dom = dom_element.ownerDocument
element_found = False
for el in dom_element.getElementsByTagName("nvpair"):
if el.getAttribute("name") == name:
element_found = True
if value == "":
dom_element.removeChild(el)
else:
el.setAttribute("value", value)
break
if not element_found and value != "":
el = dom.createElement("nvpair")
el.setAttribute("id", id_prefix + name)
el.setAttribute("name", name)
el.setAttribute("value", value)
dom_element.appendChild(el)
return dom_element
# Passed an array of strings ["a=b","c=d"], return array of tuples
# [("a","b"),("c","d")]
def convert_args_to_tuples(ra_values):
"""
Commandline options: no options
"""
ret = []
for ra_val in ra_values:
if ra_val.count("=") != 0:
split_val = ra_val.split("=", 1)
ret.append((split_val[0], split_val[1]))
return ret
def is_int(val):
try:
int(val)
return True
except ValueError:
return False
def dom_update_utilization(dom_element, attributes, id_prefix=""):
"""
Commandline options: no options
"""
attr_tuples = []
for name, value in sorted(attributes.items()):
if value != "" and not is_int(value):
err(
"Value of utilization attribute must be integer: "
"'{0}={1}'".format(name, value)
)
attr_tuples.append((name, value))
dom_update_nvset(
dom_element,
attr_tuples,
"utilization",
id_prefix + dom_element.getAttribute("id") + "-utilization",
)
def dom_update_meta_attr(dom_element, attributes):
"""
Commandline options: no options
"""
dom_update_nvset(
dom_element,
attributes,
"meta_attributes",
dom_element.getAttribute("id") + "-meta_attributes",
)
def dom_update_instance_attr(dom_element, attributes):
"""
Commandline options: no options
"""
dom_update_nvset(
dom_element,
attributes,
"instance_attributes",
dom_element.getAttribute("id") + "-instance_attributes",
)
def get_utilization(element, filter_name=None):
"""
Commandline options: no options
"""
utilization = {}
for e in element.getElementsByTagName("utilization"):
for u in e.getElementsByTagName("nvpair"):
name = u.getAttribute("name")
if filter_name is not None and name != filter_name:
continue
utilization[name] = u.getAttribute("value")
# Use just the first element of utilization attributes. We don't support
# utilization with rules just yet.
break
return utilization
def get_utilization_str(element, filter_name=None):
"""
Commandline options: no options
"""
output = []
for name, value in sorted(get_utilization(element, filter_name).items()):
output.append(name + "=" + value)
return " ".join(output)
def is_valid_cluster_property(prop_def_dict, property_name, value):
"""
Commandline options: no options
"""
if property_name not in prop_def_dict:
raise UnknownPropertyException(
"unknown cluster property: '{0}'".format(property_name)
)
return is_valid_cib_value(
prop_def_dict[property_name]["type"],
value,
prop_def_dict[property_name].get("enum", []),
)
def is_valid_cib_value(value_type, value, enum_options=()):
"""
Commandline options: no options
"""
value_type = value_type.lower()
if value_type == "enum":
return value in enum_options
if value_type == "boolean":
return is_boolean(value)
if value_type == "integer":
return is_score(value)
if value_type == "time":
return get_timeout_seconds(value) is not None
return True
def get_cluster_properties_definition():
"""
Commandline options: no options
"""
# we don't want to change these properties
banned_props = ["dc-version", "cluster-infrastructure"]
basic_props = [
"batch-limit",
"no-quorum-policy",
"symmetric-cluster",
"enable-acl",
"stonith-enabled",
"stonith-action",
"pe-input-series-max",
"stop-orphan-resources",
"stop-orphan-actions",
"cluster-delay",
"start-failure-is-fatal",
"pe-error-series-max",
"pe-warn-series-max",
]
readable_names = {
"batch-limit": "Batch Limit",
"no-quorum-policy": "No Quorum Policy",
"symmetric-cluster": "Symmetric",
"stonith-enabled": "Stonith Enabled",
"stonith-action": "Stonith Action",
"cluster-delay": "Cluster Delay",
"stop-orphan-resources": "Stop Orphan Resources",
"stop-orphan-actions": "Stop Orphan Actions",
"start-failure-is-fatal": "Start Failure is Fatal",
"pe-error-series-max": "PE Error Storage",
"pe-warn-series-max": "PE Warning Storage",
"pe-input-series-max": "PE Input Storage",
"enable-acl": "Enable ACLs",
}
sources = [
{
"name": "pacemaker-schedulerd",
"path": settings.pacemaker_schedulerd,
},
{
"name": "pacemaker-controld",
"path": settings.pacemaker_controld,
},
{
"name": "pacemaker-based",
"path": settings.pacemaker_based,
},
]
definition = {}
for source in sources:
stdout, stderr, retval = cmd_runner().run([source["path"], "metadata"])
if retval != 0:
err("unable to run {0}\n{1}".format(source["name"], stderr))
try:
etree = ET.fromstring(stdout)
for e in etree.findall("./parameters/parameter"):
prop = get_cluster_property_from_xml(e)
if prop["name"] not in banned_props:
prop["source"] = source["name"]
prop["advanced"] = prop["name"] not in basic_props
if prop["name"] in readable_names:
prop["readable_name"] = readable_names[prop["name"]]
else:
prop["readable_name"] = prop["name"]
definition[prop["name"]] = prop
except xml.parsers.expat.ExpatError as e:
err(
"unable to parse {0} metadata definition: {1}".format(
source["name"], e
)
)
except ET.ParseError as e:
err(
"unable to parse {0} metadata definition: {1}".format(
source["name"], e
)
)
return definition
def get_cluster_property_from_xml(etree_el):
"""
Commandline options: no options
"""
prop = {
"name": etree_el.get("name", ""),
"shortdesc": "",
"longdesc": "",
}
for item in ["shortdesc", "longdesc"]:
item_el = etree_el.find(item)
if item_el is not None and item_el.text is not None:
prop[item] = item_el.text
content = etree_el.find("content")
if content is None:
prop["type"] = ""
prop["default"] = ""
else:
prop["type"] = content.get("type", "")
prop["default"] = content.get("default", "")
if prop["type"] == "enum":
prop["enum"] = []
if prop["longdesc"]:
values = prop["longdesc"].split(" Allowed values: ")
if len(values) == 2:
prop["enum"] = values[1].split(", ")
prop["longdesc"] = values[0]
if prop["default"] not in prop["enum"]:
prop["enum"].append(prop["default"])
if prop["longdesc"] == prop["shortdesc"]:
prop["longdesc"] = ""
return prop
def get_lib_env() -> LibraryEnvironment:
"""
Commandline options:
* -f - CIB file
* --corosync_conf - corosync.conf file
* --request-timeout - timeout of HTTP requests
"""
user = None
groups = None
if os.geteuid() == 0:
for name in ("CIB_user", "CIB_user_groups"):
if name in os.environ and os.environ[name].strip():
value = os.environ[name].strip()
if name == "CIB_user":
user = value
else:
groups = value.split(" ")
cib_data = None
if usefile:
cib_data = get_cib()
corosync_conf_data = None
if "--corosync_conf" in pcs_options:
conf = pcs_options["--corosync_conf"]
try:
corosync_conf_data = open(conf).read()
except IOError as e:
err("Unable to read %s: %s" % (conf, e.strerror))
return LibraryEnvironment(
logging.getLogger("pcs"),
get_report_processor(),
user,
groups,
cib_data,
corosync_conf_data,
known_hosts_getter=read_known_hosts_file,
request_timeout=pcs_options.get("--request-timeout"),
)
def get_cib_user_groups():
"""
Commandline options: no options
"""
user = None
groups = None
if os.geteuid() == 0:
for name in ("CIB_user", "CIB_user_groups"):
if name in os.environ and os.environ[name].strip():
value = os.environ[name].strip()
if name == "CIB_user":
user = value
else:
groups = value.split(" ")
return user, groups
def get_cli_env():
"""
Commandline options:
* --debug
* --request-timeout
"""
env = Env()
env.user, env.groups = get_cib_user_groups()
env.known_hosts_getter = read_known_hosts_file
env.report_processor = get_report_processor()
env.request_timeout = pcs_options.get("--request-timeout")
return env
def get_middleware_factory():
"""
Commandline options:
* --corosync_conf
* --name
* --booth-conf
* --booth-key
* -f
"""
return middleware.create_middleware_factory(
cib=middleware.cib(filename if usefile else None, touch_cib_file),
corosync_conf_existing=middleware.corosync_conf_existing(
pcs_options.get("--corosync_conf", None)
),
booth_conf=pcs.cli.booth.env.middleware_config(
pcs_options.get("--booth-conf", None),
pcs_options.get("--booth-key", None),
),
)
def get_library_wrapper():
"""
Commandline options:
* --debug
* --request-timeout
* --corosync_conf
* --name
* --booth-conf
* --booth-key
* -f
NOTE: usage of options may depend on used middleware for particular command
"""
return Library(get_cli_env(), get_middleware_factory())
def exit_on_cmdline_input_errror(
error: CmdLineInputError,
main_name: str,
usage_name: Sequence[str],
) -> None:
if not error or (not error.message or error.show_both_usage_and_message):
usage.show(main_name, usage_name)
if error and error.message:
err(error.message, exit_after_error=False)
if error and error.hint:
sys.stderr.write("Hint: {0}\n".format(error.hint))
sys.exit(1)
def get_report_processor() -> ReportProcessor:
return ReportProcessorToConsole(debug=("--debug" in pcs_options))
def get_set_properties(prop_name=None, defaults=None):
"""
Commandline options:
* -f - CIB file
"""
properties = {} if defaults is None else dict(defaults)
(output, retVal) = run(["cibadmin", "-Q", "--scope", "crm_config"])
if retVal != 0:
err("unable to get crm_config\n" + output)
dom = parseString(output)
de = dom.documentElement
crm_config_properties = de.getElementsByTagName("nvpair")
for prop in crm_config_properties:
if prop_name is None or (prop_name == prop.getAttribute("name")):
properties[prop.getAttribute("name")] = prop.getAttribute("value")
return properties
def get_user_and_pass():
"""
Commandline options:
* -u - username
* -p - password
"""
username = (
pcs_options["-u"]
if "-u" in pcs_options
else get_terminal_input("Username: ")
)
password = (
pcs_options["-p"] if "-p" in pcs_options else get_terminal_password()
)
return username, password
def get_input_modifiers():
return InputModifiers(pcs_options)
def get_token_from_file(file_name: str) -> str:
try:
with open(file_name, "rb") as file:
max_size = settings.pcsd_token_max_bytes # type: ignore
value_bytes = file.read(max_size + 1)
if len(value_bytes) > max_size:
err(f"Maximal token size of {max_size} bytes exceeded")
if not value_bytes:
err(f"File '{file_name}' is empty")
return base64.b64encode(value_bytes).decode("utf-8")
except OSError as e:
err(f"Unable to read file '{file_name}': {e}", exit_after_error=False)
raise SystemExit(1) from e
| gpl-2.0 |
woodpecker1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/memoized.py | 211 | 2482 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python does not (yet) seem to provide automatic memoization. So we've
# written a small decorator to do so.
import functools
class memoized(object):
def __init__(self, function):
self._function = function
self._results_cache = {}
def __call__(self, *args):
try:
return self._results_cache[args]
except KeyError:
# If we didn't find the args in our cache, call and save the results.
result = self._function(*args)
self._results_cache[args] = result
return result
# FIXME: We may need to handle TypeError here in the case
# that "args" is not a valid dictionary key.
# Use python "descriptor" protocol __get__ to appear
# invisible during property access.
def __get__(self, instance, owner):
# Return a function partial with obj already bound as self.
return functools.partial(self.__call__, instance)
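# Typical usage (illustrative):
#   @memoized
#   def fibonacci(n):
#       return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
# Repeated calls with the same arguments reuse the cached result.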
| bsd-3-clause |
MycChiu/tensorflow | tensorflow/contrib/tensor_forest/python/kernel_tests/sample_inputs_op_test.py | 56 | 6058 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.sample_inputs_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SampleInputsTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 10.], [-10., 2.], # node 1
[20., 50.], [1., -2.]] # node 2
self.node_map = [-1, 0, 1]
self.leaves = [1, 1, 2, 2]
self.split_features = [[-1, -1, -1], [1, 0, -1], [-1, -1, -1]]
self.split_thresholds = [[0., 0., 0.], [5., -2., 0.], [0., 0., 0.]]
spec_proto = data_ops.TensorForestDataSpec()
f1 = spec_proto.dense.add()
f1.name = 'f1'
f1.original_type = data_ops.DATA_FLOAT
f1.size = 1
f2 = spec_proto.dense.add()
f2.name = 'f2'
f2.original_type = data_ops.DATA_FLOAT
f2.size = 1
spec_proto.dense_features_size = 2
self.data_spec = spec_proto.SerializeToString()
def testSimple(self):
with self.test_session():
variables.global_variables_initializer().run()
(indices, feature_updates,
threshold_updates) = (tensor_forest_ops.sample_inputs(
self.input_data, [], [], [], [],
self.node_map,
self.leaves,
self.split_features,
self.split_thresholds,
split_initializations_per_input=1,
input_spec=self.data_spec,
split_sampling_random_seed=2))
self.assertAllEqual([1, 0], indices.eval())
self.assertAllEqual([[1, 0, 1], [1, 1, -1]], feature_updates.eval())
self.assertAllEqual([[5., -2., 50.], [10., 2., 0.]],
threshold_updates.eval())
def testSparse(self):
sparse_shape = [4, 10]
sparse_indices = [[0, 0], [0, 4], [0, 9],
[1, 0], [1, 7],
[2, 0],
[3, 1], [3, 4]]
sparse_values = [3.0, -1.0, 0.5,
1.5, 6.0,
-2.0,
-0.5, 2.0]
spec_proto = data_ops.TensorForestDataSpec()
f1 = spec_proto.sparse.add()
f1.name = 'f1'
f1.original_type = data_ops.DATA_FLOAT
f1.size = -1
spec_proto.dense_features_size = 0
data_spec = spec_proto.SerializeToString()
with self.test_session():
variables.global_variables_initializer().run()
(indices, feature_updates,
threshold_updates) = (tensor_forest_ops.sample_inputs(
[],
sparse_indices,
sparse_values,
sparse_shape, [],
self.node_map,
self.leaves,
self.split_features,
self.split_thresholds,
input_spec=data_spec,
split_initializations_per_input=1,
split_sampling_random_seed=3))
self.assertAllEqual([1, 0], indices.eval())
self.assertAllEqual([[1, 0, 0], [4, 0, -1]], feature_updates.eval())
self.assertAllEqual([[5., -2., -2.], [-1., 1.5, 0.]],
threshold_updates.eval())
def testWeights(self):
with self.test_session():
variables.global_variables_initializer().run()
(indices, feature_updates,
threshold_updates) = (tensor_forest_ops.sample_inputs(
self.input_data, [], [], [], [0.5, 0.1, 0.8, 0.7],
self.node_map,
self.leaves,
self.split_features,
self.split_thresholds,
input_spec=self.data_spec,
split_initializations_per_input=1,
split_sampling_random_seed=3))
self.assertAllEqual([1, 0], indices.eval())
self.assertAllEqual([[1, 0, 0], [-1, -1, -1]], feature_updates.eval())
self.assertAllEqual([[5., -2., 20.], [0., 0., 0.]],
threshold_updates.eval())
def testNoAccumulators(self):
with self.test_session():
variables.global_variables_initializer().run()
(indices, feature_updates,
threshold_updates) = (tensor_forest_ops.sample_inputs(
self.input_data, [], [], [], [], [-1] * 3,
self.leaves,
self.split_features,
self.split_thresholds,
input_spec=self.data_spec,
split_initializations_per_input=1,
split_sampling_random_seed=3))
self.assertAllEqual([], indices.eval())
self.assertAllEqual((0, 3), feature_updates.eval().shape)
self.assertAllEqual((0, 3), threshold_updates.eval().shape)
def testBadInput(self):
del self.split_features[1]
with self.test_session():
variables.global_variables_initializer().run()
with self.assertRaisesOpError(
'split_features and split_thresholds should be the same shape.'):
indices, _, _ = tensor_forest_ops.sample_inputs(
self.input_data, [], [], [], [],
self.node_map,
self.leaves,
self.split_features,
self.split_thresholds,
input_spec=self.data_spec,
split_initializations_per_input=1,
split_sampling_random_seed=3)
self.assertAllEqual([], indices.eval())
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
loveward/yingsuo | shadowsocks/obfsplugin/http_simple.py | 4 | 12316 | #!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import datetime
import random
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord, chr
def create_http_simple_obfs(method):
return http_simple(method)
def create_http_post_obfs(method):
return http_post(method)
def create_random_head_obfs(method):
return random_head(method)
obfs_map = {
'http_simple': (create_http_simple_obfs,),
'http_simple_compatible': (create_http_simple_obfs,),
'http_post': (create_http_post_obfs,),
'http_post_compatible': (create_http_post_obfs,),
'random_head': (create_random_head_obfs,),
'random_head_compatible': (create_random_head_obfs,),
}
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False
class http_simple(plain.plain):
def __init__(self, method):
self.method = method
self.has_sent_header = False
self.has_recv_header = False
self.host = None
self.port = 0
self.recv_buffer = b''
self.user_agent = [b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0",
b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/44.0",
b"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36",
b"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0",
b"Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)",
b"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
b"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)",
b"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
b"Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/BuildID) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36",
b"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3",
b"Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3"]
def encode_head(self, buf):
hexstr = binascii.hexlify(buf)
chs = []
for i in range(0, len(hexstr), 2):
chs.append(b"%" + hexstr[i:i+2])
return b''.join(chs)
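# Illustrative encoding (hypothetical bytes): encode_head(b"\x01\xab")
# returns b"%01%ab".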
def client_encode(self, buf):
if self.has_sent_header:
return buf
head_size = len(self.server_info.iv) + self.server_info.head_len
if len(buf) - head_size > 64:
headlen = head_size + random.randint(0, 64)
else:
headlen = len(buf)
headdata = buf[:headlen]
buf = buf[headlen:]
port = b''
if self.server_info.port != 80:
port = b':' + to_bytes(str(self.server_info.port))
body = None
hosts = (self.server_info.obfs_param or self.server_info.host)
pos = hosts.find("#")
if pos >= 0:
body = hosts[pos + 1:].replace("\n", "\r\n")
body = body.replace("\\n", "\r\n")
hosts = hosts[:pos]
hosts = hosts.split(',')
host = random.choice(hosts)
http_head = b"GET /" + self.encode_head(headdata) + b" HTTP/1.1\r\n"
http_head += b"Host: " + to_bytes(host) + port + b"\r\n"
if body:
http_head += body + "\r\n\r\n"
else:
http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n"
http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nDNT: 1\r\nConnection: keep-alive\r\n\r\n"
self.has_sent_header = True
return http_head + buf
def client_decode(self, buf):
if self.has_recv_header:
return (buf, False)
pos = buf.find(b'\r\n\r\n')
if pos >= 0:
self.has_recv_header = True
return (buf[pos + 4:], False)
else:
return (b'', False)
def server_encode(self, buf):
if self.has_sent_header:
return buf
header = b'HTTP/1.1 200 OK\r\nConnection: keep-alive\r\nContent-Encoding: gzip\r\nContent-Type: text/html\r\nDate: '
header += to_bytes(datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
header += b'\r\nServer: nginx\r\nVary: Accept-Encoding\r\n\r\n'
self.has_sent_header = True
return header + buf
def get_data_from_http_header(self, buf):
ret_buf = b''
lines = buf.split(b'\r\n')
if lines and len(lines) > 1:
hex_items = lines[0].split(b'%')
if hex_items and len(hex_items) > 1:
for index in range(1, len(hex_items)):
if len(hex_items[index]) < 2:
                        ret_buf += binascii.unhexlify(b'0' + hex_items[index])
break
elif len(hex_items[index]) > 2:
ret_buf += binascii.unhexlify(hex_items[index][:2])
break
else:
ret_buf += binascii.unhexlify(hex_items[index])
return ret_buf
return b''
def get_host_from_http_header(self, buf):
ret_buf = b''
lines = buf.split(b'\r\n')
if lines and len(lines) > 1:
for line in lines:
if match_begin(line, b"Host: "):
return common.to_str(line[6:])
def not_match_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
if self.method == 'http_simple':
return (b'E'*2048, False, False)
return (buf, True, False)
def error_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
return (b'E'*2048, False, False)
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.recv_buffer += buf
buf = self.recv_buffer
if len(buf) > 10:
if match_begin(buf, b'GET ') or match_begin(buf, b'POST '):
if len(buf) > 65536:
self.recv_buffer = None
logging.warn('http_simple: over size')
return self.not_match_return(buf)
else: #not http header, run on original protocol
self.recv_buffer = None
logging.debug('http_simple: not match begin')
return self.not_match_return(buf)
else:
return (b'', True, False)
if b'\r\n\r\n' in buf:
datas = buf.split(b'\r\n\r\n', 1)
ret_buf = self.get_data_from_http_header(buf)
host = self.get_host_from_http_header(buf)
if host and self.server_info.obfs_param:
pos = host.find(":")
if pos >= 0:
host = host[:pos]
hosts = self.server_info.obfs_param.split(',')
if host not in hosts:
return self.not_match_return(buf)
if len(ret_buf) < 4:
return self.error_return(buf)
if len(datas) > 1:
ret_buf += datas[1]
if len(ret_buf) >= 13:
self.has_recv_header = True
return (ret_buf, True, False)
return self.not_match_return(buf)
else:
return (b'', True, False)
class http_post(http_simple):
def __init__(self, method):
super(http_post, self).__init__(method)
def boundary(self):
return b''.join([random.choice(b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") for i in range(32)])
def client_encode(self, buf):
if self.has_sent_header:
return buf
head_size = len(self.server_info.iv) + self.server_info.head_len
if len(buf) - head_size > 64:
headlen = head_size + random.randint(0, 64)
else:
headlen = len(buf)
headdata = buf[:headlen]
buf = buf[headlen:]
port = b''
if self.server_info.port != 80:
port = b':' + to_bytes(str(self.server_info.port))
body = None
hosts = (self.server_info.obfs_param or self.server_info.host)
pos = hosts.find("#")
if pos >= 0:
body = hosts[pos + 1:].replace("\\n", "\r\n")
hosts = hosts[:pos]
hosts = hosts.split(',')
host = random.choice(hosts)
http_head = b"POST /" + self.encode_head(headdata) + b" HTTP/1.1\r\n"
http_head += b"Host: " + to_bytes(host) + port + b"\r\n"
        if body:
            http_head += to_bytes(body) + b"\r\n\r\n"
else:
http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n"
http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\n"
http_head += b"Content-Type: multipart/form-data; boundary=" + self.boundary() + b"\r\nDNT: 1\r\n"
http_head += "Connection: keep-alive\r\n\r\n"
self.has_sent_header = True
return http_head + buf
def not_match_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
if self.method == 'http_post':
return (b'E'*2048, False, False)
return (buf, True, False)
class random_head(plain.plain):
def __init__(self, method):
self.method = method
self.has_sent_header = False
self.has_recv_header = False
self.raw_trans_sent = False
self.raw_trans_recv = False
self.send_buffer = b''
def client_encode(self, buf):
if self.raw_trans_sent:
return buf
self.send_buffer += buf
if not self.has_sent_header:
self.has_sent_header = True
data = os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4)
crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff
return data + struct.pack('<I', crc)
if self.raw_trans_recv:
ret = self.send_buffer
self.send_buffer = b''
self.raw_trans_sent = True
return ret
return b''
def client_decode(self, buf):
if self.raw_trans_recv:
return (buf, False)
self.raw_trans_recv = True
return (b'', True)
def server_encode(self, buf):
if self.has_sent_header:
return buf
self.has_sent_header = True
return os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4)
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.has_recv_header = True
crc = binascii.crc32(buf) & 0xffffffff
if crc != 0xffffffff:
self.has_sent_header = True
if self.method == 'random_head':
return (b'E'*2048, False, False)
return (buf, True, False)
# (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
return (b'', False, True)
| apache-2.0 |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/pip/commands/uninstall.py | 798 | 2884 | from __future__ import absolute_import
import pip
from pip.wheel import WheelCache
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
name = 'uninstall'
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
summary = 'Uninstall packages.'
def __init__(self, *args, **kw):
super(UninstallCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements '
'file. This option can be used multiple times.',
)
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
with self._build_session(options) as session:
format_control = pip.index.FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
requirement_set = RequirementSet(
build_dir=None,
src_dir=None,
download_dir=None,
isolated=options.isolated_mode,
session=session,
wheel_cache=wheel_cache,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
name, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
)
for filename in options.requirements:
for req in parse_requirements(
filename,
options=options,
session=session,
wheel_cache=wheel_cache):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
raise InstallationError(
'You must give at least one requirement to %(name)s (see '
'"pip help %(name)s")' % dict(name=self.name)
)
requirement_set.uninstall(auto_confirm=options.yes)
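# Example invocations (illustrative), matching the usage string and options above:
#   pip uninstall simplejson            # prompts before removing files
#   pip uninstall -y simplejson         # removes without asking for confirmation
#   pip uninstall -r requirements.txt   # uninstalls every package listed in the file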
| mit |
else/mosquitto | test/lib/02-subscribe-qos2.py | 7 | 2349 | #!/usr/bin/env python
# Test whether a client sends a correct SUBSCRIBE to a topic with QoS 2.
# The client should connect to port 1888 with keepalive=60, clean session set,
# and client id subscribe-qos2-test
# The test will send a CONNACK message to the client with rc=0. Upon receiving
# the CONNACK and verifying that rc=0, the client should send a SUBSCRIBE
# message to subscribe to topic "qos2/test" with QoS=2. If rc!=0, the client
# should exit with an error.
# Upon receiving the correct SUBSCRIBE message, the test will reply with a
# SUBACK message with the accepted QoS set to 2. On receiving the SUBACK
# message, the client should send a DISCONNECT message.
import inspect
import os
import subprocess
import socket
import sys
import time
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("subscribe-qos2-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "qos2/test", 2)
suback_packet = mosq_test.gen_suback(mid, 2)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "subscribe", subscribe_packet):
conn.send(suback_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| bsd-3-clause |
Scaravex/clue-hackathon | clustering/time_profile_cluster.py | 2 | 1438 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 19 11:21:47 2017
@author: mskara
"""
import pandas as pd
import matplotlib.pyplot as plt
from src.pre_process import load_binary
def create_profile_for_symptoms(df, date_range=15):
profiles = {}
for symptom in symptoms:
temp = df[df['symptom'] == symptom]
sympt_profile = temp.groupby(by=temp['day_in_cycle']).mean()[0:date_range]
plt.plot(sympt_profile)
profiles[symptom] = sympt_profile
return profiles
def check_probability_access(data):
'''find probability_access'''
df_active = data['active_days']
df_cycles = data['cycles']
access_prob = []
for i in range(1, 30):
access_prob.append((df_active['day_in_cycle'] == i).sum()
/(df_cycles['cycle_length'][df_cycles['cycle_length']>=i]).count())
# access_prob.plot(X)
return access_prob
df = pd.read_csv('result.txt')
# `symptoms` is not defined anywhere in this script; derive it from the data
# so the loops below do not raise a NameError.
symptoms = df['symptom'].unique()
# Profiles are only computed up to day 15; beyond that the predictions are unreliable.
daily_profiles = create_profile_for_symptoms(df, date_range=15)
data = load_binary()
access_profile = check_probability_access(data)
plt.plot(access_profile[0:29])  # probability of access
for symptom in symptoms:
real_prob = daily_profiles[symptom].copy()
for i in range(15):
real_prob.loc[i]=real_prob.loc[i]/access_profile[i]
plt.plot(real_prob)
| apache-2.0 |
bottompawn/kbengine | kbe/res/scripts/common/Lib/multiprocessing/process.py | 98 | 9144 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['BaseProcess', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_children):
if p._popen.poll() is not None:
_children.discard(p)
#
# The `Process` class
#
class BaseProcess(object):
'''
Process objects represent activity that is run in a separate process
The class is analogous to `threading.Thread`
'''
def _Popen(self):
raise NotImplementedError
def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
*, daemon=None):
assert group is None, 'group argument must be None for now'
count = next(_process_counter)
self._identity = _current_process._identity + (count,)
self._config = _current_process._config.copy()
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
if daemon is not None:
self.daemon = daemon
_dangling.add(self)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._config.get('daemon'), \
'daemonic processes are not allowed to have children'
_cleanup()
self._popen = self._Popen(self)
self._sentinel = self._popen.sentinel
_children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, str), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._config.get('daemon', False)
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._config['daemon'] = daemonic
@property
def authkey(self):
return self._config['authkey']
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._config['authkey'] = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
@property
def sentinel(self):
'''
Return a file descriptor (Unix) or handle (Windows) suitable for
waiting for process termination.
'''
try:
return self._sentinel
except AttributeError:
raise ValueError("process not started")
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self.daemon and ' daemon' or '')
##
def _bootstrap(self):
from . import util, context
global _current_process, _process_counter, _children
try:
if self._start_method is not None:
context._force_start_method(self._start_method)
_process_counter = itertools.count(1)
_children = set()
if sys.stdin is not None:
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
old_process = _current_process
_current_process = self
try:
util._finalizer_registry.clear()
util._run_after_forkers()
finally:
# delay finalization of the old process object until after
# _run_after_forkers() is executed
del old_process
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit as e:
if not e.args:
exitcode = 1
elif isinstance(e.args[0], int):
exitcode = e.args[0]
else:
sys.stderr.write(str(e.args[0]) + '\n')
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
traceback.print_exc()
finally:
util.info('process exiting with exitcode %d' % exitcode)
sys.stdout.flush()
sys.stderr.flush()
return exitcode
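# Illustrative usage (not part of this module): the concrete Process class that
# the multiprocessing package derives from BaseProcess above is used like this:
#   from multiprocessing import Process
#   def greet(name):
#       print('hello', name)
#   p = Process(target=greet, args=('world',))
#   p.start()
#   p.join()
#   assert p.exitcode == 0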
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .context import get_spawning_popen
if get_spawning_popen() is None:
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(BaseProcess):
def __init__(self):
self._identity = ()
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._config = {'authkey': AuthenticationString(os.urandom(32)),
'semprefix': '/mp'}
# Note that some versions of FreeBSD only allow named
# semaphores to have names of up to 14 characters. Therefore
# we choose a short prefix.
#
# On MacOSX in a sandbox it may be necessary to use a
# different prefix -- see #19478.
#
# Everything in self._config will be inherited by descendant
# processes.
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
# For debug and leak testing
_dangling = WeakSet()
| lgpl-3.0 |
onceuponatimeforever/oh-mainline | vendor/packages/oauthlib/oauthlib/oauth2/rfc6749/request_validator.py | 36 | 19514 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import logging
log = logging.getLogger(__name__)
class RequestValidator(object):
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
        According to RFC 6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Refresh Token Grant
.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
"""
return True
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate client through means outside the OAuth 2 spec.
Means of authentication is negotiated beforehand and may for example
be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
header.
        Headers may be accessed through request.headers and parameters found in
both body and query can be obtained by direct attribute access, i.e.
request.client_id for client_id in the URL query.
OBS! Certain grant types rely on this authentication, possibly with
other fallbacks, and for them to recognize this authorization please
set the client attribute on the request (request.client). Note that
preferably this client object should have a client_id attribute of
unicode type (request.client.client_id).
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant (may be disabled)
- Client Credentials Grant
- Refresh Token Grant
.. _`HTTP Basic Authentication Scheme`: http://tools.ietf.org/html/rfc1945#section-11.1
"""
raise NotImplementedError('Subclasses must implement this method.')
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate
through other means, such as using HTTP Basic.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
*args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
If the client specifies a redirect_uri when obtaining code then
that redirect URI must be bound to the code and verified equal
in this method.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client
:param refresh_token: Unicode refresh_token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Refresh token grant
"""
return False
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier
:param code: The authorization code grant (request.code).
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Revocation Endpoint
"""
raise NotImplementedError('Subclasses must implement this method.')
def rotate_refresh_token(self, request):
"""Determine whether to rotate the refresh token. Default, yes.
When access tokens are refreshed the old refresh token can be kept
        or replaced with a new one (rotated). Return True to rotate and
        False to keep the original.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh Token Grant
"""
return True
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be associated with:
        - a client and its client_id
- the redirect URI used (request.redirect_uri)
- whether the redirect URI used is the client default or not
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
The authorization code grant dict (code) holds at least the key 'code'::
{'code': 'sdf345jsdf0934f'}
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token.
The Bearer token should at minimum be associated with:
        - a client and its client_id, if available
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
- an expiration time
- a refresh token, if issued
The Bearer token dict may hold a number of items::
{
'token_type': 'Bearer',
'access_token': 'askfjh234as9sd8',
'expires_in': 3600,
'scope': 'string of space separated authorized scopes',
'refresh_token': '23sdf876234', # if issued
'state': 'given_by_client', # if supplied by client
}
Note that while "scope" is a string-separated list of authorized scopes,
the original list is still available in request.scopes
:param client_id: Unicode client identifier
:param token: A Bearer token dict
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by all core grant types issuing Bearer tokens:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant (might not associate a client)
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_bearer_token(self, token, scopes, request):
"""Ensure the Bearer token is valid and authorized access to scopes.
:param token: A string of random characters.
:param scopes: A list of scopes associated with the protected resource.
:param request: The HTTP Request (oauthlib.common.Request)
A key to OAuth 2 security and restricting impact of leaked tokens is
the short expiration time of tokens, *always ensure the token has not
expired!*.
Two different approaches to scope validation:
1) all(scopes). The token must be authorized access to all scopes
associated with the resource. For example, the
token has access to ``read-only`` and ``images``,
thus the client can view images but not upload new.
Allows for fine grained access control through
combining various scopes.
2) any(scopes). The token must be authorized access to one of the
scopes associated with the resource. For example,
token has access to ``read-only-images``.
Allows for fine grained, although arguably less
convenient, access control.
A powerful way to use scopes would mimic UNIX ACLs and see a scope
as a group with certain privileges. For a restful API these might
map to HTTP verbs instead of read, write and execute.
Note, the request.user attribute can be set to the resource owner
associated with this token. Similarly the request.client and
request.scopes attribute can be set to associated client object
and authorized scopes. If you then use a decorator such as the
one provided for django these attributes will be made available
in all protected views as keyword arguments.
:param token: Unicode Bearer token
:param scopes: List of scopes (defined by you)
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is indirectly used by all core Bearer token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Ensure the authorization_code is valid and assigned to client.
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes and
request.state must also be set.
:param client_id: Unicode client identifier
:param code: Unicode authorization code
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the grant_type requested.
:param client_id: Unicode client identifier
:param grant_type: Unicode grant type, i.e. authorization_code, password.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param redirect_uri: Unicode absolute URI
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the response_type requested.
:param client_id: Unicode client identifier
:param response_type: Unicode response type, i.e. code, token.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Ensure the client is authorized access to requested scopes.
:param client_id: Unicode client identifier
:param scopes: List of scopes (defined by you)
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_user(self, username, password, client, request, *args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
        persistence method used (commonly, save_bearer_token).
:param username: Unicode username
:param password: Unicode password
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
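# Illustrative sketch (not part of oauthlib): a minimal subclass applying the
# "all(scopes)" policy described in validate_bearer_token's docstring. The
# in-memory token store is a hypothetical stand-in for a real database lookup.
class _ExampleValidator(RequestValidator):
    _tokens = {'askfjh234as9sd8': {'expired': False,
                                   'scopes': ['read-only', 'images']}}
    def validate_bearer_token(self, token, scopes, request):
        record = self._tokens.get(token)
        if record is None or record['expired']:
            return False
        # Policy 1 from the docstring: the token must carry every scope
        # required by the protected resource.
        return all(scope in record['scopes'] for scope in (scopes or []))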
| agpl-3.0 |
nathanaevitas/odoo | openerp/addons/purchase/company.py | 383 | 1576 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'po_lead': fields.float(
'Purchase Lead Time', required=True,
help="Margin of error for supplier lead times. When the system"\
"generates Purchase Orders for procuring products,"\
"they will be scheduled that many days earlier "\
"to cope with unexpected supplier delays."),
}
_defaults = {
'po_lead': lambda *a: 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/EnviroFacts/Toxins/FacilitiesSearchByZip.py | 5 | 4077 | # -*- coding: utf-8 -*-
###############################################################################
#
# FacilitiesSearchByZip
# Retrieves a list of EPA-regulated facilities in the Toxics Release Inventory (TRI) database within a given ZIP code.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FacilitiesSearchByZip(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FacilitiesSearchByZip Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FacilitiesSearchByZip, self).__init__(temboo_session, '/Library/EnviroFacts/Toxins/FacilitiesSearchByZip')
def new_input_set(self):
return FacilitiesSearchByZipInputSet()
def _make_result_set(self, result, path):
return FacilitiesSearchByZipResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FacilitiesSearchByZipChoreographyExecution(session, exec_id, path)
class FacilitiesSearchByZipInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FacilitiesSearchByZip
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Specify the desired response format. Valid formats are: xml (the default) and csv.)
"""
super(FacilitiesSearchByZipInputSet, self)._set_input('ResponseFormat', value)
def set_RowEnd(self, value):
"""
Set the value of the RowEnd input for this Choreo. ((optional, integer) Number 1 or greater indicates the ending row number of the results displayed. Default is 4999 when RowStart is 0. Up to 5000 entries are returned in the output.)
"""
super(FacilitiesSearchByZipInputSet, self)._set_input('RowEnd', value)
def set_RowStart(self, value):
"""
Set the value of the RowStart input for this Choreo. ((optional, integer) Indicates the starting row number of the results displayed. Default is 0.)
"""
super(FacilitiesSearchByZipInputSet, self)._set_input('RowStart', value)
def set_Zip(self, value):
"""
Set the value of the Zip input for this Choreo. ((required, string) Zip code to be searched.)
"""
super(FacilitiesSearchByZipInputSet, self)._set_input('Zip', value)
class FacilitiesSearchByZipResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FacilitiesSearchByZip Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from EnviroFacts.)
"""
return self._output.get('Response', None)
class FacilitiesSearchByZipChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FacilitiesSearchByZipResultSet(response, path)
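# Illustrative usage (not part of this generated file). The TembooSession
# constructor arguments and the execute_with_results() call follow the typical
# Temboo SDK pattern and are assumptions, not taken from this file:
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = FacilitiesSearchByZip(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Zip('11215')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())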
| gpl-2.0 |
swdream/neutron | neutron/notifiers/batch_notifier.py | 56 | 2337 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
class BatchNotifier(object):
def __init__(self, batch_interval, callback):
self.pending_events = []
self._waiting_to_send = False
self.callback = callback
self.batch_interval = batch_interval
def queue_event(self, event):
"""Called to queue sending an event with the next batch of events.
Sending events individually, as they occur, has been problematic as it
can result in a flood of sends. Previously, there was a loopingcall
thread that would send batched events on a periodic interval. However,
maintaining a persistent thread in the loopingcall was also
problematic.
This replaces the loopingcall with a mechanism that creates a
short-lived thread on demand when the first event is queued. That
        thread will sleep once for the same batch_interval to allow other
events to queue up in pending_events and then will send them when it
wakes.
If a thread is already alive and waiting, this call will simply queue
the event and return leaving it up to the thread to send it.
:param event: the event that occurred.
"""
if not event:
return
self.pending_events.append(event)
if self._waiting_to_send:
return
self._waiting_to_send = True
def last_out_sends():
eventlet.sleep(self.batch_interval)
self._waiting_to_send = False
self._notify()
eventlet.spawn_n(last_out_sends)
def _notify(self):
if not self.pending_events:
return
batched_events = self.pending_events
self.pending_events = []
self.callback(batched_events)
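# Illustrative usage sketch (not part of this module): the callback receives the
# list of events queued during one batch_interval window.
#   def send_events(batched_events):
#       print(batched_events)
#   notifier = BatchNotifier(batch_interval=2, callback=send_events)
#   notifier.queue_event('port-created')
#   notifier.queue_event('port-updated')   # coalesced into the same batch
#   eventlet.sleep(3)                      # let the spawned greenthread fire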
| apache-2.0 |
Wuteyan/VTK | Examples/Rendering/Python/FilterCADPart.py | 42 | 2338 | #!/usr/bin/env python
# This simple example shows how to do simple filtering in a pipeline.
# See CADPart.py and Cylinder.py for related information.
import vtk
from vtk.util.colors import light_grey
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This creates a polygonal cylinder model with eight circumferential
# facets.
part = vtk.vtkSTLReader()
part.SetFileName(VTK_DATA_ROOT + "/Data/42400-IDGH.stl")
# A filter is a module that takes at least one input and produces at
# least one output. The SetInput and GetOutput methods are used to do
# the connection. What is returned by GetOutput is a particular
# dataset type. If the type is compatible with the SetInput method,
# then the filters can be connected together.
#
# Here we add a filter that computes surface normals from the geometry.
shrink = vtk.vtkShrinkPolyData()
shrink.SetInputConnection(part.GetOutputPort())
shrink.SetShrinkFactor(0.85)
# The mapper is responsible for pushing the geometry into the graphics
# library. It may also do color mapping, if scalars or other
# attributes are defined.
partMapper = vtk.vtkPolyDataMapper()
partMapper.SetInputConnection(shrink.GetOutputPort())
# The LOD actor is a special type of actor. It will change appearance
# in order to render faster. At the highest resolution, it renders
# everything just like an actor. The middle level is a point cloud,
# and the lowest level is a simple bounding box.
partActor = vtk.vtkLODActor()
partActor.SetMapper(partMapper)
partActor.GetProperty().SetColor(light_grey)
partActor.RotateX(30.0)
partActor.RotateY(-45.0)
# Create the graphics structure. The renderer renders into the
# render window. The render window interactor captures mouse events
# and will perform appropriate camera or actor manipulation
# depending on the nature of the events.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(partActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(200, 200)
# We'll zoom in a little by accessing the camera and invoking a "Zoom"
# method on it.
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
iren.Initialize()
renWin.Render()
# Start the event loop.
iren.Start()
| bsd-3-clause |
beckdaniel/GPy | GPy/util/mocap.py | 8 | 27243 | import os
import numpy as np
import math
from GPy.util import datasets as dat
class vertex:
def __init__(self, name, id, parents=[], children=[], meta = {}):
self.name = name
self.id = id
self.parents = parents
self.children = children
self.meta = meta
def __str__(self):
return self.name + '(' + str(self.id) + ').'
class tree:
def __init__(self):
self.vertices = []
self.vertices.append(vertex(name='root', id=0))
def __str__(self):
index = self.find_root()
return self.branch_str(index)
def branch_str(self, index, indent=''):
out = indent + str(self.vertices[index]) + '\n'
for child in self.vertices[index].children:
out+=self.branch_str(child, indent+' ')
return out
def find_children(self):
"""Take a tree and set the children according to the parents.
Takes a tree structure which lists the parents of each vertex
        and computes the children for each vertex, storing them in each vertex's children list."""
for i in range(len(self.vertices)):
self.vertices[i].children = []
for i in range(len(self.vertices)):
for parent in self.vertices[i].parents:
if i not in self.vertices[parent].children:
self.vertices[parent].children.append(i)
def find_parents(self):
"""Take a tree and set the parents according to the children
Takes a tree structure which lists the children of each vertex
        and computes the parents for each vertex, storing them in each vertex's parents list."""
for i in range(len(self.vertices)):
self.vertices[i].parents = []
for i in range(len(self.vertices)):
for child in self.vertices[i].children:
if i not in self.vertices[child].parents:
self.vertices[child].parents.append(i)
def find_root(self):
"""Finds the index of the root node of the tree."""
self.find_parents()
index = 0
while len(self.vertices[index].parents)>0:
index = self.vertices[index].parents[0]
return index
def get_index_by_id(self, id):
"""Give the index associated with a given vertex id."""
for i in range(len(self.vertices)):
if self.vertices[i].id == id:
return i
raise ValueError('Reverse look up of id failed.')
def get_index_by_name(self, name):
"""Give the index associated with a given vertex name."""
for i in range(len(self.vertices)):
if self.vertices[i].name == name:
return i
raise ValueError('Reverse look up of name failed.')
def order_vertices(self):
"""Order vertices in the graph such that parents always have a lower index than children."""
ordered = False
while ordered == False:
for i in range(len(self.vertices)):
ordered = True
for parent in self.vertices[i].parents:
if parent>i:
ordered = False
self.swap_vertices(i, parent)
def swap_vertices(self, i, j):
"""
Swap two vertices in the tree structure array.
        swap_vertices swaps the location of two vertices in a tree structure
        array, modifying the tree in place.
        :param i: the index of the first vertex to be swapped.
        :param j: the index of the second vertex to be swapped.
"""
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j
def rotation_matrix(xangle, yangle, zangle, order='zxy', degrees=False):
"""
Compute the rotation matrix for an angle in each direction.
This is a helper function for computing the rotation matrix for a given set of angles in a given order.
:param xangle: rotation for x-axis.
:param yangle: rotation for y-axis.
:param zangle: rotation for z-axis.
:param order: the order for the rotations.
"""
if degrees:
xangle = math.radians(xangle)
yangle = math.radians(yangle)
zangle = math.radians(zangle)
# Here we assume we rotate z, then x then y.
c1 = math.cos(xangle) # The x angle
c2 = math.cos(yangle) # The y angle
c3 = math.cos(zangle) # the z angle
s1 = math.sin(xangle)
s2 = math.sin(yangle)
s3 = math.sin(zangle)
# see http://en.wikipedia.org/wiki/Rotation_matrix for
# additional info.
if order=='zxy':
rot_mat = np.array([[c2*c3-s1*s2*s3, c2*s3+s1*s2*c3, -s2*c1],[-c1*s3, c1*c3, s1],[s2*c3+c2*s1*s3, s2*s3-c2*s1*c3, c2*c1]])
else:
rot_mat = np.eye(3)
for i in range(len(order)):
if order[i]=='x':
rot_mat = np.dot(np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]),rot_mat)
elif order[i] == 'y':
rot_mat = np.dot(np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]),rot_mat)
elif order[i] == 'z':
rot_mat = np.dot(np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]),rot_mat)
return rot_mat
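# Example (illustrative): with the row-vector convention used by
# acclaim_skeleton.get_child_xyz below, a 90 degree rotation about z maps a
# point on the x-axis onto the y-axis:
#   R = rotation_matrix(0., 0., 90., order='z', degrees=True)
#   np.dot(np.array([1., 0., 0.]), R)   # -> approximately [0., 1., 0.]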
# Motion capture data routines.
class skeleton(tree):
def __init__(self):
tree.__init__(self)
def connection_matrix(self):
connection = np.zeros((len(self.vertices), len(self.vertices)), dtype=bool)
for i in range(len(self.vertices)):
for j in range(len(self.vertices[i].children)):
connection[i, self.vertices[i].children[j]] = True
return connection
def to_xyz(self, channels):
raise NotImplementedError("this needs to be implemented to use the skeleton class")
def finalize(self):
"""After loading in a skeleton ensure parents are correct, vertex orders are correct and rotation matrices are correct."""
self.find_parents()
self.order_vertices()
self.set_rotation_matrices()
def smooth_angle_channels(self, channels):
"""Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions."""
for vertex in self.vertices:
for col in vertex.meta['rot_ind']:
if col:
for k in range(1, channels.shape[0]):
diff=channels[k, col]-channels[k-1, col]
if abs(diff+360.)<abs(diff):
channels[k:, col]=channels[k:, col]+360.
elif abs(diff-360.)<abs(diff):
channels[k:, col]=channels[k:, col]-360.
# class bvh_skeleton(skeleton):
# def __init__(self):
# skeleton.__init__(self)
# def to_xyz(self, channels):
class acclaim_skeleton(skeleton):
def __init__(self, file_name=None):
skeleton.__init__(self)
self.documentation = []
self.angle = 'deg'
self.length = 1.0
self.mass = 1.0
self.type = 'acclaim'
self.vertices[0] = vertex(name='root', id=0,
parents = [0], children=[],
meta = {'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)})
if file_name:
self.load_skel(file_name)
def to_xyz(self, channels):
rot_val = list(self.vertices[0].meta['orientation'])
for i in range(len(self.vertices[0].meta['rot_ind'])):
rind = self.vertices[0].meta['rot_ind'][i]
if rind != -1:
rot_val[i] += channels[rind]
self.vertices[0].meta['rot'] = rotation_matrix(rot_val[0],
rot_val[1],
rot_val[2],
self.vertices[0].meta['axis_order'],
degrees=True)
# vertex based store of the xyz location
self.vertices[0].meta['xyz'] = list(self.vertices[0].meta['offset'])
for i in range(len(self.vertices[0].meta['pos_ind'])):
pind = self.vertices[0].meta['pos_ind'][i]
if pind != -1:
self.vertices[0].meta['xyz'][i] += channels[pind]
for i in range(len(self.vertices[0].children)):
ind = self.vertices[0].children[i]
self.get_child_xyz(ind, channels)
xyz = []
for vertex in self.vertices:
xyz.append(vertex.meta['xyz'])
return np.array(xyz)
def get_child_xyz(self, ind, channels):
parent = self.vertices[ind].parents[0]
children = self.vertices[ind].children
rot_val = np.zeros(3)
for j in range(len(self.vertices[ind].meta['rot_ind'])):
rind = self.vertices[ind].meta['rot_ind'][j]
if rind != -1:
rot_val[j] = channels[rind]
else:
rot_val[j] = 0
tdof = rotation_matrix(rot_val[0], rot_val[1], rot_val[2],
self.vertices[ind].meta['order'],
degrees=True)
torient = rotation_matrix(self.vertices[ind].meta['axis'][0],
self.vertices[ind].meta['axis'][1],
self.vertices[ind].meta['axis'][2],
self.vertices[ind].meta['axis_order'],
degrees=True)
torient_inv = rotation_matrix(-self.vertices[ind].meta['axis'][0],
-self.vertices[ind].meta['axis'][1],
-self.vertices[ind].meta['axis'][2],
self.vertices[ind].meta['axis_order'][::-1],
degrees=True)
self.vertices[ind].meta['rot'] = np.dot(np.dot(np.dot(torient_inv,tdof),torient),self.vertices[parent].meta['rot'])
self.vertices[ind].meta['xyz'] = self.vertices[parent].meta['xyz'] + np.dot(self.vertices[ind].meta['offset'],self.vertices[ind].meta['rot'])
for i in range(len(children)):
cind = children[i]
self.get_child_xyz(cind, channels)
def load_channels(self, file_name):
fid=open(file_name, 'r')
channels = self.read_channels(fid)
fid.close()
return channels
def load_skel(self, file_name):
"""
Loads an ASF file into a skeleton structure.
:param file_name: The file name to load in.
"""
fid = open(file_name, 'r')
self.read_skel(fid)
fid.close()
self.name = file_name
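    # Illustrative usage (not part of the original file); the .asf/.amc file
    # names are hypothetical:
    #   skel = acclaim_skeleton('subject.asf')
    #   channels = skel.load_channels('trial.amc')
    #   xyz = skel.to_xyz(channels[0, :])   # joint positions for the first frame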
def read_bonedata(self, fid):
"""Read bone data from an acclaim skeleton file stream."""
bone_count = 0
lin = self.read_line(fid)
while lin[0]!=':':
parts = lin.split()
if parts[0] == 'begin':
bone_count += 1
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
'id': [],
'offset': [],
'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)}))
lin = self.read_line(fid)
elif parts[0]=='id':
self.vertices[bone_count].id = int(parts[1])
lin = self.read_line(fid)
self.vertices[bone_count].children = []
elif parts[0]=='name':
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
elif parts[0]=='direction':
direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
lin = self.read_line(fid)
elif parts[0]=='length':
lgth = float(parts[1])
lin = self.read_line(fid)
elif parts[0]=='axis':
self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
float(parts[2]),
float(parts[3])])
# order is reversed compared to bvh
self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
lin = self.read_line(fid)
elif parts[0]=='dof':
order = []
for i in range(1, len(parts)):
if parts[i]== 'rx':
chan = 'Xrotation'
order.append('x')
elif parts[i] =='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i] == 'rz':
chan = 'Zrotation'
order.append('z')
elif parts[i] == 'tx':
chan = 'Xposition'
elif parts[i] == 'ty':
chan = 'Yposition'
elif parts[i] == 'tz':
chan = 'Zposition'
elif parts[i] == 'l':
chan = 'length'
self.vertices[bone_count].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[bone_count].meta['order'] = order[::-1]
lin = self.read_line(fid)
elif parts[0]=='limits':
self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
lin = self.read_line(fid)
while lin !='end':
parts = lin.split()
self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
lin = self.read_line(fid)
self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
elif parts[0]=='end':
self.vertices[bone_count].meta['offset'] = direction*lgth
lin = self.read_line(fid)
return lin
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for i in self.vertices]
num_channels = 0
for vertex in self.vertices:
num_channels = num_channels + len(vertex.meta['channels'])
lin = self.read_line(fid)
while lin != ':DEGREES':
lin = self.read_line(fid)
if lin == '':
raise ValueError('Could not find :DEGREES in ' + fid.name)
counter = 0
lin = self.read_line(fid)
while lin:
parts = lin.split()
if len(parts)==1:
frame_no = int(parts[0])
if frame_no:
counter += 1
if counter != frame_no:
raise ValueError('Unexpected frame number.')
else:
raise ValueError('Single bone name ...')
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
lin = self.read_line(fid)
num_frames = counter
channels = np.zeros((num_frames, num_channels))
end_val = 0
for i in range(len(self.vertices)):
vertex = self.vertices[i]
if len(vertex.meta['channels'])>0:
start_val = end_val
end_val = end_val + len(vertex.meta['channels'])
for j in range(num_frames):
channels[j, start_val:end_val] = bones[i][j]
self.resolve_indices(i, start_val)
self.smooth_angle_channels(channels)
return channels
def read_documentation(self, fid):
"""Read documentation from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
self.documentation.append(lin)
lin = self.read_line(fid)
return lin
def read_hierarchy(self, fid):
"""Read hierarchy information from acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin != 'end':
parts = lin.split()
if lin != 'begin':
ind = self.get_index_by_name(parts[0])
for i in range(1, len(parts)):
self.vertices[ind].children.append(self.get_index_by_name(parts[i]))
lin = self.read_line(fid)
lin = self.read_line(fid)
return lin
def read_line(self, fid):
"""Read a line from a file string and check it isn't either empty or commented before returning."""
lin = '#'
while lin[0] == '#':
lin = fid.readline().strip()
if lin == '':
return lin
return lin
def read_root(self, fid):
"""Read the root node from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
parts = lin.split()
if parts[0]=='order':
order = []
for i in range(1, len(parts)):
if parts[i].lower()=='rx':
chan = 'Xrotation'
order.append('x')
elif parts[i].lower()=='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i].lower()=='rz':
chan = 'Zrotation'
order.append('z')
elif parts[i].lower()=='tx':
chan = 'Xposition'
elif parts[i].lower()=='ty':
chan = 'Yposition'
elif parts[i].lower()=='tz':
chan = 'Zposition'
elif parts[i].lower()=='l':
chan = 'length'
self.vertices[0].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[0].meta['order'] = order[::-1]
elif parts[0]=='axis':
# order is reversed compared to bvh
self.vertices[0].meta['axis_order'] = parts[1][::-1].lower()
elif parts[0]=='position':
self.vertices[0].meta['offset'] = [float(parts[1]),
float(parts[2]),
float(parts[3])]
elif parts[0]=='orientation':
self.vertices[0].meta['orientation'] = [float(parts[1]),
float(parts[2]),
float(parts[3])]
lin = self.read_line(fid)
return lin
def read_skel(self, fid):
"""Loads an acclaim skeleton format from a file stream."""
lin = self.read_line(fid)
while lin:
if lin[0]==':':
if lin[1:]== 'name':
lin = self.read_line(fid)
self.name = lin
elif lin[1:]=='units':
lin = self.read_units(fid)
elif lin[1:]=='documentation':
lin = self.read_documentation(fid)
elif lin[1:]=='root':
lin = self.read_root(fid)
elif lin[1:]=='bonedata':
lin = self.read_bonedata(fid)
elif lin[1:]=='hierarchy':
lin = self.read_hierarchy(fid)
elif lin[1:8]=='version':
lin = self.read_line(fid)
continue
else:
if not lin:
self.finalize()
return
lin = self.read_line(fid)
else:
raise ValueError('Unrecognised file format')
self.finalize()
def read_units(self, fid):
"""Read units from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
parts = lin.split()
if parts[0]=='mass':
self.mass = float(parts[1])
elif parts[0]=='length':
self.length = float(parts[1])
elif parts[0]=='angle':
self.angle = parts[1]
lin = self.read_line(fid)
return lin
def resolve_indices(self, index, start_val):
"""Get indices for the skeleton from the channels when loading in channel data."""
channels = self.vertices[index].meta['channels']
base_channel = start_val
rot_ind = -np.ones(3, dtype=int)
pos_ind = -np.ones(3, dtype=int)
for i in range(len(channels)):
if channels[i]== 'Xrotation':
rot_ind[0] = base_channel + i
elif channels[i]=='Yrotation':
rot_ind[1] = base_channel + i
elif channels[i]=='Zrotation':
rot_ind[2] = base_channel + i
elif channels[i]=='Xposition':
pos_ind[0] = base_channel + i
elif channels[i]=='Yposition':
pos_ind[1] = base_channel + i
elif channels[i]=='Zposition':
pos_ind[2] = base_channel + i
self.vertices[index].meta['rot_ind'] = list(rot_ind)
self.vertices[index].meta['pos_ind'] = list(pos_ind)
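# Illustrative example (editorial, not from the original source): for a vertex
# whose channels are ['Xposition', 'Yposition', 'Zposition', 'Xrotation',
# 'Yrotation', 'Zrotation'] and start_val == 0, the loop above resolves
# pos_ind to [0, 1, 2] and rot_ind to [3, 4, 5]; any axis whose channel is
# missing keeps the sentinel value -1.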
def set_rotation_matrices(self):
"""Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders."""
for i in range(len(self.vertices)):
self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0],
self.vertices[i].meta['axis'][1],
self.vertices[i].meta['axis'][2],
self.vertices[i].meta['axis_order'],
degrees=True)
# Todo: invert this by applying angle operations in reverse order
self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])
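# Editorial note on the Todo above (assumption, not from the original source):
# if rotation_matrix returns a proper orthonormal rotation matrix, then its
# inverse is simply its transpose, so the general matrix inverse could be
# avoided with
#   self.vertices[i].meta['Cinv'] = self.vertices[i].meta['C'].T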
# Utilities for loading in x,y,z data.
def load_text_data(dataset, directory, centre=True):
"""Load in a data set of marker points from the Ohio State University C3D motion capture files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
points, point_names = parse_text(os.path.join(directory, dataset + '.txt'))[0:2]
# Remove markers where there is a NaN
present_index = [i for i in range(points[0].shape[1]) if not (np.any(np.isnan(points[0][:, i])) or np.any(np.isnan(points[1][:, i])) or np.any(np.isnan(points[2][:, i])))]
point_names = point_names[present_index]
for i in range(3):
points[i] = points[i][:, present_index]
if centre:
points[i] = (points[i].T - points[i].mean(axis=1)).T
# Concatenate the X, Y and Z markers together
Y = np.concatenate((points[0], points[1], points[2]), axis=1)
Y = Y/400.
connect = read_connections(os.path.join(directory, 'connections.txt'), point_names)
return Y, connect
def parse_text(file_name):
"""Parse data from Ohio State University text mocap files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
# Read the header
fid = open(file_name, 'r')
point_names = np.array(fid.readline().split())[2:-1:3]
fid.close()
for i in range(len(point_names)):
point_names[i] = point_names[i][0:-2]
# Read the matrix data
S = np.loadtxt(file_name, skiprows=1)
field = np.uint(S[:, 0])
times = S[:, 1]
S = S[:, 2:]
# Set the -9999.99 markers to be not present
S[S==-9999.99] = np.NaN
# Store x, y and z in different arrays
points = []
points.append(S[:, 0:-1:3])
points.append(S[:, 1:-1:3])
points.append(S[:, 2:-1:3])
return points, point_names, times
def read_connections(file_name, point_names):
"""Read a file detailing which markers should be connected to which for motion capture data."""
connections = []
fid = open(file_name, 'r')
line=fid.readline()
while(line):
connections.append(np.array(line.split(',')))
connections[-1][0] = connections[-1][0].strip()
connections[-1][1] = connections[-1][1].strip()
line = fid.readline()
connect = np.zeros((len(point_names), len(point_names)),dtype=bool)
for i in range(len(point_names)):
for j in range(len(point_names)):
for k in range(len(connections)):
if connections[k][0] == point_names[i] and connections[k][1] == point_names[j]:
connect[i,j]=True
connect[j,i]=True
break
return connect
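# Editorial usage sketch (hypothetical names, not from the original source):
# the boolean 'connect' matrix produced above can be turned into index pairs
# for drawing limbs between markers, e.g.
#   Y, connect = load_text_data('run1', './data')  # hypothetical dataset/path
#   segments = [(i, j) for i in range(connect.shape[0])
#               for j in range(i + 1, connect.shape[1]) if connect[i, j]]
# where each (i, j) pair indexes two connected markers in point_names.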
skel = acclaim_skeleton()
| bsd-3-clause |
JoseBlanca/franklin | test/utils/seqio_utils_test.py | 1 | 3293 | '''
Created on 2009 Jul 28
@author: peio
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
import unittest
import StringIO, tempfile, os
from franklin.utils.seqio_utils import cat, seqio
from franklin.utils.misc_utils import TEST_DATA_DIR
class TestSeqio(unittest.TestCase):
'It tests the converter'
@staticmethod
def test_fastq_to_fasta_qual():
'It tests the conversion from fastq to fasta'
fcontent = '@seq1\n'
fcontent += 'CCCT\n'
fcontent += '+\n'
fcontent += ';;3;\n'
fcontent += '@SRR001666.1\n'
fcontent += 'GTTGC\n'
fcontent += '+\n'
fcontent += ';;;;;\n'
fhand = StringIO.StringIO(fcontent)
out_seq_fhand = tempfile.NamedTemporaryFile(suffix='.fasta')
out_qual_fhand = tempfile.NamedTemporaryFile(suffix='.qual')
seqio(in_seq_fhand=fhand, in_format='fastq',
out_seq_fhand=out_seq_fhand, out_qual_fhand=out_qual_fhand,
out_format='fasta')
result = '>seq1\nCCCT\n>SRR001666.1\nGTTGC\n'
assert open(out_seq_fhand.name).read() == result
qual = '>seq1\n26 26 18 26\n>SRR001666.1\n26 26 26 26 26\n'
assert open(out_qual_fhand.name).read() == qual
@staticmethod
def test_fastq_to_fastq_solexa():
'It tests the conversion using the Biopython convert function'
fcontent = '@seq1\n'
fcontent += 'CCCT\n'
fcontent += '+\n'
fcontent += ';;3;\n'
fcontent += '@SRR001666.1\n'
fcontent += 'GTTGC\n'
fcontent += '+\n'
fcontent += ';;;;;\n'
fhand = StringIO.StringIO(fcontent)
out_seq_fhand = StringIO.StringIO()
seqio(in_seq_fhand=fhand, in_format='fastq',
out_seq_fhand=out_seq_fhand, out_format='fastq-solexa')
result = '@seq1\nCCCT\n+\nZZRZ\n@SRR001666.1\nGTTGC\n+\nZZZZZ\n'
assert out_seq_fhand.getvalue() == result
class TestCat(unittest.TestCase):
'It tests sequence file concatenation'
@staticmethod
def test_cat():
'It tests the cat function'
inh1 = StringIO.StringIO('>seq1\nACTG\n')
inh2 = StringIO.StringIO('>seq2\nGTCA\n')
outh = StringIO.StringIO()
cat(infiles=[inh1, inh2], outfile=outh)
assert outh.getvalue() == '>seq1\nACTG\n>seq2\nGTCA\n'
# it also works with None values
outh = StringIO.StringIO()
cat(infiles=[None, None], outfile=outh)
assert outh.getvalue() == ''
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| agpl-3.0 |
mcfletch/AutobahnPython | examples/twisted/wamp/auth/persona/server.py | 7 | 9085 | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import datetime
from autobahn.twisted.wamp import ApplicationSession
class TimeService(ApplicationSession):
"""
A simple time service application component.
"""
def __init__(self, realm = "realm1"):
ApplicationSession.__init__(self)
self._realm = realm
def onConnect(self):
self.join(self._realm)
def onJoin(self, details):
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
self.register(utcnow, 'com.timeservice.now')
from twisted.python import log
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
from twisted.internet.defer import Deferred
import json
import urllib
import Cookie
from autobahn.util import newid, utcnow
from autobahn.websocket import http
class ServerProtocol(WampWebSocketServerProtocol):
## authid -> cookie -> set(connection)
def onConnect(self, request):
protocol, headers = WampWebSocketServerProtocol.onConnect(self, request)
## our cookie tracking ID
self._cbtid = None
## see if there already is a cookie set ..
if request.headers.has_key('cookie'):
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if cookie.has_key('cbtid'):
cbtid = cookie['cbtid'].value
if self.factory._cookies.has_key(cbtid):
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
## if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
## do NOT add the "secure" cookie attribute! "secure" refers to the
## scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
log.msg("Setting new cookie: %s" % self._cbtid)
## add this WebSocket connection to the set of connections
## associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
self._authenticated = self.factory._cookies[self._cbtid]['authenticated']
## accept the WebSocket connection, speaking subprotocol `protocol`
## and setting HTTP headers `headers`
return (protocol, headers)
from autobahn.twisted.wamp import RouterSession
from autobahn.wamp import types
class MyRouterSession(RouterSession):
def onOpen(self, transport):
RouterSession.onOpen(self, transport)
print "transport authenticated: {}".format(self._transport._authenticated)
def onHello(self, realm, details):
print "onHello: {} {}".format(realm, details)
if self._transport._authenticated is not None:
return types.Accept(authid = self._transport._authenticated)
else:
return types.Challenge("mozilla-persona")
def onLeave(self, details):
if details.reason == "wamp.close.logout":
cookie = self._transport.factory._cookies[self._transport._cbtid]
cookie['authenticated'] = None
for proto in cookie['connections']:
proto.sendClose()
def onAuthenticate(self, signature, extra):
print "onAuthenticate: {} {}".format(signature, extra)
dres = Deferred()
## The client did its Mozilla Persona authentication thing
## and now wants to verify the authentication and login.
assertion = signature
audience = 'http://127.0.0.1:8080/'
## To verify the authentication, we need to send a HTTP/POST
## to Mozilla Persona. When successful, Persona will send us
## back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "[email protected]",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url = "https://verifier.login.persona.org/verify",
method = 'POST',
postdata = body,
headers = headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
try:
if res['status'] == 'okay':
## Mozilla Persona successfully authenticated the user
## remember the user's email address. this marks the cookie as
## authenticated
self._transport.factory._cookies[self._transport._cbtid]['authenticated'] = res['email']
log.msg("Authenticated user {}".format(res['email']))
dres.callback(types.Accept(authid = res['email']))
else:
log.msg("Authentication failed!")
dres.callback(types.Deny())
except Exception as e:
print "ERRR", e
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
dres.callback(types.Deny())
d.addCallbacks(done, error)
return dres
if __name__ == '__main__':
import sys, argparse
from twisted.python import log
from twisted.internet.endpoints import serverFromString
## parse command line arguments
##
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action = "store_true",
help = "Enable debug output.")
parser.add_argument("-c", "--component", type = str, default = None,
help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")
parser.add_argument("--websocket", type = str, default = "tcp:8080",
help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')
parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')
args = parser.parse_args()
## start Twisted logging to stdout
##
if True or args.debug:
log.startLogging(sys.stdout)
## we use an Autobahn utility to install the "best" available Twisted reactor
##
from autobahn.twisted.choosereactor import install_reactor
reactor = install_reactor()
if args.debug:
print("Running on reactor {}".format(reactor))
## create a WAMP router factory
##
from autobahn.wamp.router import RouterFactory
router_factory = RouterFactory()
## create a WAMP router session factory
##
from autobahn.twisted.wamp import RouterSessionFactory
session_factory = RouterSessionFactory(router_factory)
session_factory.session = MyRouterSession
## start an embedded application component ..
##
session_factory.add(TimeService())
## create a WAMP-over-WebSocket transport server factory
##
from autobahn.twisted.websocket import WampWebSocketServerFactory
transport_factory = WampWebSocketServerFactory(session_factory, args.wsurl, debug_wamp = args.debug)
transport_factory.protocol = ServerProtocol
transport_factory._cookies = {}
transport_factory.setProtocolOptions(failByDrop = False)
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.resource import WebSocketResource
## we serve static files under "/" ..
root = File(".")
## .. and our WebSocket server under "/ws"
resource = WebSocketResource(transport_factory)
root.putChild("ws", resource)
## run both under one Twisted Web Site
site = Site(root)
## start the WebSocket server from an endpoint
##
server = serverFromString(reactor, args.websocket)
server.listen(site)
## now enter the Twisted reactor loop
##
reactor.run()
| apache-2.0 |
JamesShaeffer/QGIS | python/plugins/db_manager/dlg_export_vector.py | 62 | 8183 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : Oct 13, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import Qt, QFileInfo
from qgis.PyQt.QtWidgets import QDialog, QFileDialog, QMessageBox, QApplication
from qgis.PyQt.QtGui import QCursor
from qgis.core import (QgsVectorFileWriter,
QgsVectorDataProvider,
QgsCoordinateReferenceSystem,
QgsVectorLayerExporter,
QgsSettings)
from qgis.utils import OverrideCursor
from .ui.ui_DlgExportVector import Ui_DbManagerDlgExportVector as Ui_Dialog
class DlgExportVector(QDialog, Ui_Dialog):
def __init__(self, inLayer, inDb, parent=None):
QDialog.__init__(self, parent)
self.inLayer = inLayer
self.db = inDb
self.setupUi(self)
vectorFilterName = "lastVectorFileFilter" # "lastRasterFileFilter"
self.lastUsedVectorFilterSettingsKey = u"/UI/{0}".format(vectorFilterName)
self.lastUsedVectorDirSettingsKey = u"/UI/{0}Dir".format(vectorFilterName)
# update UI
self.setupWorkingMode()
self.populateFileFilters()
self.populateEncodings()
def setupWorkingMode(self):
# set default values
inCrs = self.inLayer.crs()
srid = inCrs.postgisSrid() if inCrs.isValid() else 4326
self.editSourceSrid.setText("%s" % srid)
self.editTargetSrid.setText("%s" % srid)
self.btnChooseOutputFile.clicked.connect(self.chooseOutputFile)
self.checkSupports()
def checkSupports(self):
""" update options available for the current input layer """
allowSpatial = self.db.connector.hasSpatialSupport()
hasGeomType = self.inLayer and self.inLayer.isSpatial()
self.chkSourceSrid.setEnabled(allowSpatial and hasGeomType)
self.chkTargetSrid.setEnabled(allowSpatial and hasGeomType)
# self.chkSpatialIndex.setEnabled(allowSpatial and hasGeomType)
def chooseOutputFile(self):
# get last used dir
settings = QgsSettings()
lastUsedDir = settings.value(self.lastUsedVectorDirSettingsKey, ".")
# get selected filter
selected_driver = self.cboFileFormat.currentData()
selected_filter = QgsVectorFileWriter.filterForDriver(selected_driver)
# ask for a filename
filename, filter = QFileDialog.getSaveFileName(self, self.tr("Choose where to save the file"), lastUsedDir,
selected_filter)
if filename == "":
return
ext = selected_filter[selected_filter.find('.'):]
ext = ext[:ext.find(' ')]
if not filename.lower().endswith(ext):
filename += ext
# store the last used dir
settings.setValue(self.lastUsedVectorDirSettingsKey, QFileInfo(filename).filePath())
self.editOutputFile.setText(filename)
def populateEncodings(self):
# populate the combo with supported encodings
self.cboEncoding.addItems(QgsVectorDataProvider.availableEncodings())
# set the last used encoding
enc = self.inLayer.dataProvider().encoding()
idx = self.cboEncoding.findText(enc)
if idx < 0:
self.cboEncoding.insertItem(0, enc)
idx = 0
self.cboEncoding.setCurrentIndex(idx)
def populateFileFilters(self):
# populate the combo with supported vector file formats
for driver in QgsVectorFileWriter.ogrDriverList():
self.cboFileFormat.addItem(driver.longName, driver.driverName)
# set the last used filter
settings = QgsSettings()
filt = settings.value(self.lastUsedVectorFilterSettingsKey, "GPKG")
idx = self.cboFileFormat.findText(filt)
if idx < 0:
idx = 0
self.cboFileFormat.setCurrentIndex(idx)
def accept(self):
# sanity checks
if self.editOutputFile.text() == "":
QMessageBox.information(self, self.tr("Export to file"), self.tr("Output file name is required"))
return
if self.chkSourceSrid.isEnabled() and self.chkSourceSrid.isChecked():
try:
sourceSrid = int(self.editSourceSrid.text())
except ValueError:
QMessageBox.information(self, self.tr("Export to file"),
self.tr("Invalid source srid: must be an integer"))
return
if self.chkTargetSrid.isEnabled() and self.chkTargetSrid.isChecked():
try:
targetSrid = int(self.editTargetSrid.text())
except ValueError:
QMessageBox.information(self, self.tr("Export to file"),
self.tr("Invalid target srid: must be an integer"))
return
with OverrideCursor(Qt.WaitCursor):
# store current input layer crs, so I can restore it later
prevInCrs = self.inLayer.crs()
try:
uri = self.editOutputFile.text()
providerName = "ogr"
options = {}
# set the OGR driver will be used
driverName = self.cboFileFormat.currentData()
options['driverName'] = driverName
# set the output file encoding
if self.chkEncoding.isEnabled() and self.chkEncoding.isChecked():
enc = self.cboEncoding.currentText()
options['fileEncoding'] = enc
if self.chkDropTable.isChecked():
options['overwrite'] = True
outCrs = QgsCoordinateReferenceSystem()
if self.chkTargetSrid.isEnabled() and self.chkTargetSrid.isChecked():
targetSrid = int(self.editTargetSrid.text())
outCrs = QgsCoordinateReferenceSystem(targetSrid)
# update input layer crs
if self.chkSourceSrid.isEnabled() and self.chkSourceSrid.isChecked():
sourceSrid = int(self.editSourceSrid.text())
inCrs = QgsCoordinateReferenceSystem(sourceSrid)
self.inLayer.setCrs(inCrs)
# do the export!
ret, errMsg = QgsVectorLayerExporter.exportLayer(self.inLayer, uri, providerName, outCrs,
False, options)
except Exception as e:
ret = -1
errMsg = str(e)
finally:
# restore input layer crs and encoding
self.inLayer.setCrs(prevInCrs)
if ret != 0:
QMessageBox.warning(self, self.tr("Export to file"), self.tr("Error {0}\n{1}").format(ret, errMsg))
return
# create spatial index
# if self.chkSpatialIndex.isEnabled() and self.chkSpatialIndex.isChecked():
# self.db.connector.createSpatialIndex( (schema, table), geom )
QMessageBox.information(self, self.tr("Export to file"), self.tr("Export finished."))
return QDialog.accept(self)
| gpl-2.0 |
mookaka/mywebblog | www/pymonitor.py | 1 | 1762 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'mookaka'
import os, sys, time, subprocess
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
command = ['echo', 'ok']
process = None
def log(s):
print('[Monitor] %s' % s)
class MyFileSystemEventHandler(FileSystemEventHandler):
def __init__(self,fn):
super(MyFileSystemEventHandler, self).__init__()
self.restart = fn
def on_any_event(self, event):
if event.src_path.endswith('.py'):
log('Python source file changed: %s' % event.src_path)
self.restart()
def kill_process():
global process
if process:
log('Kill process [%s]...' % process.pid)
process.kill()
process.wait()
log('Process ended with code %s.' % process.returncode)
process = None
def start_process():
global process, command
log('Start process %s...' % ' '.join(command))
process = subprocess.Popen(command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
def restart_process():
kill_process()
start_process()
def start_watch(path, callback):
observer = Observer()
observer.schedule(MyFileSystemEventHandler(restart_process), path, recursive=True)
observer.start()
log('Watching directory %s...' % path)
start_process()
try:
while True:
time.sleep(0.5)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
argv = sys.argv[1:]
if not argv:
print('Usage: ./pymonitor your-script.py')
exit(0)
if argv[0] != 'python3':
argv.insert(0, 'python3')
command = argv
path = os.path.abspath('.')
start_watch(path, None) | mit |
jpaalasm/pyglet | tests/window/WINDOW_SET_VSYNC.py | 29 | 2009 | #!/usr/bin/env python
'''Test that vsync can be set.
Expected behaviour:
A window will alternate between red and green fill.
- Press "v" to toggle vsync on/off. "Tearing" should only be visible
when vsync is off (as indicated at the terminal).
Not all video drivers support vsync. On Linux, check the output of
`tools/info.py`:
- If GLX_SGI_video_sync extension is present, should work as expected.
- If GLX_MESA_swap_control extension is present, should work as expected.
- If GLX_SGI_swap_control extension is present, vsync can be enabled,
but once enabled, it cannot be switched off (there will be no error
message).
- If none of these extensions are present, vsync is not supported by
your driver, but no error message or warning will be printed.
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
from pyglet import window
from pyglet.window import key
from pyglet.gl import *
class WINDOW_SET_VSYNC(unittest.TestCase):
colors = [(1, 0, 0, 1), (0, 1, 0, 1)]
color_index = 0
def open_window(self):
return window.Window(200, 200, vsync=False)
def on_key_press(self, symbol, modifiers):
if symbol == key.V:
vsync = not self.w1.vsync
self.w1.set_vsync(vsync)
print 'vsync is %r' % self.w1.vsync
def draw_window(self, window, colour):
window.switch_to()
glClearColor(*colour)
glClear(GL_COLOR_BUFFER_BIT)
window.flip()
def test_open_window(self):
self.w1 = self.open_window()
self.w1.push_handlers(self)
print 'vsync is %r' % self.w1.vsync
while not self.w1.has_exit:
self.color_index = 1 - self.color_index
self.draw_window(self.w1, self.colors[self.color_index])
self.w1.dispatch_events()
self.w1.close()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
PatKayongo/patkayongo.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_phpbuiltins.py | 95 | 122088 | # -*- coding: utf-8 -*-
"""
pygments.lexers._phpbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file loads the function names and their modules from the
php webpage and generates itself.
Do not alter the MODULES dict by hand!
WARNING: the generation transfers quite a lot of data over your
internet connection. Don't run that at home, use
a server ;-)
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'.NET': ['dotnet_load'],
'APC': ['apc_add',
'apc_bin_dump',
'apc_bin_dumpfile',
'apc_bin_load',
'apc_bin_loadfile',
'apc_cache_info',
'apc_cas',
'apc_clear_cache',
'apc_compile_file',
'apc_dec',
'apc_define_constants',
'apc_delete_file',
'apc_delete',
'apc_exists',
'apc_fetch',
'apc_inc',
'apc_load_constants',
'apc_sma_info',
'apc_store'],
'APD': ['apd_breakpoint',
'apd_callstack',
'apd_clunk',
'apd_continue',
'apd_croak',
'apd_dump_function_table',
'apd_dump_persistent_resources',
'apd_dump_regular_resources',
'apd_echo',
'apd_get_active_symbols',
'apd_set_pprof_trace',
'apd_set_session_trace_socket',
'apd_set_session_trace',
'apd_set_session',
'override_function',
'rename_function'],
'Aliases and deprecated Mysqli': ['mysqli_bind_param',
'mysqli_bind_result',
'mysqli_client_encoding',
'mysqli_connect',
'mysqli_disable_reads_from_master',
'mysqli_disable_rpl_parse',
'mysqli_enable_reads_from_master',
'mysqli_enable_rpl_parse',
'mysqli_escape_string',
'mysqli_execute',
'mysqli_fetch',
'mysqli_get_metadata',
'mysqli_master_query',
'mysqli_param_count',
'mysqli_report',
'mysqli_rpl_parse_enabled',
'mysqli_rpl_probe',
'mysqli_rpl_query_type',
'mysqli_send_long_data',
'mysqli_send_query',
'mysqli_set_opt',
'mysqli_slave_query'],
'Apache': ['apache_child_terminate',
'apache_get_modules',
'apache_get_version',
'apache_getenv',
'apache_lookup_uri',
'apache_note',
'apache_request_headers',
'apache_reset_timeout',
'apache_response_headers',
'apache_setenv',
'getallheaders',
'virtual'],
'Array': ['array_change_key_case',
'array_chunk',
'array_combine',
'array_count_values',
'array_diff_assoc',
'array_diff_key',
'array_diff_uassoc',
'array_diff_ukey',
'array_diff',
'array_fill_keys',
'array_fill',
'array_filter',
'array_flip',
'array_intersect_assoc',
'array_intersect_key',
'array_intersect_uassoc',
'array_intersect_ukey',
'array_intersect',
'array_key_exists',
'array_keys',
'array_map',
'array_merge_recursive',
'array_merge',
'array_multisort',
'array_pad',
'array_pop',
'array_product',
'array_push',
'array_rand',
'array_reduce',
'array_replace_recursive',
'array_replace',
'array_reverse',
'array_search',
'array_shift',
'array_slice',
'array_splice',
'array_sum',
'array_udiff_assoc',
'array_udiff_uassoc',
'array_udiff',
'array_uintersect_assoc',
'array_uintersect_uassoc',
'array_uintersect',
'array_unique',
'array_unshift',
'array_values',
'array_walk_recursive',
'array_walk',
'array',
'arsort',
'asort',
'compact',
'count',
'current',
'each',
'end',
'extract',
'in_array',
'key',
'krsort',
'ksort',
'list',
'natcasesort',
'natsort',
'next',
'pos',
'prev',
'range',
'reset',
'rsort',
'shuffle',
'sizeof',
'sort',
'uasort',
'uksort',
'usort'],
'BBCode': ['bbcode_add_element',
'bbcode_add_smiley',
'bbcode_create',
'bbcode_destroy',
'bbcode_parse',
'bbcode_set_arg_parser',
'bbcode_set_flags'],
'BC Math': ['bcadd',
'bccomp',
'bcdiv',
'bcmod',
'bcmul',
'bcpow',
'bcpowmod',
'bcscale',
'bcsqrt',
'bcsub'],
'Bzip2': ['bzclose',
'bzcompress',
'bzdecompress',
'bzerrno',
'bzerror',
'bzerrstr',
'bzflush',
'bzopen',
'bzread',
'bzwrite'],
'COM': ['com_addref',
'com_create_guid',
'com_event_sink',
'com_get_active_object',
'com_get',
'com_invoke',
'com_isenum',
'com_load_typelib',
'com_load',
'com_message_pump',
'com_print_typeinfo',
'com_propget',
'com_propput',
'com_propset',
'com_release',
'com_set',
'variant_abs',
'variant_add',
'variant_and',
'variant_cast',
'variant_cat',
'variant_cmp',
'variant_date_from_timestamp',
'variant_date_to_timestamp',
'variant_div',
'variant_eqv',
'variant_fix',
'variant_get_type',
'variant_idiv',
'variant_imp',
'variant_int',
'variant_mod',
'variant_mul',
'variant_neg',
'variant_not',
'variant_or',
'variant_pow',
'variant_round',
'variant_set_type',
'variant_set',
'variant_sub',
'variant_xor'],
'CUBRID': ['cubrid_affected_rows',
'cubrid_bind',
'cubrid_close_prepare',
'cubrid_close_request',
'cubrid_col_get',
'cubrid_col_size',
'cubrid_column_names',
'cubrid_column_types',
'cubrid_commit',
'cubrid_connect_with_url',
'cubrid_connect',
'cubrid_current_oid',
'cubrid_disconnect',
'cubrid_drop',
'cubrid_error_code_facility',
'cubrid_error_code',
'cubrid_error_msg',
'cubrid_execute',
'cubrid_fetch',
'cubrid_free_result',
'cubrid_get_charset',
'cubrid_get_class_name',
'cubrid_get_client_info',
'cubrid_get_db_parameter',
'cubrid_get_server_info',
'cubrid_get',
'cubrid_insert_id',
'cubrid_is_instance',
'cubrid_lob_close',
'cubrid_lob_export',
'cubrid_lob_get',
'cubrid_lob_send',
'cubrid_lob_size',
'cubrid_lock_read',
'cubrid_lock_write',
'cubrid_move_cursor',
'cubrid_num_cols',
'cubrid_num_rows',
'cubrid_prepare',
'cubrid_put',
'cubrid_rollback',
'cubrid_schema',
'cubrid_seq_drop',
'cubrid_seq_insert',
'cubrid_seq_put',
'cubrid_set_add',
'cubrid_set_drop',
'cubrid_version'],
'Cairo': ['cairo_create',
'cairo_font_face_get_type',
'cairo_font_face_status',
'cairo_font_options_create',
'cairo_font_options_equal',
'cairo_font_options_get_antialias',
'cairo_font_options_get_hint_metrics',
'cairo_font_options_get_hint_style',
'cairo_font_options_get_subpixel_order',
'cairo_font_options_hash',
'cairo_font_options_merge',
'cairo_font_options_set_antialias',
'cairo_font_options_set_hint_metrics',
'cairo_font_options_set_hint_style',
'cairo_font_options_set_subpixel_order',
'cairo_font_options_status',
'cairo_format_stride_for_width',
'cairo_image_surface_create_for_data',
'cairo_image_surface_create_from_png',
'cairo_image_surface_create',
'cairo_image_surface_get_data',
'cairo_image_surface_get_format',
'cairo_image_surface_get_height',
'cairo_image_surface_get_stride',
'cairo_image_surface_get_width',
'cairo_matrix_create_scale',
'cairo_matrix_create_translate',
'cairo_matrix_invert',
'cairo_matrix_multiply',
'cairo_matrix_rotate',
'cairo_matrix_transform_distance',
'cairo_matrix_transform_point',
'cairo_matrix_translate',
'cairo_pattern_add_color_stop_rgb',
'cairo_pattern_add_color_stop_rgba',
'cairo_pattern_create_for_surface',
'cairo_pattern_create_linear',
'cairo_pattern_create_radial',
'cairo_pattern_create_rgb',
'cairo_pattern_create_rgba',
'cairo_pattern_get_color_stop_count',
'cairo_pattern_get_color_stop_rgba',
'cairo_pattern_get_extend',
'cairo_pattern_get_filter',
'cairo_pattern_get_linear_points',
'cairo_pattern_get_matrix',
'cairo_pattern_get_radial_circles',
'cairo_pattern_get_rgba',
'cairo_pattern_get_surface',
'cairo_pattern_get_type',
'cairo_pattern_set_extend',
'cairo_pattern_set_filter',
'cairo_pattern_set_matrix',
'cairo_pattern_status',
'cairo_pdf_surface_create',
'cairo_pdf_surface_set_size',
'cairo_ps_get_levels',
'cairo_ps_level_to_string',
'cairo_ps_surface_create',
'cairo_ps_surface_dsc_begin_page_setup',
'cairo_ps_surface_dsc_begin_setup',
'cairo_ps_surface_dsc_comment',
'cairo_ps_surface_get_eps',
'cairo_ps_surface_restrict_to_level',
'cairo_ps_surface_set_eps',
'cairo_ps_surface_set_size',
'cairo_scaled_font_create',
'cairo_scaled_font_extents',
'cairo_scaled_font_get_ctm',
'cairo_scaled_font_get_font_face',
'cairo_scaled_font_get_font_matrix',
'cairo_scaled_font_get_font_options',
'cairo_scaled_font_get_scale_matrix',
'cairo_scaled_font_get_type',
'cairo_scaled_font_glyph_extents',
'cairo_scaled_font_status',
'cairo_scaled_font_text_extents',
'cairo_surface_copy_page',
'cairo_surface_create_similar',
'cairo_surface_finish',
'cairo_surface_flush',
'cairo_surface_get_content',
'cairo_surface_get_device_offset',
'cairo_surface_get_font_options',
'cairo_surface_get_type',
'cairo_surface_mark_dirty_rectangle',
'cairo_surface_mark_dirty',
'cairo_surface_set_device_offset',
'cairo_surface_set_fallback_resolution',
'cairo_surface_show_page',
'cairo_surface_status',
'cairo_surface_write_to_png',
'cairo_svg_surface_create',
'cairo_svg_surface_restrict_to_version',
'cairo_svg_version_to_string'],
'Calendar': ['cal_days_in_month',
'cal_from_jd',
'cal_info',
'cal_to_jd',
'easter_date',
'easter_days',
'FrenchToJD',
'GregorianToJD',
'JDDayOfWeek',
'JDMonthName',
'JDToFrench',
'JDToGregorian',
'jdtojewish',
'JDToJulian',
'jdtounix',
'JewishToJD',
'JulianToJD',
'unixtojd'],
'Classes/Object': ['call_user_method_array',
'call_user_method',
'class_alias',
'class_exists',
'get_called_class',
'get_class_methods',
'get_class_vars',
'get_class',
'get_declared_classes',
'get_declared_interfaces',
'get_object_vars',
'get_parent_class',
'interface_exists',
'is_a',
'is_subclass_of',
'method_exists',
'property_exists'],
'Classkit': ['classkit_import',
'classkit_method_add',
'classkit_method_copy',
'classkit_method_redefine',
'classkit_method_remove',
'classkit_method_rename'],
'Crack': ['crack_check',
'crack_closedict',
'crack_getlastmessage',
'crack_opendict'],
'Ctype': ['ctype_alnum',
'ctype_alpha',
'ctype_cntrl',
'ctype_digit',
'ctype_graph',
'ctype_lower',
'ctype_print',
'ctype_punct'],
'Cyrus': ['cyrus_authenticate',
'cyrus_bind',
'cyrus_close',
'cyrus_connect',
'cyrus_query',
'cyrus_unbind'],
'DB++': ['dbplus_add',
'dbplus_aql',
'dbplus_chdir',
'dbplus_close',
'dbplus_curr',
'dbplus_errcode',
'dbplus_errno',
'dbplus_find',
'dbplus_first',
'dbplus_flush',
'dbplus_freealllocks',
'dbplus_freelock',
'dbplus_freerlocks',
'dbplus_getlock',
'dbplus_getunique',
'dbplus_info',
'dbplus_last',
'dbplus_lockrel',
'dbplus_next',
'dbplus_open',
'dbplus_prev',
'dbplus_rchperm',
'dbplus_rcreate',
'dbplus_rcrtexact',
'dbplus_rcrtlike',
'dbplus_resolve',
'dbplus_restorepos',
'dbplus_rkeys',
'dbplus_ropen',
'dbplus_rquery',
'dbplus_rrename',
'dbplus_rsecindex',
'dbplus_runlink',
'dbplus_rzap',
'dbplus_savepos',
'dbplus_setindex',
'dbplus_setindexbynumber',
'dbplus_sql',
'dbplus_tcl',
'dbplus_tremove',
'dbplus_undo',
'dbplus_undoprepare',
'dbplus_unlockrel',
'dbplus_unselect',
'dbplus_update',
'dbplus_xlockrel',
'dbplus_xunlockrel'],
'DBA': ['dba_close',
'dba_delete',
'dba_exists',
'dba_fetch',
'dba_firstkey',
'dba_handlers',
'dba_insert',
'dba_key_split',
'dba_list',
'dba_nextkey',
'dba_open',
'dba_optimize',
'dba_popen',
'dba_replace',
'dba_sync'],
'DOM': ['dom_import_simplexml'],
'DOM XML (PHP 4)': ['domxml_new_doc',
'domxml_open_file',
'domxml_open_mem',
'domxml_version',
'domxml_xmltree',
'domxml_xslt_stylesheet_doc',
'domxml_xslt_stylesheet_file',
'domxml_xslt_stylesheet',
'domxml_xslt_version',
'xpath_eval_expression',
'xpath_eval',
'xpath_new_context',
'xpath_register_ns_auto',
'xpath_register_ns',
'xptr_eval',
'xptr_new_context'],
'Date/Time': ['checkdate',
'date_add',
'date_create_from_format',
'date_create',
'date_date_set',
'date_default_timezone_get',
'date_default_timezone_set',
'date_diff',
'date_format',
'date_get_last_errors',
'date_interval_create_from_date_string',
'date_interval_format',
'date_isodate_set',
'date_modify',
'date_offset_get',
'date_parse_from_format',
'date_parse',
'date_sub',
'date_sun_info',
'date_sunrise',
'date_sunset',
'date_time_set',
'date_timestamp_get',
'date_timestamp_set',
'date_timezone_get',
'date_timezone_set',
'date',
'getdate',
'gettimeofday',
'gmdate',
'gmmktime',
'gmstrftime',
'idate',
'localtime',
'microtime',
'mktime',
'strftime',
'strptime',
'strtotime',
'time',
'timezone_abbreviations_list',
'timezone_identifiers_list',
'timezone_location_get',
'timezone_name_from_abbr',
'timezone_name_get',
'timezone_offset_get',
'timezone_open',
'timezone_transitions_get',
'timezone_version_get'],
'Direct IO': ['dio_close', 'dio_fcntl', 'dio_open'],
'Directory': ['chdir',
'chroot',
'closedir',
'getcwd',
'opendir',
'readdir',
'rewinddir',
'scandir'],
'Enchant': ['enchant_broker_describe',
'enchant_broker_dict_exists',
'enchant_broker_free_dict',
'enchant_broker_free',
'enchant_broker_get_error',
'enchant_broker_init',
'enchant_broker_list_dicts',
'enchant_broker_request_dict',
'enchant_broker_request_pwl_dict',
'enchant_broker_set_ordering',
'enchant_dict_add_to_personal',
'enchant_dict_add_to_session',
'enchant_dict_check',
'enchant_dict_describe',
'enchant_dict_get_error',
'enchant_dict_is_in_session',
'enchant_dict_quick_check',
'enchant_dict_store_replacement',
'enchant_dict_suggest'],
'Error Handling': ['debug_backtrace',
'debug_print_backtrace',
'error_get_last',
'error_log',
'error_reporting',
'restore_error_handler',
'restore_exception_handler',
'set_error_handler',
'set_exception_handler',
'trigger_error',
'user_error'],
'Exif': ['exif_imagetype',
'exif_read_data',
'exif_tagname',
'exif_thumbnail',
'read_exif_data'],
'Expect': ['expect_expectl'],
'FAM': ['fam_cancel_monitor',
'fam_close',
'fam_monitor_collection',
'fam_monitor_directory',
'fam_monitor_file',
'fam_next_event',
'fam_open',
'fam_pending',
'fam_resume_monitor',
'fam_suspend_monitor'],
'FDF': ['fdf_add_doc_javascript',
'fdf_add_template',
'fdf_close',
'fdf_create',
'fdf_enum_values',
'fdf_errno',
'fdf_error',
'fdf_get_ap',
'fdf_get_attachment',
'fdf_get_encoding',
'fdf_get_file',
'fdf_get_flags',
'fdf_get_opt',
'fdf_get_status',
'fdf_get_value',
'fdf_get_version',
'fdf_header',
'fdf_next_field_name',
'fdf_open_string',
'fdf_open',
'fdf_remove_item',
'fdf_save_string',
'fdf_save',
'fdf_set_ap',
'fdf_set_encoding',
'fdf_set_file',
'fdf_set_flags',
'fdf_set_javascript_action',
'fdf_set_on_import_javascript',
'fdf_set_opt',
'fdf_set_status',
'fdf_set_submit_form_action',
'fdf_set_target_frame',
'fdf_set_value',
'fdf_set_version'],
'FTP': ['ftp_alloc',
'ftp_cdup',
'ftp_chdir',
'ftp_chmod',
'ftp_close',
'ftp_connect',
'ftp_delete',
'ftp_exec',
'ftp_fget',
'ftp_fput',
'ftp_get_option',
'ftp_get',
'ftp_login',
'ftp_mdtm',
'ftp_mkdir',
'ftp_nb_continue',
'ftp_nb_fget',
'ftp_nb_fput',
'ftp_nb_get',
'ftp_nb_put',
'ftp_nlist',
'ftp_pasv',
'ftp_put',
'ftp_pwd',
'ftp_quit',
'ftp_raw',
'ftp_rawlist',
'ftp_rename',
'ftp_rmdir',
'ftp_set_option',
'ftp_site',
'ftp_size',
'ftp_ssl_connect',
'ftp_systype'],
'Fileinfo': ['finfo_buffer',
'finfo_close',
'finfo_file',
'finfo_open',
'finfo_set_flags',
'mime_content_type'],
'Filesystem': ['basename',
'chgrp',
'chmod',
'chown',
'clearstatcache',
'copy',
'dirname',
'disk_free_space',
'disk_total_space',
'diskfreespace',
'fclose',
'feof',
'fflush',
'fgetc',
'fgetcsv',
'fgets',
'fgetss',
'file_exists',
'file_get_contents',
'file_put_contents',
'file',
'fileatime',
'filectime',
'filegroup',
'fileinode',
'filemtime',
'fileowner',
'fileperms',
'filesize',
'filetype',
'flock',
'fnmatch',
'fopen',
'fpassthru',
'fputcsv',
'fputs',
'fread',
'fscanf',
'fseek',
'fstat',
'ftell',
'ftruncate',
'fwrite',
'glob',
'is_dir',
'is_executable',
'is_file',
'is_link',
'is_readable',
'is_uploaded_file',
'is_writable',
'is_writeable',
'lchgrp',
'lchown',
'link',
'linkinfo',
'lstat',
'mkdir',
'move_uploaded_file',
'parse_ini_file',
'parse_ini_string',
'pathinfo',
'pclose',
'popen',
'readfile',
'readlink',
'realpath_cache_get',
'realpath_cache_size',
'realpath',
'rename',
'rewind',
'rmdir',
'set_file_buffer',
'stat',
'symlink',
'tempnam',
'tmpfile',
'touch',
'umask',
'unlink'],
'Filter': ['filter_has_var',
'filter_id',
'filter_input_array',
'filter_input',
'filter_list',
'filter_var_array',
'filter_var'],
'Firebird/InterBase': ['ibase_add_user',
'ibase_affected_rows',
'ibase_backup',
'ibase_blob_add',
'ibase_blob_cancel',
'ibase_blob_close',
'ibase_blob_create',
'ibase_blob_echo',
'ibase_blob_get',
'ibase_blob_import',
'ibase_blob_info',
'ibase_blob_open',
'ibase_close',
'ibase_commit_ret',
'ibase_commit',
'ibase_connect',
'ibase_db_info',
'ibase_delete_user',
'ibase_drop_db',
'ibase_errcode',
'ibase_errmsg',
'ibase_execute',
'ibase_fetch_assoc',
'ibase_fetch_object',
'ibase_fetch_row',
'ibase_field_info',
'ibase_free_event_handler',
'ibase_free_query',
'ibase_free_result',
'ibase_gen_id',
'ibase_maintain_db',
'ibase_modify_user',
'ibase_name_result',
'ibase_num_fields',
'ibase_num_params',
'ibase_param_info',
'ibase_pconnect',
'ibase_prepare',
'ibase_query',
'ibase_restore',
'ibase_rollback_ret',
'ibase_rollback',
'ibase_server_info',
'ibase_service_attach',
'ibase_service_detach',
'ibase_set_event_handler',
'ibase_timefmt',
'ibase_trans',
'ibase_wait_event'],
'FriBiDi': ['fribidi_log2vis'],
'FrontBase': ['fbsql_affected_rows',
'fbsql_autocommit',
'fbsql_blob_size',
'fbsql_change_user',
'fbsql_clob_size',
'fbsql_close',
'fbsql_commit',
'fbsql_connect',
'fbsql_create_blob',
'fbsql_create_clob',
'fbsql_create_db',
'fbsql_data_seek',
'fbsql_database_password',
'fbsql_database',
'fbsql_db_query',
'fbsql_db_status',
'fbsql_drop_db',
'fbsql_errno',
'fbsql_error',
'fbsql_fetch_array',
'fbsql_fetch_assoc',
'fbsql_fetch_field',
'fbsql_fetch_lengths',
'fbsql_fetch_object',
'fbsql_fetch_row',
'fbsql_field_flags',
'fbsql_field_len',
'fbsql_field_name',
'fbsql_field_seek',
'fbsql_field_table',
'fbsql_field_type',
'fbsql_free_result',
'fbsql_get_autostart_info',
'fbsql_hostname',
'fbsql_insert_id',
'fbsql_list_dbs',
'fbsql_list_fields',
'fbsql_list_tables',
'fbsql_next_result',
'fbsql_num_fields',
'fbsql_num_rows',
'fbsql_password',
'fbsql_pconnect',
'fbsql_query',
'fbsql_read_blob',
'fbsql_read_clob',
'fbsql_result',
'fbsql_rollback',
'fbsql_rows_fetched',
'fbsql_select_db',
'fbsql_set_characterset',
'fbsql_set_lob_mode',
'fbsql_set_password',
'fbsql_set_transaction',
'fbsql_start_db',
'fbsql_stop_db',
'fbsql_table_name',
'fbsql_tablename',
'fbsql_username',
'fbsql_warnings'],
'Function handling': ['call_user_func_array',
'call_user_func',
'create_function',
'forward_static_call_array',
'forward_static_call',
'func_get_arg',
'func_get_args',
'func_num_args',
'function_exists',
'get_defined_functions',
'register_shutdown_function',
'register_tick_function',
'unregister_tick_function'],
'GD and Image': ['gd_info',
'getimagesize',
'image_type_to_extension',
'image_type_to_mime_type'],
'GMP': ['gmp_abs',
'gmp_add',
'gmp_and',
'gmp_clrbit',
'gmp_cmp',
'gmp_com',
'gmp_div_q',
'gmp_div_qr',
'gmp_div_r',
'gmp_div',
'gmp_divexact',
'gmp_fact',
'gmp_gcd',
'gmp_gcdext',
'gmp_hamdist',
'gmp_init',
'gmp_intval',
'gmp_invert',
'gmp_jacobi',
'gmp_legendre',
'gmp_mod',
'gmp_mul',
'gmp_neg',
'gmp_nextprime',
'gmp_or',
'gmp_perfect_square',
'gmp_popcount',
'gmp_pow',
'gmp_powm',
'gmp_prob_prime',
'gmp_random',
'gmp_scan0',
'gmp_scan1',
'gmp_setbit',
'gmp_sign',
'gmp_sqrt',
'gmp_sqrtrem',
'gmp_strval',
'gmp_sub',
'gmp_testbit',
'gmp_xor'],
'GeoIP': ['geoip_continent_code_by_name',
'geoip_country_code_by_name',
'geoip_country_code3_by_name',
'geoip_country_name_by_name',
'geoip_database_info',
'geoip_db_avail',
'geoip_db_filename',
'geoip_db_get_all_info',
'geoip_id_by_name',
'geoip_isp_by_name',
'geoip_org_by_name',
'geoip_record_by_name',
'geoip_region_by_name',
'geoip_region_name_by_code',
'geoip_time_zone_by_country_and_region'],
'Gettext': ['bind_textdomain_codeset',
'bindtextdomain',
'dcgettext',
'dcngettext',
'dgettext',
'dngettext',
'gettext',
'ngettext',
'textdomain'],
'GnuPG': ['gnupg_adddecryptkey',
'gnupg_addencryptkey',
'gnupg_addsignkey',
'gnupg_cleardecryptkeys',
'gnupg_clearencryptkeys',
'gnupg_clearsignkeys',
'gnupg_decrypt',
'gnupg_decryptverify',
'gnupg_encrypt',
'gnupg_encryptsign',
'gnupg_export',
'gnupg_geterror',
'gnupg_getprotocol',
'gnupg_import',
'gnupg_init',
'gnupg_keyinfo',
'gnupg_setarmor',
'gnupg_seterrormode',
'gnupg_setsignmode',
'gnupg_sign',
'gnupg_verify'],
'Gopher': ['gopher_parsedir'],
'Grapheme': ['grapheme_extract',
'grapheme_stripos',
'grapheme_stristr',
'grapheme_strlen',
'grapheme_strpos',
'grapheme_strripos',
'grapheme_strrpos',
'grapheme_strstr',
'grapheme_substr'],
'Gupnp': ['gupnp_context_get_host_ip',
'gupnp_context_get_port',
'gupnp_context_get_subscription_timeout',
'gupnp_context_host_path',
'gupnp_context_new',
'gupnp_context_set_subscription_timeout',
'gupnp_context_timeout_add',
'gupnp_context_unhost_path',
'gupnp_control_point_browse_start',
'gupnp_control_point_browse_stop',
'gupnp_control_point_callback_set',
'gupnp_control_point_new',
'gupnp_device_action_callback_set',
'gupnp_device_info_get_service',
'gupnp_device_info_get',
'gupnp_root_device_get_available',
'gupnp_root_device_get_relative_location',
'gupnp_root_device_new',
'gupnp_root_device_set_available',
'gupnp_root_device_start',
'gupnp_root_device_stop',
'gupnp_service_action_get',
'gupnp_service_action_return_error',
'gupnp_service_action_return',
'gupnp_service_action_set',
'gupnp_service_freeze_notify',
'gupnp_service_info_get_introspection',
'gupnp_service_info_get',
'gupnp_service_introspection_get_state_variable',
'gupnp_service_notify',
'gupnp_service_proxy_action_get',
'gupnp_service_proxy_action_set',
'gupnp_service_proxy_add_notify',
'gupnp_service_proxy_callback_set',
'gupnp_service_proxy_get_subscribed',
'gupnp_service_proxy_remove_notify',
'gupnp_service_proxy_set_subscribed',
'gupnp_service_thaw_notify'],
'HTTP': ['http_cache_etag',
'http_cache_last_modified',
'http_chunked_decode',
'http_deflate',
'http_inflate',
'http_build_cookie',
'http_date',
'http_get_request_body_stream',
'http_get_request_body',
'http_get_request_headers',
'http_match_etag',
'http_match_modified',
'http_match_request_header',
'http_support',
'http_negotiate_charset',
'http_negotiate_content_type',
'http_negotiate_language',
'ob_deflatehandler',
'ob_etaghandler',
'ob_inflatehandler',
'http_parse_cookie',
'http_parse_headers',
'http_parse_message',
'http_parse_params',
'http_persistent_handles_clean',
'http_persistent_handles_count',
'http_persistent_handles_ident',
'http_get',
'http_head',
'http_post_data',
'http_post_fields',
'http_put_data',
'http_put_file',
'http_put_stream',
'http_request_body_encode',
'http_request_method_exists',
'http_request_method_name',
'http_request_method_register',
'http_request_method_unregister',
'http_request',
'http_redirect',
'http_send_content_disposition',
'http_send_content_type',
'http_send_data',
'http_send_file',
'http_send_last_modified',
'http_send_status',
'http_send_stream',
'http_throttle',
'http_build_str',
'http_build_url'],
'Hash': ['hash_algos',
'hash_copy',
'hash_file',
'hash_final',
'hash_hmac_file',
'hash_hmac',
'hash_init',
'hash_update_file',
'hash_update_stream',
'hash_update',
'hash'],
'Hyperwave': ['hw_Array2Objrec',
'hw_changeobject',
'hw_Children',
'hw_ChildrenObj',
'hw_Close',
'hw_Connect',
'hw_connection_info',
'hw_cp',
'hw_Deleteobject',
'hw_DocByAnchor',
'hw_DocByAnchorObj',
'hw_Document_Attributes',
'hw_Document_BodyTag',
'hw_Document_Content',
'hw_Document_SetContent',
'hw_Document_Size',
'hw_dummy',
'hw_EditText',
'hw_Error',
'hw_ErrorMsg',
'hw_Free_Document',
'hw_GetAnchors',
'hw_GetAnchorsObj',
'hw_GetAndLock',
'hw_GetChildColl',
'hw_GetChildCollObj',
'hw_GetChildDocColl',
'hw_GetChildDocCollObj',
'hw_GetObject',
'hw_GetObjectByQuery',
'hw_GetObjectByQueryColl',
'hw_GetObjectByQueryCollObj',
'hw_GetObjectByQueryObj',
'hw_GetParents',
'hw_GetParentsObj',
'hw_getrellink',
'hw_GetRemote',
'hw_getremotechildren',
'hw_GetSrcByDestObj',
'hw_GetText',
'hw_getusername',
'hw_Identify',
'hw_InCollections',
'hw_Info',
'hw_InsColl',
'hw_InsDoc',
'hw_insertanchors',
'hw_InsertDocument',
'hw_InsertObject',
'hw_mapid',
'hw_Modifyobject',
'hw_mv',
'hw_New_Document',
'hw_objrec2array',
'hw_Output_Document',
'hw_pConnect',
'hw_PipeDocument',
'hw_Root',
'hw_setlinkroot',
'hw_stat',
'hw_Unlock',
'hw_Who'],
'Hyperwave API': ['hw_api_attribute',
'hwapi_hgcsp',
'hw_api_content',
'hw_api_object'],
'IBM DB2': ['db2_autocommit',
'db2_bind_param',
'db2_client_info',
'db2_close',
'db2_column_privileges',
'db2_columns',
'db2_commit',
'db2_conn_error',
'db2_conn_errormsg',
'db2_connect',
'db2_cursor_type',
'db2_escape_string',
'db2_exec',
'db2_execute',
'db2_fetch_array',
'db2_fetch_assoc',
'db2_fetch_both',
'db2_fetch_object',
'db2_fetch_row',
'db2_field_display_size',
'db2_field_name',
'db2_field_num',
'db2_field_precision',
'db2_field_scale',
'db2_field_type',
'db2_field_width',
'db2_foreign_keys',
'db2_free_result',
'db2_free_stmt',
'db2_get_option',
'db2_last_insert_id'],
'ID3': ['id3_get_frame_long_name',
'id3_get_frame_short_name',
'id3_get_genre_id',
'id3_get_genre_list',
'id3_get_genre_name',
'id3_get_tag',
'id3_get_version',
'id3_remove_tag',
'id3_set_tag'],
'IDN': ['idn_to_ascii', 'idn_to_unicode', 'idn_to_utf8'],
'IIS': ['iis_add_server',
'iis_get_dir_security',
'iis_get_script_map',
'iis_get_server_by_comment',
'iis_get_server_by_path',
'iis_get_server_rights',
'iis_get_service_state',
'iis_remove_server',
'iis_set_app_settings',
'iis_set_dir_security',
'iis_set_script_map',
'iis_set_server_rights',
'iis_start_server',
'iis_start_service',
'iis_stop_server',
'iis_stop_service'],
'IMAP': ['imap_8bit',
'imap_alerts',
'imap_append',
'imap_base64',
'imap_binary',
'imap_body',
'imap_bodystruct',
'imap_check',
'imap_clearflag_full',
'imap_close',
'imap_createmailbox',
'imap_delete',
'imap_deletemailbox',
'imap_errors',
'imap_expunge',
'imap_fetch_overview',
'imap_fetchbody',
'imap_fetchheader',
'imap_fetchmime',
'imap_fetchstructure',
'imap_gc',
'imap_get_quota',
'imap_get_quotaroot',
'imap_getacl',
'imap_getmailboxes',
'imap_getsubscribed',
'imap_header',
'imap_headerinfo',
'imap_headers',
'imap_last_error',
'imap_list',
'imap_listmailbox',
'imap_listscan',
'imap_listsubscribed',
'imap_lsub',
'imap_mail_compose',
'imap_mail_copy',
'imap_mail_move',
'imap_mail',
'imap_mailboxmsginfo',
'imap_mime_header_decode',
'imap_msgno',
'imap_num_msg',
'imap_num_recent',
'imap_open',
'imap_ping',
'imap_qprint',
'imap_renamemailbox',
'imap_reopen',
'imap_rfc822_parse_adrlist',
'imap_rfc822_parse_headers',
'imap_rfc822_write_address',
'imap_savebody',
'imap_scanmailbox',
'imap_search',
'imap_set_quota',
'imap_setacl',
'imap_setflag_full',
'imap_sort',
'imap_status',
'imap_subscribe',
'imap_thread',
'imap_timeout',
'imap_uid',
'imap_undelete',
'imap_unsubscribe',
'imap_utf7_decode',
'imap_utf7_encode',
'imap_utf8'],
'Informix': ['ifx_affected_rows',
'ifx_blobinfile_mode',
'ifx_byteasvarchar',
'ifx_close',
'ifx_connect',
'ifx_copy_blob',
'ifx_create_blob',
'ifx_create_char',
'ifx_do',
'ifx_error',
'ifx_errormsg',
'ifx_fetch_row',
'ifx_fieldproperties',
'ifx_fieldtypes',
'ifx_free_blob',
'ifx_free_char',
'ifx_free_result',
'ifx_get_blob',
'ifx_get_char',
'ifx_getsqlca',
'ifx_htmltbl_result',
'ifx_nullformat',
'ifx_num_fields',
'ifx_num_rows',
'ifx_pconnect',
'ifx_prepare',
'ifx_query',
'ifx_textasvarchar',
'ifx_update_blob',
'ifx_update_char',
'ifxus_close_slob',
'ifxus_create_slob',
'ifxus_free_slob',
'ifxus_open_slob',
'ifxus_read_slob',
'ifxus_seek_slob',
'ifxus_tell_slob',
'ifxus_write_slob'],
'Ingres': ['ingres_autocommit_state',
'ingres_autocommit',
'ingres_charset',
'ingres_close',
'ingres_commit',
'ingres_connect',
'ingres_cursor',
'ingres_errno',
'ingres_error',
'ingres_errsqlstate',
'ingres_escape_string',
'ingres_execute',
'ingres_fetch_array',
'ingres_fetch_assoc',
'ingres_fetch_object',
'ingres_fetch_proc_return',
'ingres_fetch_row',
'ingres_field_length',
'ingres_field_name',
'ingres_field_nullable',
'ingres_field_precision',
'ingres_field_scale',
'ingres_field_type',
'ingres_free_result',
'ingres_next_error',
'ingres_num_fields',
'ingres_num_rows',
'ingres_pconnect',
'ingres_prepare',
'ingres_query',
'ingres_result_seek',
'ingres_rollback',
'ingres_set_environment',
'ingres_unbuffered_query'],
'Inotify': ['inotify_add_watch',
'inotify_init',
'inotify_queue_len',
'inotify_read',
'inotify_rm_watch'],
'JSON': ['json_decode', 'json_encode', 'json_last_error'],
'Java': ['java_last_exception_clear', 'java_last_exception_get'],
'Judy': ['judy_type', 'judy_version'],
'KADM5': ['kadm5_chpass_principal',
'kadm5_create_principal',
'kadm5_delete_principal',
'kadm5_destroy',
'kadm5_flush',
'kadm5_get_policies',
'kadm5_get_principal',
'kadm5_get_principals',
'kadm5_init_with_password',
'kadm5_modify_principal'],
'LDAP': ['ldap_8859_to_t61',
'ldap_add',
'ldap_bind',
'ldap_close',
'ldap_compare',
'ldap_connect',
'ldap_count_entries',
'ldap_delete',
'ldap_dn2ufn',
'ldap_err2str',
'ldap_errno',
'ldap_error',
'ldap_explode_dn',
'ldap_first_attribute',
'ldap_first_entry',
'ldap_first_reference',
'ldap_free_result',
'ldap_get_attributes',
'ldap_get_dn',
'ldap_get_entries',
'ldap_get_option',
'ldap_get_values_len',
'ldap_get_values',
'ldap_list',
'ldap_mod_add',
'ldap_mod_del',
'ldap_mod_replace',
'ldap_modify',
'ldap_next_attribute',
'ldap_next_entry',
'ldap_next_reference',
'ldap_parse_reference',
'ldap_parse_result',
'ldap_read',
'ldap_rename',
'ldap_sasl_bind',
'ldap_search',
'ldap_set_option',
'ldap_set_rebind_proc',
'ldap_sort',
'ldap_start_tls',
'ldap_t61_to_8859',
'ldap_unbind'],
'LZF': ['lzf_compress', 'lzf_decompress', 'lzf_optimized_for'],
'Libevent': ['event_add',
'event_base_free',
'event_base_loop',
'event_base_loopbreak',
'event_base_loopexit',
'event_base_new',
'event_base_priority_init',
'event_base_set',
'event_buffer_base_set',
'event_buffer_disable',
'event_buffer_enable',
'event_buffer_fd_set',
'event_buffer_free',
'event_buffer_new',
'event_buffer_priority_set',
'event_buffer_read',
'event_buffer_set_callback',
'event_buffer_timeout_set',
'event_buffer_watermark_set',
'event_buffer_write',
'event_del',
'event_free',
'event_new',
'event_set'],
'Lotus Notes': ['notes_body',
'notes_copy_db',
'notes_create_db',
'notes_create_note',
'notes_drop_db',
'notes_find_note',
'notes_header_info',
'notes_list_msgs',
'notes_mark_read',
'notes_mark_unread',
'notes_nav_create',
'notes_search',
'notes_unread',
'notes_version'],
'MCVE': ['m_checkstatus',
'm_completeauthorizations',
'm_connect',
'm_connectionerror',
'm_deletetrans',
'm_destroyconn',
'm_destroyengine',
'm_getcell',
'm_getcellbynum',
'm_getcommadelimited',
'm_getheader',
'm_initconn',
'm_initengine',
'm_iscommadelimited',
'm_maxconntimeout',
'm_monitor',
'm_numcolumns',
'm_numrows',
'm_parsecommadelimited',
'm_responsekeys'],
'Mail': ['ezmlm_hash', 'mail'],
'Mailparse': ['mailparse_determine_best_xfer_encoding',
'mailparse_msg_create',
'mailparse_msg_extract_part_file',
'mailparse_msg_extract_part',
'mailparse_msg_extract_whole_part_file',
'mailparse_msg_free',
'mailparse_msg_get_part_data',
'mailparse_msg_get_part',
'mailparse_msg_get_structure',
'mailparse_msg_parse_file',
'mailparse_msg_parse',
'mailparse_rfc822_parse_addresses',
'mailparse_stream_encode',
'mailparse_uudecode_all'],
'Math': ['abs',
'acos',
'acosh',
'asin',
'asinh',
'atan2',
'atan',
'atanh',
'base_convert',
'bindec',
'ceil',
'cos',
'cosh',
'decbin',
'dechex',
'decoct',
'deg2rad',
'exp',
'expm1'],
'MaxDB': ['maxdb_affected_rows',
'maxdb_autocommit',
'maxdb_bind_param',
'maxdb_bind_result',
'maxdb_change_user',
'maxdb_character_set_name',
'maxdb_client_encoding',
'maxdb_close_long_data',
'maxdb_close',
'maxdb_commit',
'maxdb_connect_errno',
'maxdb_connect_error',
'maxdb_connect',
'maxdb_data_seek',
'maxdb_debug',
'maxdb_disable_reads_from_master',
'maxdb_disable_rpl_parse',
'maxdb_dump_debug_info',
'maxdb_embedded_connect',
'maxdb_enable_reads_from_master',
'maxdb_enable_rpl_parse',
'maxdb_errno',
'maxdb_error',
'maxdb_escape_string',
'maxdb_execute',
'maxdb_fetch_array',
'maxdb_fetch_assoc',
'maxdb_fetch_field_direct',
'maxdb_fetch_field',
'maxdb_fetch_fields',
'maxdb_fetch_lengths',
'maxdb_fetch_object',
'maxdb_fetch_row',
'maxdb_fetch',
'maxdb_field_count',
'maxdb_field_seek',
'maxdb_field_tell',
'maxdb_free_result',
'maxdb_get_client_info',
'maxdb_get_client_version',
'maxdb_get_host_info',
'maxdb_get_metadata',
'maxdb_get_proto_info',
'maxdb_get_server_info',
'maxdb_get_server_version',
'maxdb_info',
'maxdb_init',
'maxdb_insert_id',
'maxdb_kill',
'maxdb_master_query',
'maxdb_more_results',
'maxdb_multi_query',
'maxdb_next_result',
'maxdb_num_fields',
'maxdb_num_rows',
'maxdb_options',
'maxdb_param_count',
'maxdb_ping',
'maxdb_prepare',
'maxdb_query',
'maxdb_real_connect',
'maxdb_real_escape_string',
'maxdb_real_query',
'maxdb_report',
'maxdb_rollback',
'maxdb_rpl_parse_enabled',
'maxdb_rpl_probe',
'maxdb_rpl_query_type',
'maxdb_select_db',
'maxdb_send_long_data',
'maxdb_send_query',
'maxdb_server_end',
'maxdb_server_init',
'maxdb_set_opt',
'maxdb_sqlstate',
'maxdb_ssl_set',
'maxdb_stat',
'maxdb_stmt_affected_rows'],
'Mcrypt': ['mcrypt_cbc',
'mcrypt_cfb',
'mcrypt_create_iv',
'mcrypt_decrypt',
'mcrypt_ecb',
'mcrypt_enc_get_algorithms_name',
'mcrypt_enc_get_block_size',
'mcrypt_enc_get_iv_size',
'mcrypt_enc_get_key_size',
'mcrypt_enc_get_modes_name',
'mcrypt_enc_get_supported_key_sizes',
'mcrypt_enc_is_block_algorithm_mode',
'mcrypt_enc_is_block_algorithm',
'mcrypt_enc_is_block_mode',
'mcrypt_enc_self_test',
'mcrypt_encrypt',
'mcrypt_generic_deinit',
'mcrypt_generic_end',
'mcrypt_generic_init',
'mcrypt_generic',
'mcrypt_get_block_size',
'mcrypt_get_cipher_name',
'mcrypt_get_iv_size',
'mcrypt_get_key_size',
'mcrypt_list_algorithms',
'mcrypt_list_modes',
'mcrypt_module_close',
'mcrypt_module_get_algo_block_size',
'mcrypt_module_get_algo_key_size',
'mcrypt_module_get_supported_key_sizes',
'mcrypt_module_is_block_algorithm_mode',
'mcrypt_module_is_block_algorithm',
'mcrypt_module_is_block_mode',
'mcrypt_module_open',
'mcrypt_module_self_test',
'mcrypt_ofb',
'mdecrypt_generic'],
'Memcache': ['memcache_debug'],
'Mhash': ['mhash_count',
'mhash_get_block_size',
'mhash_get_hash_name',
'mhash_keygen_s2k',
'mhash'],
'Ming': ['ming_keypress',
'ming_setcubicthreshold',
'ming_setscale',
'ming_setswfcompression',
'ming_useconstants',
'ming_useswfversion'],
'Misc.': ['connection_aborted',
'connection_status',
'connection_timeout',
'constant',
'define',
'defined',
'die',
'eval',
'exit',
'get_browser',
'__halt_compiler',
'highlight_file',
'highlight_string',
'ignore_user_abort',
'pack',
'php_check_syntax',
'php_strip_whitespace',
'show_source',
'sleep',
'sys_getloadavg',
'time_nanosleep',
'time_sleep_until',
'uniqid',
'unpack',
'usleep'],
'Mongo': ['bson_decode', 'bson_encode'],
'Msession': ['msession_connect',
'msession_count',
'msession_create',
'msession_destroy',
'msession_disconnect',
'msession_find',
'msession_get_array',
'msession_get_data',
'msession_get',
'msession_inc',
'msession_list',
'msession_listvar',
'msession_lock',
'msession_plugin',
'msession_randstr',
'msession_set_array',
'msession_set_data',
'msession_set',
'msession_timeout',
'msession_uniq',
'msession_unlock'],
'Mssql': ['mssql_bind',
'mssql_close',
'mssql_connect',
'mssql_data_seek',
'mssql_execute',
'mssql_fetch_array',
'mssql_fetch_assoc',
'mssql_fetch_batch',
'mssql_fetch_field',
'mssql_fetch_object',
'mssql_fetch_row',
'mssql_field_length',
'mssql_field_name',
'mssql_field_seek',
'mssql_field_type',
'mssql_free_result',
'mssql_free_statement',
'mssql_get_last_message',
'mssql_guid_string',
'mssql_init',
'mssql_min_error_severity',
'mssql_min_message_severity',
'mssql_next_result',
'mssql_num_fields',
'mssql_num_rows',
'mssql_pconnect',
'mssql_query',
'mssql_result',
'mssql_rows_affected',
'mssql_select_db'],
'Multibyte String': ['mb_check_encoding',
'mb_convert_case',
'mb_convert_encoding',
'mb_convert_kana',
'mb_convert_variables',
'mb_decode_mimeheader',
'mb_decode_numericentity',
'mb_detect_encoding',
'mb_detect_order',
'mb_encode_mimeheader',
'mb_encode_numericentity',
'mb_encoding_aliases',
'mb_ereg_match',
'mb_ereg_replace',
'mb_ereg_search_getpos',
'mb_ereg_search_getregs',
'mb_ereg_search_init',
'mb_ereg_search_pos',
'mb_ereg_search_regs',
'mb_ereg_search_setpos',
'mb_ereg_search',
'mb_ereg',
'mb_eregi_replace',
'mb_eregi',
'mb_get_info',
'mb_http_input',
'mb_http_output',
'mb_internal_encoding',
'mb_language',
'mb_list_encodings',
'mb_output_handler',
'mb_parse_str',
'mb_preferred_mime_name',
'mb_regex_encoding',
'mb_regex_set_options',
'mb_send_mail',
'mb_split',
'mb_strcut',
'mb_strimwidth',
'mb_stripos',
'mb_stristr',
'mb_strlen',
'mb_strpos',
'mb_strrchr',
'mb_strrichr',
'mb_strripos',
'mb_strrpos',
'mb_strstr',
'mb_strtolower',
'mb_strtoupper',
'mb_strwidth',
'mb_substitute_character',
'mb_substr_count',
'mb_substr'],
'MySQL': ['mysql_affected_rows',
'mysql_client_encoding',
'mysql_close',
'mysql_connect',
'mysql_create_db',
'mysql_data_seek',
'mysql_db_name',
'mysql_db_query',
'mysql_drop_db',
'mysql_errno',
'mysql_error',
'mysql_escape_string',
'mysql_fetch_array',
'mysql_fetch_assoc',
'mysql_fetch_field',
'mysql_fetch_lengths',
'mysql_fetch_object',
'mysql_fetch_row',
'mysql_field_flags',
'mysql_field_len',
'mysql_field_name',
'mysql_field_seek',
'mysql_field_table',
'mysql_field_type',
'mysql_free_result',
'mysql_get_client_info',
'mysql_get_host_info',
'mysql_get_proto_info',
'mysql_get_server_info',
'mysql_info',
'mysql_insert_id',
'mysql_list_dbs',
'mysql_list_fields',
'mysql_list_processes',
'mysql_list_tables',
'mysql_num_fields',
'mysql_num_rows',
'mysql_pconnect',
'mysql_ping',
'mysql_query',
'mysql_real_escape_string',
'mysql_result',
'mysql_select_db',
'mysql_set_charset',
'mysql_stat',
'mysql_tablename',
'mysql_thread_id',
'mysql_unbuffered_query'],
'NSAPI': ['nsapi_request_headers', 'nsapi_response_headers', 'nsapi_virtual'],
'Ncurses': ['ncurses_addch',
'ncurses_addchnstr',
'ncurses_addchstr',
'ncurses_addnstr',
'ncurses_addstr',
'ncurses_assume_default_colors',
'ncurses_attroff',
'ncurses_attron',
'ncurses_attrset',
'ncurses_baudrate',
'ncurses_beep',
'ncurses_bkgd',
'ncurses_bkgdset',
'ncurses_border',
'ncurses_bottom_panel',
'ncurses_can_change_color',
'ncurses_cbreak',
'ncurses_clear',
'ncurses_clrtobot',
'ncurses_clrtoeol',
'ncurses_color_content',
'ncurses_color_set',
'ncurses_curs_set',
'ncurses_def_prog_mode',
'ncurses_def_shell_mode',
'ncurses_define_key',
'ncurses_del_panel',
'ncurses_delay_output',
'ncurses_delch',
'ncurses_deleteln',
'ncurses_delwin',
'ncurses_doupdate',
'ncurses_echo',
'ncurses_echochar',
'ncurses_end',
'ncurses_erase',
'ncurses_erasechar',
'ncurses_filter',
'ncurses_flash',
'ncurses_flushinp',
'ncurses_getch',
'ncurses_getmaxyx',
'ncurses_getmouse',
'ncurses_getyx',
'ncurses_halfdelay',
'ncurses_has_colors',
'ncurses_has_ic',
'ncurses_has_il',
'ncurses_has_key',
'ncurses_hide_panel',
'ncurses_hline',
'ncurses_inch',
'ncurses_init_color',
'ncurses_init_pair',
'ncurses_init',
'ncurses_insch',
'ncurses_insdelln',
'ncurses_insertln',
'ncurses_insstr',
'ncurses_instr',
'ncurses_isendwin',
'ncurses_keyok',
'ncurses_keypad',
'ncurses_killchar',
'ncurses_longname',
'ncurses_meta',
'ncurses_mouse_trafo',
'ncurses_mouseinterval',
'ncurses_mousemask',
'ncurses_move_panel',
'ncurses_move',
'ncurses_mvaddch',
'ncurses_mvaddchnstr',
'ncurses_mvaddchstr',
'ncurses_mvaddnstr',
'ncurses_mvaddstr',
'ncurses_mvcur',
'ncurses_mvdelch',
'ncurses_mvgetch',
'ncurses_mvhline',
'ncurses_mvinch',
'ncurses_mvvline',
'ncurses_mvwaddstr',
'ncurses_napms',
'ncurses_new_panel',
'ncurses_newpad',
'ncurses_newwin',
'ncurses_nl',
'ncurses_nocbreak',
'ncurses_noecho',
'ncurses_nonl',
'ncurses_noqiflush',
'ncurses_noraw',
'ncurses_pair_content',
'ncurses_panel_above',
'ncurses_panel_below',
'ncurses_panel_window',
'ncurses_pnoutrefresh',
'ncurses_prefresh',
'ncurses_putp',
'ncurses_qiflush',
'ncurses_raw',
'ncurses_refresh',
'ncurses_replace_panel',
'ncurses_reset_prog_mode',
'ncurses_reset_shell_mode',
'ncurses_resetty',
'ncurses_savetty',
'ncurses_scr_dump',
'ncurses_scr_init',
'ncurses_scr_restore',
'ncurses_scr_set',
'ncurses_scrl',
'ncurses_show_panel',
'ncurses_slk_attr',
'ncurses_slk_attroff',
'ncurses_slk_attron',
'ncurses_slk_attrset',
'ncurses_slk_clear',
'ncurses_slk_color',
'ncurses_slk_init',
'ncurses_slk_noutrefresh',
'ncurses_slk_refresh',
'ncurses_slk_restore',
'ncurses_slk_set',
'ncurses_slk_touch',
'ncurses_standend',
'ncurses_standout',
'ncurses_start_color',
'ncurses_termattrs',
'ncurses_termname',
'ncurses_timeout',
'ncurses_top_panel',
'ncurses_typeahead',
'ncurses_ungetch',
'ncurses_ungetmouse',
'ncurses_update_panels',
'ncurses_use_default_colors',
'ncurses_use_env',
'ncurses_use_extended_names',
'ncurses_vidattr',
'ncurses_vline',
'ncurses_waddch',
'ncurses_waddstr',
'ncurses_wattroff',
'ncurses_wattron',
'ncurses_wattrset',
'ncurses_wborder',
'ncurses_wclear',
'ncurses_wcolor_set',
'ncurses_werase',
'ncurses_wgetch',
'ncurses_whline',
'ncurses_wmouse_trafo',
'ncurses_wmove',
'ncurses_wnoutrefresh',
'ncurses_wrefresh',
'ncurses_wstandend',
'ncurses_wstandout',
'ncurses_wvline'],
'Network': ['checkdnsrr',
'closelog',
'define_syslog_variables',
'dns_check_record',
'dns_get_mx',
'dns_get_record',
'fsockopen',
'gethostbyaddr',
'gethostbyname',
'gethostbynamel'],
'Newt': ['newt_bell',
'newt_button_bar',
'newt_button',
'newt_centered_window',
'newt_checkbox_get_value',
'newt_checkbox_set_flags',
'newt_checkbox_set_value',
'newt_checkbox_tree_add_item',
'newt_checkbox_tree_find_item',
'newt_checkbox_tree_get_current',
'newt_checkbox_tree_get_entry_value',
'newt_checkbox_tree_get_multi_selection',
'newt_checkbox_tree_get_selection',
'newt_checkbox_tree_multi',
'newt_checkbox_tree_set_current',
'newt_checkbox_tree_set_entry_value',
'newt_checkbox_tree_set_entry',
'newt_checkbox_tree_set_width',
'newt_checkbox_tree',
'newt_checkbox',
'newt_clear_key_buffer'],
'OAuth': ['oauth_get_sbs', 'oauth_urlencode'],
'OCI8': ['oci_bind_array_by_name',
'oci_bind_by_name',
'oci_cancel',
'oci_close',
'oci_commit',
'oci_connect',
'oci_define_by_name',
'oci_error',
'oci_execute',
'oci_fetch_all',
'oci_fetch_array',
'oci_fetch_assoc',
'oci_fetch_object',
'oci_fetch_row',
'oci_fetch',
'oci_field_is_null',
'oci_field_name',
'oci_field_precision',
'oci_field_scale',
'oci_field_size',
'oci_field_type_raw',
'oci_field_type',
'oci_free_statement',
'oci_internal_debug',
'oci_lob_copy',
'oci_lob_is_equal',
'oci_new_collection',
'oci_new_connect',
'oci_new_cursor',
'oci_new_descriptor',
'oci_num_fields',
'oci_num_rows',
'oci_parse',
'oci_password_change',
'oci_pconnect',
'oci_result',
'oci_rollback',
'oci_server_version',
'oci_set_action',
'oci_set_client_identifier',
'oci_set_client_info',
'oci_set_edition',
'oci_set_module_name',
'oci_set_prefetch',
'oci_statement_type'],
'ODBC': ['odbc_autocommit',
'odbc_binmode',
'odbc_close_all',
'odbc_close',
'odbc_columnprivileges',
'odbc_columns',
'odbc_commit',
'odbc_connect',
'odbc_cursor',
'odbc_data_source',
'odbc_do',
'odbc_error',
'odbc_errormsg',
'odbc_exec',
'odbc_execute',
'odbc_fetch_array',
'odbc_fetch_into',
'odbc_fetch_object',
'odbc_fetch_row',
'odbc_field_len',
'odbc_field_name',
'odbc_field_num',
'odbc_field_precision',
'odbc_field_scale',
'odbc_field_type',
'odbc_foreignkeys',
'odbc_free_result',
'odbc_gettypeinfo',
'odbc_longreadlen',
'odbc_next_result',
'odbc_num_fields',
'odbc_num_rows',
'odbc_pconnect',
'odbc_prepare',
'odbc_primarykeys',
'odbc_procedurecolumns',
'odbc_procedures',
'odbc_result_all',
'odbc_result',
'odbc_rollback',
'odbc_setoption',
'odbc_specialcolumns',
'odbc_statistics',
'odbc_tableprivileges',
'odbc_tables'],
'Object Aggregation': ['aggregate_info',
'aggregate_methods_by_list',
'aggregate_methods_by_regexp'],
'Object overloading': ['overload'],
'OpenAL': ['openal_buffer_create',
'openal_buffer_data',
'openal_buffer_destroy',
'openal_buffer_get',
'openal_buffer_loadwav',
'openal_context_create',
'openal_context_current',
'openal_context_destroy',
'openal_context_process',
'openal_context_suspend',
'openal_device_close',
'openal_device_open',
'openal_listener_get',
'openal_listener_set',
'openal_source_create',
'openal_source_destroy',
'openal_source_get',
'openal_source_pause',
'openal_source_play',
'openal_source_rewind',
'openal_source_set',
'openal_source_stop',
'openal_stream'],
'OpenSSL': ['openssl_csr_export_to_file',
'openssl_csr_export',
'openssl_csr_get_public_key',
'openssl_csr_get_subject',
'openssl_csr_new',
'openssl_csr_sign',
'openssl_decrypt',
'openssl_dh_compute_key',
'openssl_digest',
'openssl_encrypt',
'openssl_error_string',
'openssl_free_key',
'openssl_get_cipher_methods',
'openssl_get_md_methods',
'openssl_get_privatekey',
'openssl_get_publickey',
'openssl_open',
'openssl_pkcs12_export_to_file',
'openssl_pkcs12_export',
'openssl_pkcs12_read',
'openssl_pkcs7_decrypt',
'openssl_pkcs7_encrypt',
'openssl_pkcs7_sign',
'openssl_pkcs7_verify',
'openssl_pkey_export_to_file',
'openssl_pkey_export',
'openssl_pkey_free',
'openssl_pkey_get_details',
'openssl_pkey_get_private',
'openssl_pkey_get_public',
'openssl_pkey_new',
'openssl_private_decrypt',
'openssl_private_encrypt',
'openssl_public_decrypt',
'openssl_public_encrypt',
'openssl_random_pseudo_bytes',
'openssl_seal',
'openssl_sign',
'openssl_verify',
'openssl_x509_check_private_key',
'openssl_x509_checkpurpose',
'openssl_x509_export_to_file',
'openssl_x509_export',
'openssl_x509_free',
'openssl_x509_parse',
'openssl_x509_read'],
'Output Control': ['flush',
'ob_clean',
'ob_end_clean',
'ob_end_flush',
'ob_flush',
'ob_get_clean',
'ob_get_contents',
'ob_get_flush',
'ob_get_length',
'ob_get_level',
'ob_get_status',
'ob_gzhandler',
'ob_implicit_flush',
'ob_list_handlers',
'ob_start',
'output_add_rewrite_var',
'output_reset_rewrite_vars'],
'Ovrimos SQL': ['ovrimos_close',
'ovrimos_commit',
'ovrimos_connect',
'ovrimos_cursor',
'ovrimos_exec',
'ovrimos_execute',
'ovrimos_fetch_into',
'ovrimos_fetch_row',
'ovrimos_field_len',
'ovrimos_field_name',
'ovrimos_field_num',
'ovrimos_field_type',
'ovrimos_free_result',
'ovrimos_longreadlen',
'ovrimos_num_fields',
'ovrimos_num_rows',
'ovrimos_prepare',
'ovrimos_result_all',
'ovrimos_result',
'ovrimos_rollback'],
'PCNTL': ['pcntl_alarm',
'pcntl_exec',
'pcntl_fork',
'pcntl_getpriority',
'pcntl_setpriority',
'pcntl_signal_dispatch',
'pcntl_signal',
'pcntl_sigprocmask',
'pcntl_sigtimedwait',
'pcntl_sigwaitinfo',
'pcntl_wait',
'pcntl_waitpid',
'pcntl_wexitstatus',
'pcntl_wifexited',
'pcntl_wifsignaled',
'pcntl_wifstopped',
'pcntl_wstopsig',
'pcntl_wtermsig'],
'PCRE': ['preg_filter',
'preg_grep',
'preg_last_error',
'preg_match_all',
'preg_match',
'preg_quote',
'preg_replace_callback',
'preg_replace',
'preg_split'],
'PDF': ['PDF_activate_item',
'PDF_add_annotation',
'PDF_add_bookmark',
'PDF_add_launchlink',
'PDF_add_locallink',
'PDF_add_nameddest',
'PDF_add_note',
'PDF_add_outline',
'PDF_add_pdflink',
'PDF_add_table_cell',
'PDF_add_textflow',
'PDF_add_thumbnail',
'PDF_add_weblink',
'PDF_arc',
'PDF_arcn',
'PDF_attach_file',
'PDF_begin_document',
'PDF_begin_font',
'PDF_begin_glyph',
'PDF_begin_item',
'PDF_begin_layer',
'PDF_begin_page_ext',
'PDF_begin_page',
'PDF_begin_pattern',
'PDF_begin_template_ext',
'PDF_begin_template',
'PDF_circle',
'PDF_clip',
'PDF_close_image',
'PDF_close_pdi_page',
'PDF_close_pdi',
'PDF_close',
'PDF_closepath_fill_stroke',
'PDF_closepath_stroke',
'PDF_closepath',
'PDF_concat',
'PDF_continue_text',
'PDF_create_3dview',
'PDF_create_action',
'PDF_create_annotation',
'PDF_create_bookmark',
'PDF_create_field',
'PDF_create_fieldgroup',
'PDF_create_gstate',
'PDF_create_pvf',
'PDF_create_textflow',
'PDF_curveto',
'PDF_define_layer',
'PDF_delete_pvf',
'PDF_delete_table',
'PDF_delete_textflow',
'PDF_delete',
'PDF_encoding_set_char',
'PDF_end_document',
'PDF_end_font',
'PDF_end_glyph',
'PDF_end_item',
'PDF_end_layer',
'PDF_end_page_ext',
'PDF_end_page',
'PDF_end_pattern',
'PDF_end_template',
'PDF_endpath',
'PDF_fill_imageblock',
'PDF_fill_pdfblock',
'PDF_fill_stroke',
'PDF_fill_textblock',
'PDF_fill',
'PDF_findfont',
'PDF_fit_image',
'PDF_fit_pdi_page',
'PDF_fit_table',
'PDF_fit_textflow',
'PDF_fit_textline',
'PDF_get_apiname',
'PDF_get_buffer',
'PDF_get_errmsg',
'PDF_get_errnum',
'PDF_get_font',
'PDF_get_fontname',
'PDF_get_fontsize',
'PDF_get_image_height',
'PDF_get_image_width',
'PDF_get_majorversion',
'PDF_get_minorversion',
'PDF_get_parameter',
'PDF_get_pdi_parameter',
'PDF_get_pdi_value',
'PDF_get_value',
'PDF_info_font',
'PDF_info_matchbox',
'PDF_info_table',
'PDF_info_textflow',
'PDF_info_textline',
'PDF_initgraphics',
'PDF_lineto',
'PDF_load_3ddata',
'PDF_load_font',
'PDF_load_iccprofile',
'PDF_load_image',
'PDF_makespotcolor',
'PDF_moveto',
'PDF_new',
'PDF_open_ccitt',
'PDF_open_file',
'PDF_open_gif',
'PDF_open_image_file',
'PDF_open_image',
'PDF_open_jpeg',
'PDF_open_memory_image',
'PDF_open_pdi_document',
'PDF_open_pdi_page',
'PDF_open_pdi',
'PDF_open_tiff',
'PDF_pcos_get_number',
'PDF_pcos_get_stream',
'PDF_pcos_get_string',
'PDF_place_image',
'PDF_place_pdi_page',
'PDF_process_pdi',
'PDF_rect',
'PDF_restore',
'PDF_resume_page',
'PDF_rotate',
'PDF_save',
'PDF_scale',
'PDF_set_border_color',
'PDF_set_border_dash',
'PDF_set_border_style',
'PDF_set_char_spacing',
'PDF_set_duration',
'PDF_set_gstate',
'PDF_set_horiz_scaling',
'PDF_set_info_author',
'PDF_set_info_creator',
'PDF_set_info_keywords',
'PDF_set_info_subject',
'PDF_set_info_title',
'PDF_set_info',
'PDF_set_layer_dependency',
'PDF_set_leading',
'PDF_set_parameter',
'PDF_set_text_matrix',
'PDF_set_text_pos',
'PDF_set_text_rendering',
'PDF_set_text_rise',
'PDF_set_value',
'PDF_set_word_spacing',
'PDF_setcolor',
'PDF_setdash',
'PDF_setdashpattern',
'PDF_setflat',
'PDF_setfont',
'PDF_setgray_fill',
'PDF_setgray_stroke',
'PDF_setgray',
'PDF_setlinecap',
'PDF_setlinejoin',
'PDF_setlinewidth',
'PDF_setmatrix',
'PDF_setmiterlimit',
'PDF_setpolydash',
'PDF_setrgbcolor_fill',
'PDF_setrgbcolor_stroke',
'PDF_setrgbcolor',
'PDF_shading_pattern',
'PDF_shading',
'PDF_shfill',
'PDF_show_boxed',
'PDF_show_xy',
'PDF_show',
'PDF_skew',
'PDF_stringwidth',
'PDF_stroke',
'PDF_suspend_page',
'PDF_translate',
'PDF_utf16_to_utf8',
'PDF_utf32_to_utf16',
'PDF_utf8_to_utf16'],
'PHP Options/Info': ['assert_options',
'assert',
'dl',
'extension_loaded',
'gc_collect_cycles',
'gc_disable',
'gc_enable',
'gc_enabled',
'get_cfg_var',
'get_current_user',
'get_defined_constants',
'get_extension_funcs',
'get_include_path',
'get_included_files',
'get_loaded_extensions',
'get_magic_quotes_gpc',
'get_magic_quotes_runtime',
'get_required_files',
'getenv',
'getlastmod',
'getmygid',
'getmyinode',
'getmypid',
'getmyuid',
'getopt',
'getrusage',
'ini_alter',
'ini_get_all',
'ini_get',
'ini_restore',
'ini_set',
'magic_quotes_runtime',
'memory_get_peak_usage',
'memory_get_usage',
'php_ini_loaded_file',
'php_ini_scanned_files',
'php_logo_guid',
'php_sapi_name',
'php_uname',
'phpcredits',
'phpinfo',
'phpversion',
'putenv',
'restore_include_path',
'set_include_path',
'set_magic_quotes_runtime',
'set_time_limit',
'sys_get_temp_dir',
'version_compare',
'zend_logo_guid',
'zend_thread_id',
'zend_version'],
'POSIX': ['posix_access',
'posix_ctermid',
'posix_errno',
'posix_get_last_error',
'posix_getcwd',
'posix_getegid',
'posix_geteuid',
'posix_getgid',
'posix_getgrgid',
'posix_getgrnam',
'posix_getgroups',
'posix_getlogin',
'posix_getpgid',
'posix_getpgrp',
'posix_getpid',
'posix_getppid',
'posix_getpwnam',
'posix_getpwuid',
'posix_getrlimit',
'posix_getsid',
'posix_getuid',
'posix_initgroups',
'posix_isatty',
'posix_kill',
'posix_mkfifo',
'posix_mknod',
'posix_setegid',
'posix_seteuid',
'posix_setgid',
'posix_setpgid',
'posix_setsid',
'posix_setuid',
'posix_strerror',
'posix_times',
'posix_ttyname',
'posix_uname'],
'POSIX Regex': ['ereg_replace',
'ereg',
'eregi_replace',
'eregi',
'split',
'spliti',
'sql_regcase'],
'PS': ['ps_add_bookmark',
'ps_add_launchlink',
'ps_add_locallink',
'ps_add_note',
'ps_add_pdflink',
'ps_add_weblink',
'ps_arc',
'ps_arcn',
'ps_begin_page',
'ps_begin_pattern',
'ps_begin_template',
'ps_circle',
'ps_clip',
'ps_close_image',
'ps_close',
'ps_closepath_stroke',
'ps_closepath',
'ps_continue_text',
'ps_curveto',
'ps_delete',
'ps_end_page',
'ps_end_pattern',
'ps_end_template',
'ps_fill_stroke',
'ps_fill',
'ps_findfont',
'ps_get_buffer',
'ps_get_parameter',
'ps_get_value',
'ps_hyphenate',
'ps_include_file',
'ps_lineto',
'ps_makespotcolor',
'ps_moveto',
'ps_new',
'ps_open_file',
'ps_open_image_file',
'ps_open_image',
'ps_open_memory_image',
'ps_place_image',
'ps_rect',
'ps_restore',
'ps_rotate',
'ps_save',
'ps_scale',
'ps_set_border_color',
'ps_set_border_dash',
'ps_set_border_style',
'ps_set_info',
'ps_set_parameter',
'ps_set_text_pos',
'ps_set_value',
'ps_setcolor',
'ps_setdash',
'ps_setflat',
'ps_setfont',
'ps_setgray',
'ps_setlinecap',
'ps_setlinejoin',
'ps_setlinewidth',
'ps_setmiterlimit',
'ps_setoverprintmode',
'ps_setpolydash',
'ps_shading_pattern',
'ps_shading',
'ps_shfill',
'ps_show_boxed',
'ps_show_xy2',
'ps_show_xy',
'ps_show2',
'ps_show',
'ps_string_geometry',
'ps_stringwidth',
'ps_stroke',
'ps_symbol_name',
'ps_symbol_width',
'ps_symbol',
'ps_translate'],
'Paradox': ['px_close',
'px_create_fp',
'px_date2string',
'px_delete_record',
'px_delete',
'px_get_field',
'px_get_info',
'px_get_parameter',
'px_get_record',
'px_get_schema',
'px_get_value',
'px_insert_record',
'px_new',
'px_numfields',
'px_numrecords',
'px_open_fp',
'px_put_record',
'px_retrieve_record',
'px_set_blob_file',
'px_set_parameter',
'px_set_tablename',
'px_set_targetencoding',
'px_set_value',
'px_timestamp2string',
'px_update_record'],
'Parsekit': ['parsekit_compile_file',
'parsekit_compile_string',
'parsekit_func_arginfo'],
'PostgreSQL': ['pg_affected_rows',
'pg_cancel_query',
'pg_client_encoding',
'pg_close',
'pg_connect',
'pg_connection_busy',
'pg_connection_reset',
'pg_connection_status',
'pg_convert',
'pg_copy_from',
'pg_copy_to',
'pg_dbname',
'pg_delete',
'pg_end_copy',
'pg_escape_bytea',
'pg_escape_string',
'pg_execute',
'pg_fetch_all_columns',
'pg_fetch_all',
'pg_fetch_array',
'pg_fetch_assoc',
'pg_fetch_object',
'pg_fetch_result',
'pg_fetch_row',
'pg_field_is_null',
'pg_field_name',
'pg_field_num',
'pg_field_prtlen',
'pg_field_size',
'pg_field_table',
'pg_field_type_oid',
'pg_field_type',
'pg_free_result',
'pg_get_notify',
'pg_get_pid',
'pg_get_result',
'pg_host',
'pg_insert',
'pg_last_error',
'pg_last_notice',
'pg_last_oid',
'pg_lo_close',
'pg_lo_create',
'pg_lo_export',
'pg_lo_import',
'pg_lo_open',
'pg_lo_read_all',
'pg_lo_read',
'pg_lo_seek',
'pg_lo_tell',
'pg_lo_unlink',
'pg_lo_write',
'pg_meta_data',
'pg_num_fields',
'pg_num_rows',
'pg_options',
'pg_parameter_status',
'pg_pconnect',
'pg_ping',
'pg_port',
'pg_prepare'],
'Printer': ['printer_abort',
'printer_close',
'printer_create_brush',
'printer_create_dc',
'printer_create_font',
'printer_create_pen',
'printer_delete_brush',
'printer_delete_dc',
'printer_delete_font',
'printer_delete_pen',
'printer_draw_bmp',
'printer_draw_chord',
'printer_draw_elipse',
'printer_draw_line',
'printer_draw_pie',
'printer_draw_rectangle',
'printer_draw_roundrect',
'printer_draw_text',
'printer_end_doc',
'printer_end_page',
'printer_get_option',
'printer_list',
'printer_logical_fontheight',
'printer_open',
'printer_select_brush',
'printer_select_font',
'printer_select_pen',
'printer_set_option',
'printer_start_doc',
'printer_start_page',
'printer_write'],
'Program execution': ['escapeshellarg',
'escapeshellcmd',
'exec',
'passthru',
'proc_close',
'proc_get_status',
'proc_nice',
'proc_open',
'proc_terminate',
'shell_exec',
'system'],
'Pspell': ['pspell_add_to_personal',
'pspell_add_to_session',
'pspell_check',
'pspell_clear_session',
'pspell_config_create',
'pspell_config_data_dir',
'pspell_config_dict_dir',
'pspell_config_ignore',
'pspell_config_mode',
'pspell_config_personal',
'pspell_config_repl',
'pspell_config_runtogether',
'pspell_config_save_repl'],
'RPM Reader': ['rpm_close',
'rpm_get_tag',
'rpm_is_valid',
'rpm_open',
'rpm_version'],
'RRD': ['rrd_create',
'rrd_error',
'rrd_fetch',
'rrd_first',
'rrd_graph',
'rrd_info',
'rrd_last',
'rrd_lastupdate',
'rrd_restore',
'rrd_tune',
'rrd_update',
'rrd_xport'],
'Radius': ['radius_acct_open',
'radius_add_server',
'radius_auth_open',
'radius_close',
'radius_config',
'radius_create_request',
'radius_cvt_addr',
'radius_cvt_int',
'radius_cvt_string',
'radius_demangle_mppe_key',
'radius_demangle',
'radius_get_attr',
'radius_get_vendor_attr',
'radius_put_addr',
'radius_put_attr',
'radius_put_int',
'radius_put_string',
'radius_put_vendor_addr',
'radius_put_vendor_attr',
'radius_put_vendor_int',
'radius_put_vendor_string',
'radius_request_authenticator',
'radius_send_request',
'radius_server_secret',
'radius_strerror'],
'Rar': ['rar_wrapper_cache_stats'],
'Readline': ['readline_add_history',
'readline_callback_handler_install',
'readline_callback_handler_remove',
'readline_callback_read_char',
'readline_clear_history',
'readline_completion_function',
'readline_info',
'readline_list_history',
'readline_on_new_line',
'readline_read_history',
'readline_redisplay',
'readline_write_history',
'readline'],
'Recode': ['recode_file', 'recode_string', 'recode'],
'SNMP': ['snmp_get_quick_print',
'snmp_get_valueretrieval',
'snmp_read_mib',
'snmp_set_enum_print',
'snmp_set_oid_numeric_print',
'snmp_set_oid_output_format',
'snmp_set_quick_print',
'snmp_set_valueretrieval',
'snmp2_get',
'snmp2_getnext',
'snmp2_real_walk',
'snmp2_set',
'snmp2_walk',
'snmp3_get',
'snmp3_getnext',
'snmp3_real_walk',
'snmp3_set',
'snmp3_walk',
'snmpget',
'snmpgetnext',
'snmprealwalk',
'snmpset',
'snmpwalk',
'snmpwalkoid'],
'SOAP': ['is_soap_fault', 'use_soap_error_handler'],
'SPL': ['class_implements',
'class_parents',
'iterator_apply',
'iterator_count',
'iterator_to_array',
'spl_autoload_call',
'spl_autoload_extensions',
'spl_autoload_functions',
'spl_autoload_register',
'spl_autoload_unregister',
'spl_autoload',
'spl_classes',
'spl_object_hash'],
'SPPLUS': ['calcul_hmac', 'calculhmac', 'nthmac', 'signeurlpaiement'],
'SQLite': ['sqlite_array_query', 'sqlite_busy_timeout', 'sqlite_changes'],
'SSH2': ['ssh2_auth_hostbased_file',
'ssh2_auth_none',
'ssh2_auth_password',
'ssh2_auth_pubkey_file',
'ssh2_connect',
'ssh2_exec',
'ssh2_fetch_stream',
'ssh2_fingerprint',
'ssh2_methods_negotiated',
'ssh2_publickey_add',
'ssh2_publickey_init',
'ssh2_publickey_list',
'ssh2_publickey_remove',
'ssh2_scp_recv',
'ssh2_scp_send',
'ssh2_sftp_lstat',
'ssh2_sftp_mkdir',
'ssh2_sftp_readlink',
'ssh2_sftp_realpath',
'ssh2_sftp_rename',
'ssh2_sftp_rmdir',
'ssh2_sftp_stat',
'ssh2_sftp_symlink',
'ssh2_sftp_unlink',
'ssh2_sftp',
'ssh2_shell',
'ssh2_tunnel'],
'SVN': ['svn_add',
'svn_auth_get_parameter',
'svn_auth_set_parameter',
'svn_blame',
'svn_cat',
'svn_checkout',
'svn_cleanup',
'svn_client_version',
'svn_commit',
'svn_delete',
'svn_diff',
'svn_export',
'svn_fs_abort_txn',
'svn_fs_apply_text',
'svn_fs_begin_txn2',
'svn_fs_change_node_prop',
'svn_fs_check_path',
'svn_fs_contents_changed',
'svn_fs_copy',
'svn_fs_delete',
'svn_fs_dir_entries',
'svn_fs_file_contents',
'svn_fs_file_length',
'svn_fs_is_dir',
'svn_fs_is_file',
'svn_fs_make_dir',
'svn_fs_make_file',
'svn_fs_node_created_rev',
'svn_fs_node_prop',
'svn_fs_props_changed',
'svn_fs_revision_prop',
'svn_fs_revision_root',
'svn_fs_txn_root',
'svn_fs_youngest_rev',
'svn_import',
'svn_log',
'svn_ls',
'svn_mkdir',
'svn_repos_create',
'svn_repos_fs_begin_txn_for_commit',
'svn_repos_fs_commit_txn',
'svn_repos_fs',
'svn_repos_hotcopy',
'svn_repos_open',
'svn_repos_recover',
'svn_revert',
'svn_status',
'svn_update'],
'SWF': ['swf_actiongeturl',
'swf_actiongotoframe',
'swf_actiongotolabel',
'swf_actionnextframe',
'swf_actionplay',
'swf_actionprevframe',
'swf_actionsettarget',
'swf_actionstop',
'swf_actiontogglequality',
'swf_actionwaitforframe',
'swf_addbuttonrecord',
'swf_addcolor',
'swf_closefile',
'swf_definebitmap',
'swf_definefont',
'swf_defineline',
'swf_definepoly',
'swf_definerect',
'swf_definetext',
'swf_endbutton',
'swf_enddoaction',
'swf_endshape',
'swf_endsymbol',
'swf_fontsize',
'swf_fontslant',
'swf_fonttracking',
'swf_getbitmapinfo',
'swf_getfontinfo',
'swf_getframe',
'swf_labelframe',
'swf_lookat',
'swf_modifyobject',
'swf_mulcolor',
'swf_nextid',
'swf_oncondition',
'swf_openfile',
'swf_ortho2',
'swf_ortho',
'swf_perspective',
'swf_placeobject',
'swf_polarview',
'swf_popmatrix',
'swf_posround',
'swf_pushmatrix',
'swf_removeobject',
'swf_rotate',
'swf_scale',
'swf_setfont',
'swf_setframe',
'swf_shapearc',
'swf_shapecurveto3',
'swf_shapecurveto',
'swf_shapefillbitmapclip',
'swf_shapefillbitmaptile',
'swf_shapefilloff',
'swf_shapefillsolid',
'swf_shapelinesolid',
'swf_shapelineto',
'swf_shapemoveto',
'swf_showframe',
'swf_startbutton',
'swf_startdoaction',
'swf_startshape',
'swf_startsymbol',
'swf_textwidth',
'swf_translate',
'swf_viewport'],
'Semaphore': ['ftok',
'msg_get_queue',
'msg_queue_exists',
'msg_receive',
'msg_remove_queue',
'msg_send',
'msg_set_queue',
'msg_stat_queue',
'sem_acquire',
'sem_get',
'sem_release',
'sem_remove',
'shm_attach',
'shm_detach',
'shm_get_var',
'shm_has_var',
'shm_put_var',
'shm_remove_var',
'shm_remove'],
'Session': ['session_cache_expire',
'session_cache_limiter',
'session_commit',
'session_decode',
'session_destroy',
'session_encode',
'session_get_cookie_params',
'session_id',
'session_is_registered',
'session_module_name',
'session_name',
'session_regenerate_id',
'session_register',
'session_save_path',
'session_set_cookie_params',
'session_set_save_handler',
'session_start',
'session_unregister',
'session_unset',
'session_write_close'],
'Session PgSQL': ['session_pgsql_add_error',
'session_pgsql_get_error',
'session_pgsql_get_field',
'session_pgsql_reset',
'session_pgsql_set_field',
'session_pgsql_status'],
'Shared Memory': ['shmop_close',
'shmop_delete',
'shmop_open',
'shmop_read',
'shmop_size',
'shmop_write'],
'SimpleXML': ['simplexml_import_dom',
'simplexml_load_file',
'simplexml_load_string'],
'Socket': ['socket_accept',
'socket_bind',
'socket_clear_error',
'socket_close',
'socket_connect',
'socket_create_listen',
'socket_create_pair',
'socket_create',
'socket_get_option',
'socket_getpeername',
'socket_getsockname',
'socket_last_error',
'socket_listen',
'socket_read',
'socket_recv',
'socket_recvfrom',
'socket_select',
'socket_send',
'socket_sendto',
'socket_set_block',
'socket_set_nonblock',
'socket_set_option',
'socket_shutdown',
'socket_strerror',
'socket_write'],
'Solr': ['solr_get_version'],
'Statistic': ['stats_absolute_deviation',
'stats_cdf_beta',
'stats_cdf_binomial',
'stats_cdf_cauchy',
'stats_cdf_chisquare',
'stats_cdf_exponential',
'stats_cdf_f',
'stats_cdf_gamma',
'stats_cdf_laplace',
'stats_cdf_logistic',
'stats_cdf_negative_binomial',
'stats_cdf_noncentral_chisquare',
'stats_cdf_noncentral_f',
'stats_cdf_poisson',
'stats_cdf_t',
'stats_cdf_uniform',
'stats_cdf_weibull',
'stats_covariance',
'stats_den_uniform',
'stats_dens_beta',
'stats_dens_cauchy',
'stats_dens_chisquare',
'stats_dens_exponential',
'stats_dens_f',
'stats_dens_gamma',
'stats_dens_laplace',
'stats_dens_logistic',
'stats_dens_negative_binomial',
'stats_dens_normal',
'stats_dens_pmf_binomial',
'stats_dens_pmf_hypergeometric',
'stats_dens_pmf_poisson',
'stats_dens_t',
'stats_dens_weibull',
'stats_harmonic_mean',
'stats_kurtosis',
'stats_rand_gen_beta',
'stats_rand_gen_chisquare',
'stats_rand_gen_exponential',
'stats_rand_gen_f',
'stats_rand_gen_funiform',
'stats_rand_gen_gamma',
'stats_rand_gen_ibinomial_negative',
'stats_rand_gen_ibinomial',
'stats_rand_gen_int',
'stats_rand_gen_ipoisson',
'stats_rand_gen_iuniform',
'stats_rand_gen_noncenral_chisquare',
'stats_rand_gen_noncentral_f',
'stats_rand_gen_noncentral_t',
'stats_rand_gen_normal',
'stats_rand_gen_t',
'stats_rand_get_seeds',
'stats_rand_phrase_to_seeds',
'stats_rand_ranf',
'stats_rand_setall',
'stats_skew',
'stats_standard_deviation',
'stats_stat_binomial_coef',
'stats_stat_correlation',
'stats_stat_gennch',
'stats_stat_independent_t',
'stats_stat_innerproduct',
'stats_stat_noncentral_t',
'stats_stat_paired_t',
'stats_stat_percentile',
'stats_stat_powersum',
'stats_variance'],
'Stomp': ['stomp_connect_error', 'stomp_version'],
'Stream': ['set_socket_blocking',
'stream_bucket_append',
'stream_bucket_make_writeable',
'stream_bucket_new',
'stream_bucket_prepend',
'stream_context_create',
'stream_context_get_default',
'stream_context_get_options',
'stream_context_get_params',
'stream_context_set_default',
'stream_context_set_option',
'stream_context_set_params',
'stream_copy_to_stream',
'stream_encoding',
'stream_filter_append',
'stream_filter_prepend',
'stream_filter_register',
'stream_filter_remove',
'stream_get_contents',
'stream_get_filters',
'stream_get_line',
'stream_get_meta_data',
'stream_get_transports',
'stream_get_wrappers',
'stream_is_local',
'stream_notification_callback',
'stream_register_wrapper',
'stream_resolve_include_path',
'stream_select'],
'String': ['addcslashes',
'addslashes',
'bin2hex',
'chop',
'chr',
'chunk_split',
'convert_cyr_string',
'convert_uudecode',
'convert_uuencode',
'count_chars',
'crc32',
'crypt',
'echo',
'explode',
'fprintf',
'get_html_translation_table',
'hebrev',
'hebrevc',
'html_entity_decode',
'htmlentities',
'htmlspecialchars_decode',
'htmlspecialchars',
'implode',
'join',
'lcfirst',
'levenshtein',
'localeconv',
'ltrim',
'md5_file',
'md5',
'metaphone',
'money_format',
'nl_langinfo',
'nl2br',
'number_format',
'ord',
'parse_str',
'print',
'printf',
'quoted_printable_decode',
'quoted_printable_encode',
'quotemeta',
'rtrim',
'setlocale',
'sha1_file',
'sha1',
'similar_text',
'soundex',
'sprintf',
'sscanf',
'str_getcsv',
'str_ireplace',
'str_pad',
'str_repeat',
'str_replace',
'str_rot13',
'str_shuffle',
'str_split',
'str_word_count',
'strcasecmp',
'strchr',
'strcmp',
'strcoll',
'strcspn',
'strip_tags',
'stripcslashes',
'stripos',
'stripslashes',
'stristr',
'strlen',
'strnatcasecmp',
'strnatcmp',
'strncasecmp',
'strncmp',
'strpbrk',
'strpos',
'strrchr',
'strrev',
'strripos',
'strrpos',
'strspn'],
'Sybase': ['sybase_affected_rows',
'sybase_close',
'sybase_connect',
'sybase_data_seek',
'sybase_deadlock_retry_count',
'sybase_fetch_array',
'sybase_fetch_assoc',
'sybase_fetch_field',
'sybase_fetch_object',
'sybase_fetch_row',
'sybase_field_seek',
'sybase_free_result',
'sybase_get_last_message',
'sybase_min_client_severity',
'sybase_min_error_severity',
'sybase_min_message_severity',
'sybase_min_server_severity',
'sybase_num_fields',
'sybase_num_rows',
'sybase_pconnect',
'sybase_query',
'sybase_result',
'sybase_select_db',
'sybase_set_message_handler',
'sybase_unbuffered_query'],
'TCP': ['tcpwrap_check'],
'Tidy': ['ob_tidyhandler',
'tidy_access_count',
'tidy_config_count',
'tidy_error_count',
'tidy_get_error_buffer',
'tidy_get_output',
'tidy_load_config',
'tidy_reset_config',
'tidy_save_config',
'tidy_set_encoding',
'tidy_setopt',
'tidy_warning_count'],
'Tokenizer': ['token_get_all', 'token_name'],
'URL': ['base64_decode',
'base64_encode',
'get_headers',
'get_meta_tags',
'http_build_query',
'parse_url',
'rawurldecode',
'rawurlencode',
'urldecode',
'urlencode'],
'Variable handling': ['debug_zval_dump',
'doubleval',
'empty',
'floatval',
'get_defined_vars',
'get_resource_type',
'gettype',
'import_request_variables',
'intval',
'is_array',
'is_bool',
'is_callable',
'is_double',
'is_float',
'is_int',
'is_integer',
'is_long',
'is_null',
'is_numeric',
'is_object',
'is_real',
'is_resource',
'is_scalar',
'is_string',
'isset',
'print_r',
'serialize',
'settype',
'strval',
'unserialize',
'unset',
'var_dump',
'var_export'],
'W32api': ['w32api_deftype',
'w32api_init_dtype',
'w32api_invoke_function',
'w32api_register_function',
'w32api_set_call_method'],
'WDDX': ['wddx_add_vars',
'wddx_deserialize',
'wddx_packet_end',
'wddx_packet_start',
'wddx_serialize_value',
'wddx_serialize_vars',
'wddx_unserialize'],
'WinCache': ['wincache_fcache_fileinfo',
'wincache_fcache_meminfo',
'wincache_lock',
'wincache_ocache_fileinfo',
'wincache_ocache_meminfo',
'wincache_refresh_if_changed',
'wincache_rplist_fileinfo',
'wincache_rplist_meminfo',
'wincache_scache_info',
'wincache_scache_meminfo',
'wincache_ucache_add',
'wincache_ucache_cas',
'wincache_ucache_clear',
'wincache_ucache_dec',
'wincache_ucache_delete',
'wincache_ucache_exists',
'wincache_ucache_get',
'wincache_ucache_inc',
'wincache_ucache_info',
'wincache_ucache_meminfo',
'wincache_ucache_set',
'wincache_unlock'],
'XML Parser': ['utf8_decode'],
'XML-RPC': ['xmlrpc_decode_request',
'xmlrpc_decode',
'xmlrpc_encode_request',
'xmlrpc_encode',
'xmlrpc_get_type',
'xmlrpc_is_fault',
'xmlrpc_parse_method_descriptions',
'xmlrpc_server_add_introspection_data',
'xmlrpc_server_call_method',
'xmlrpc_server_create',
'xmlrpc_server_destroy',
'xmlrpc_server_register_introspection_callback',
'xmlrpc_server_register_method',
'xmlrpc_set_type'],
'XSLT (PHP4)': ['xslt_backend_info',
'xslt_backend_name',
'xslt_backend_version',
'xslt_create',
'xslt_errno',
'xslt_error',
'xslt_free',
'xslt_getopt',
'xslt_process',
'xslt_set_base',
'xslt_set_encoding',
'xslt_set_error_handler',
'xslt_set_log',
'xslt_set_object',
'xslt_set_sax_handler',
'xslt_set_sax_handlers',
'xslt_set_scheme_handler',
'xslt_set_scheme_handlers',
'xslt_setopt'],
'YAZ': ['yaz_addinfo',
'yaz_ccl_conf',
'yaz_ccl_parse',
'yaz_close',
'yaz_connect',
'yaz_database',
'yaz_element',
'yaz_errno',
'yaz_error',
'yaz_es_result',
'yaz_es',
'yaz_get_option',
'yaz_hits',
'yaz_itemorder',
'yaz_present',
'yaz_range',
'yaz_record',
'yaz_scan_result',
'yaz_scan',
'yaz_schema',
'yaz_search',
'yaz_set_option',
'yaz_sort',
'yaz_syntax',
'yaz_wait'],
'YP/NIS': ['yp_all',
'yp_cat',
'yp_err_string',
'yp_errno',
'yp_first',
'yp_get_default_domain',
'yp_master',
'yp_match',
'yp_next',
'yp_order'],
'Yaml': ['yaml_emit_file',
'yaml_emit',
'yaml_parse_file',
'yaml_parse_url',
'yaml_parse'],
'Zip': ['zip_close',
'zip_entry_close',
'zip_entry_compressedsize',
'zip_entry_compressionmethod',
'zip_entry_filesize',
'zip_entry_name',
'zip_entry_open',
'zip_entry_read',
'zip_open',
'zip_read'],
'Zlib': ['gzclose',
'gzcompress',
'gzdecode',
'gzdeflate',
'gzencode',
'gzeof',
'gzfile',
'gzgetc',
'gzgets',
'gzgetss',
'gzinflate',
'gzopen',
'gzpassthru',
'gzputs',
'gzread',
'gzrewind',
'gzseek',
'gztell',
'gzuncompress',
'gzwrite',
'readgzfile',
'zlib_get_coding_type'],
'bcompiler': ['bcompiler_load_exe',
'bcompiler_load',
'bcompiler_parse_class',
'bcompiler_read',
'bcompiler_write_class',
'bcompiler_write_constant',
'bcompiler_write_exe_footer',
'bcompiler_write_file',
'bcompiler_write_footer',
'bcompiler_write_function',
'bcompiler_write_functions_from_file',
'bcompiler_write_header',
'bcompiler_write_included_filename'],
'cURL': ['curl_close',
'curl_copy_handle',
'curl_errno',
'curl_error',
'curl_exec',
'curl_getinfo',
'curl_init',
'curl_multi_add_handle',
'curl_multi_close',
'curl_multi_exec',
'curl_multi_getcontent',
'curl_multi_info_read',
'curl_multi_init',
'curl_multi_remove_handle',
'curl_multi_select',
'curl_setopt_array',
'curl_setopt',
'curl_version'],
'chdb': ['chdb_create'],
'dBase': ['dbase_add_record',
'dbase_close',
'dbase_create',
'dbase_delete_record',
'dbase_get_header_info',
'dbase_get_record_with_names',
'dbase_get_record',
'dbase_numfields',
'dbase_numrecords',
'dbase_open',
'dbase_pack',
'dbase_replace_record'],
'dbx': ['dbx_close',
'dbx_compare',
'dbx_connect',
'dbx_error',
'dbx_escape_string',
'dbx_fetch_row'],
'filePro': ['filepro_fieldcount',
'filepro_fieldname',
'filepro_fieldtype',
'filepro_fieldwidth',
'filepro_retrieve',
'filepro_rowcount',
'filepro'],
'iconv': ['iconv_get_encoding',
'iconv_mime_decode_headers',
'iconv_mime_decode',
'iconv_mime_encode',
'iconv_set_encoding',
'iconv_strlen',
'iconv_strpos',
'iconv_strrpos',
'iconv_substr',
'iconv',
'ob_iconv_handler'],
'inclued': ['inclued_get_data'],
'intl': ['intl_error_name',
'intl_get_error_code',
'intl_get_error_message',
'intl_is_failure'],
'libxml': ['libxml_clear_errors',
'libxml_disable_entity_loader',
'libxml_get_errors',
'libxml_get_last_error',
'libxml_set_streams_context',
'libxml_use_internal_errors'],
'mSQL': ['msql_affected_rows',
'msql_close',
'msql_connect',
'msql_create_db',
'msql_createdb',
'msql_data_seek',
'msql_db_query',
'msql_dbname',
'msql_drop_db',
'msql_error',
'msql_fetch_array',
'msql_fetch_field',
'msql_fetch_object',
'msql_fetch_row',
'msql_field_flags',
'msql_field_len',
'msql_field_name',
'msql_field_seek',
'msql_field_table',
'msql_field_type',
'msql_fieldflags',
'msql_fieldlen',
'msql_fieldname',
'msql_fieldtable',
'msql_fieldtype',
'msql_free_result',
'msql_list_dbs',
'msql_list_fields',
'msql_list_tables',
'msql_num_fields',
'msql_num_rows',
'msql_numfields',
'msql_numrows',
'msql_pconnect',
'msql_query',
'msql_regcase',
'msql_result',
'msql_select_db',
'msql_tablename',
'msql'],
'mnoGoSearch': ['udm_add_search_limit',
'udm_alloc_agent_array',
'udm_alloc_agent',
'udm_api_version',
'udm_cat_list',
'udm_cat_path',
'udm_check_charset',
'udm_check_stored',
'udm_clear_search_limits',
'udm_close_stored',
'udm_crc32',
'udm_errno',
'udm_error',
'udm_find',
'udm_free_agent',
'udm_free_ispell_data',
'udm_free_res',
'udm_get_doc_count',
'udm_get_res_field',
'udm_get_res_param',
'udm_hash32',
'udm_load_ispell_data',
'udm_open_stored',
'udm_set_agent_param'],
'mqseries': ['mqseries_back',
'mqseries_begin',
'mqseries_close',
'mqseries_cmit',
'mqseries_conn',
'mqseries_connx',
'mqseries_disc',
'mqseries_get',
'mqseries_inq',
'mqseries_open',
'mqseries_put1',
'mqseries_put',
'mqseries_set',
'mqseries_strerror'],
'mysqlnd_qc': ['mysqlnd_qc_change_handler',
'mysqlnd_qc_clear_cache',
'mysqlnd_qc_get_cache_info',
'mysqlnd_qc_get_core_stats',
'mysqlnd_qc_get_handler',
'mysqlnd_qc_get_query_trace_log',
'mysqlnd_qc_set_user_handlers'],
'qtdom': ['qdom_error', 'qdom_tree'],
'runkit': ['runkit_class_adopt',
'runkit_class_emancipate',
'runkit_constant_add',
'runkit_constant_redefine',
'runkit_constant_remove',
'runkit_function_add',
'runkit_function_copy',
'runkit_function_redefine',
'runkit_function_remove',
'runkit_function_rename',
'runkit_import',
'runkit_lint_file',
'runkit_lint',
'runkit_method_add',
'runkit_method_copy',
'runkit_method_redefine',
'runkit_method_remove',
'runkit_method_rename',
'runkit_return_value_used',
'runkit_sandbox_output_handler',
'runkit_superglobals'],
'ssdeep': ['ssdeep_fuzzy_compare',
'ssdeep_fuzzy_hash_filename',
'ssdeep_fuzzy_hash'],
'vpopmail': ['vpopmail_add_alias_domain_ex',
'vpopmail_add_alias_domain',
'vpopmail_add_domain_ex',
'vpopmail_add_domain',
'vpopmail_add_user',
'vpopmail_alias_add',
'vpopmail_alias_del_domain',
'vpopmail_alias_del',
'vpopmail_alias_get_all',
'vpopmail_alias_get',
'vpopmail_auth_user',
'vpopmail_del_domain_ex',
'vpopmail_del_domain',
'vpopmail_del_user',
'vpopmail_error',
'vpopmail_passwd',
'vpopmail_set_user_quota'],
'win32ps': ['win32_ps_list_procs', 'win32_ps_stat_mem', 'win32_ps_stat_proc'],
'win32service': ['win32_continue_service',
'win32_create_service',
'win32_delete_service',
'win32_get_last_control_message',
'win32_pause_service',
'win32_query_service_status',
'win32_set_service_status',
'win32_start_service_ctrl_dispatcher',
'win32_start_service',
'win32_stop_service'],
'xattr': ['xattr_get',
'xattr_list',
'xattr_remove',
'xattr_set',
'xattr_supported'],
'xdiff': ['xdiff_file_bdiff_size',
'xdiff_file_bdiff',
'xdiff_file_bpatch',
'xdiff_file_diff_binary',
'xdiff_file_diff',
'xdiff_file_merge3',
'xdiff_file_patch_binary',
'xdiff_file_patch',
'xdiff_file_rabdiff',
'xdiff_string_bdiff_size',
'xdiff_string_bdiff',
'xdiff_string_bpatch',
'xdiff_string_diff_binary',
'xdiff_string_diff',
'xdiff_string_merge3',
'xdiff_string_patch_binary',
'xdiff_string_patch',
'xdiff_string_rabdiff']}
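# Illustrative helper -- a sketch, not part of the original module; the helper
# name is an assumption.  It shows one common way the MODULES mapping above is
# consumed: inverting it so a function name can be traced back to the PHP
# extension that provides it.
def _invert_modules(modules=MODULES):
    """Return a dict mapping each PHP function name to its module name."""
    function_to_module = {}
    for module, functions in modules.items():
        for function in functions:
            function_to_module[function] = module
    return function_to_module
# e.g. _invert_modules().get('mysql_connect') -> 'MySQL'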
if __name__ == '__main__':
import glob
import os
import pprint
import re
import shutil
import tarfile
import urllib.request, urllib.parse, urllib.error
PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
PHP_MANUAL_DIR = './php-chunked-xhtml/'
PHP_REFERENCE_GLOB = 'ref.*'
    PHP_FUNCTION_RE = r'<a href="function\..*?\.html">(.*?)</a>'
PHP_MODULE_RE = '<title>(.*?) Functions</title>'
def get_php_functions():
function_re = re.compile(PHP_FUNCTION_RE)
module_re = re.compile(PHP_MODULE_RE)
modules = {}
for file in get_php_references():
module = ''
for line in open(file):
if not module:
search = module_re.search(line)
if search:
module = search.group(1)
modules[module] = []
elif '<h2>Table of Contents</h2>' in line:
for match in function_re.finditer(line):
fn = match.group(1)
if '->' not in fn and '::' not in fn:
modules[module].append(fn)
# These are dummy manual pages, not actual functions
if module == 'PHP Options/Info':
modules[module].remove('main')
elif module == 'Filesystem':
modules[module].remove('delete')
if not modules[module]:
del modules[module]
break
return modules
def get_php_references():
download = urllib.request.urlretrieve(PHP_MANUAL_URL)
tar = tarfile.open(download[0])
tar.extractall()
tar.close()
for file in glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB)):
yield file
os.remove(download[0])
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
print('>> Downloading Function Index')
modules = get_php_functions()
total = sum(len(v) for v in modules.values())
print('%d functions found' % total)
regenerate(__file__, modules)
shutil.rmtree(PHP_MANUAL_DIR)
run()
| mit |
mkennedy04/knodj | env/Lib/site-packages/django/contrib/auth/decorators.py | 117 | 3021 | from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def permission_required(perm, login_url=None, raise_exception=False):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
If the raise_exception parameter is given the PermissionDenied exception
is raised.
"""
def check_perms(user):
if not isinstance(perm, (list, tuple)):
perms = (perm, )
else:
perms = perm
# First check if the user has the permission (even anon users)
if user.has_perms(perms):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
return user_passes_test(check_perms, login_url=login_url)
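# Minimal usage sketch, kept as comments so the module itself is unchanged.
# The view names, permission string and login_url are assumptions chosen only
# for illustration.
#
#     from django.http import HttpResponse
#
#     @login_required                     # anonymous users go to settings.LOGIN_URL
#     def profile(request):
#         return HttpResponse('only visible when logged in')
#
#     @permission_required('polls.can_vote', raise_exception=True)
#     def vote(request):                  # raises PermissionDenied instead of redirecting
#         return HttpResponse('vote recorded')
#
#     @user_passes_test(lambda u: u.is_staff, login_url='/staff-login/')
#     def staff_dashboard(request):
#         return HttpResponse('staff only')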
| mit |
ihipi/Sick-Beard | sickbeard/name_cache.py | 14 | 2259 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from sickbeard import db
from sickbeard.helpers import sanitizeSceneName
def addNameToCache(name, tvdb_id):
"""
Adds the show & tvdb id to the scene_names table in cache.db.
name: The show name to cache
tvdb_id: The tvdb id that this show should be cached with (can be None/0 for unknown)
"""
# standardize the name we're using to account for small differences in providers
name = sanitizeSceneName(name)
if not tvdb_id:
tvdb_id = 0
cacheDB = db.DBConnection('cache.db')
cacheDB.action("INSERT INTO scene_names (tvdb_id, name) VALUES (?, ?)", [tvdb_id, name])
def retrieveNameFromCache(name):
"""
Looks up the given name in the scene_names table in cache.db.
name: The show name to look up.
Returns: the tvdb id that resulted from the cache lookup or None if the show wasn't found in the cache
"""
# standardize the name we're using to account for small differences in providers
name = sanitizeSceneName(name)
cacheDB = db.DBConnection('cache.db')
cache_results = cacheDB.select("SELECT * FROM scene_names WHERE name = ?", [name])
if not cache_results:
return None
return int(cache_results[0]["tvdb_id"])
def clearCache():
"""
Deletes all "unknown" entries from the cache (names with tvdb_id of 0).
"""
cacheDB = db.DBConnection('cache.db')
cacheDB.action("DELETE FROM scene_names WHERE tvdb_id = ?", [0])
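# Usage sketch, kept as comments; the show name and tvdb id are made up.  Both
# helpers sanitize the name, so the same raw string works for store and lookup.
#
#     addNameToCache("Some Show", 123456)           # cache the scene name
#     tvdb_id = retrieveNameFromCache("Some Show")  # -> 123456, or None on a miss
#     clearCache()                                  # drops only tvdb_id == 0 entries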
| gpl-3.0 |
roy2220/srs | trunk/research/code-statistic/csr.py | 5 | 3514 | #!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2013-2016 SRS(ossrs)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
#################################################################################
# to stat the code and comment lines
#################################################################################
import sys, os, cs
from cs import info, trace
if __name__ != "__main__":
    print "this script does not support being imported as a library"
sys.exit(-1)
filters="*.*pp,*.h,*.c,*.cc"
except_filters="utest,doc"
if len(sys.argv) <= 1:
    print "to stat the code and comment lines"
    print "Usage: python %s <dir> [filters] [except_filters]"%(sys.argv[0])
    print "    dir: the dir that contains the files to stat"
    print "    filters: the file filters, default: *.*pp,*.h,*.c,*.cc"
    print "    except_filters: the file filters to exclude, default: utest,doc"
print "Example:"
print " python %s src"%(sys.argv[0])
print " python %s src *.*pp,*.cc utest,doc"%(sys.argv[0])
sys.exit(-1)
dir = sys.argv[1]
if len(sys.argv) > 2:
filters = sys.argv[2]
if len(sys.argv) > 3:
except_filters = sys.argv[3]
info("stat dir:%s, filters:%s, except_filters:%s"%(dir, filters, except_filters))
# filters to array
filters = filters.split(",")
except_filters = except_filters.split(",")
# find src -name "*.*pp"|grep -v utest
(totals, stat_codes, commentss, stat_block_commentss, stat_line_commentss) = (0, 0, 0, 0, 0)
for filter in filters:
cmd = 'find %s -name "%s"'%(dir, filter)
for ef in except_filters:
cmd = '%s|%s'%(cmd, 'grep -v "%s"'%(ef))
cmd = "%s 2>&1"%(cmd)
info("scan dir, cmd:%s"%cmd)
pipe = os.popen(cmd)
files = pipe.read()
info("scan dir, files:%s"%files)
pipe.close()
files = files.split("\n")
for file in files:
file = file.strip()
if len(file) == 0:
continue;
info("start stat file:%s"%file)
(code, total, stat_code, comments, stat_block_comments, stat_line_comments, code_file) = cs.do_stat(file)
if code != 0:
continue;
totals += total
stat_codes += stat_code
commentss += comments
stat_block_commentss += stat_block_comments
stat_line_commentss += stat_line_comments
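# For the default arguments (dir=src, filters and except_filters as above) the
# loop assembles a pipeline equivalent to the following shell command -- shown
# here only as an illustration of the constructed string:
#     find src -name "*.*pp"|grep -v "utest"|grep -v "doc" 2>&1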
if totals == 0:
trace("no code or comments found.")
else:
trace("total:%s code:%s comments:%s(%.2f%%) block:%s line:%s"%(
totals, stat_codes, commentss, commentss * 100.0 / totals, stat_block_commentss, stat_line_commentss
))
| mit |
hawk-lord/gnucash | src/optional/python-bindings/example_scripts/quotes_historic.py | 13 | 2473 | #!/usr/bin/env python
# quotes_historic.py -- Example Script to read historic quote data into gnucash
#
## @file
# @brief Example Script to read historic stock data into gnucash
# @author Peter Holtermann
# @date January 2011
# @ingroup python_bindings_examples
#
# Call the perl-script @code
# ./get_quotes.pl INTC
# @endcode first to retrieve quote data into the file INTC, which can thereafter be imported into GnuCash using this script.
#
# For an explanation of how to use this, have a look at the wiki:
# http://wiki.gnucash.org/wiki/Stocks/get_prices
#
from gnucash import Session, Account, Split
import gnucash
import datetime
from fractions import Fraction
from gnc_convenience import find_account
FILE = "./test.gnucash"
url = "xml://"+FILE
# Read data from file
f = open('INTC')
data = []
while 1:
tmp = f.readline()
if(len(tmp)<2):
break
data.append(tmp)
f.close()
stock_date = []
stock_price = []
for i in range(1,len(data)):
year = int(data[i].rsplit(',')[1].rsplit('/')[0])
month = int(data[i].rsplit(',')[1].rsplit('/')[1])
day = int(data[i].rsplit(',')[1].rsplit('/')[2])
stock_date.append(datetime.datetime(year,month,day))
stock_price.append(float(data[i].rsplit(',')[5]))
# Initialize Gnucash session
session = Session(url, True, False, False)
root = session.book.get_root_account()
book = session.book
account = book.get_root_account()
pdb = book.get_price_db()
comm_table = book.get_table()
ac = find_account(account,'Intel')[0]
stock = ac.GetCommodity()
# Add the prices
pdb = book.get_price_db()
if len(ac.GetSplitList())<1:
print 'Need at least one Split to get currency info ... '
raise SystemExit
cur = ac.GetSplitList()[0].GetParent().GetCurrency()
# Get stock data
pl = pdb.get_prices(stock,cur)
if len(pl)<1:
print 'Need at least one database entry to clone ...'
raise SystemExit
pl0 = pl[0]
for i in range(1,len(pl)):
pdb.remove_price(pl[i])
for i in range(0,len(stock_date)):
p_new = pl0.clone(book)
p_new = gnucash.GncPrice(instance=p_new)
print 'Adding',i,stock_date[i],stock_price[i]
p_new.set_time(stock_date[i])
v = p_new.get_value()
v.num = int(Fraction.from_float(stock_price[i]).limit_denominator(100000).numerator)
v.denom = int(Fraction.from_float(stock_price[i]).limit_denominator(100000).denominator)
p_new.set_value(v)
p_new.set_source("Finance::Quotes::Historic")
pdb.add_price(p_new)
# Clean up
session.save()
session.end()
session.destroy()
| gpl-2.0 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/ctypes/test/test_cast.py | 81 | 3212 | from ctypes import *
import unittest
import sys
class Test(unittest.TestCase):
def test_array2pointer(self):
array = (c_int * 3)(42, 17, 2)
# casting an array to a pointer works.
ptr = cast(array, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
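        # When two c_shorts fit in one c_int, casting the same buffer to
        # POINTER(c_short) exposes the machine byte order: little-endian
        # stores the low half-word first.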
if 2*sizeof(c_short) == sizeof(c_int):
ptr = cast(array, POINTER(c_short))
if sys.byteorder == "little":
self.assertEqual([ptr[i] for i in range(6)],
[42, 0, 17, 0, 2, 0])
else:
self.assertEqual([ptr[i] for i in range(6)],
[0, 42, 0, 17, 0, 2])
def test_address2pointer(self):
array = (c_int * 3)(42, 17, 2)
address = addressof(array)
ptr = cast(c_void_p(address), POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
ptr = cast(address, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
def test_p2a_objects(self):
array = (c_char_p * 5)()
self.assertEqual(array._objects, None)
array[0] = "foo bar"
self.assertEqual(array._objects, {'0': "foo bar"})
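        # ctypes records referenced Python objects in _objects so they stay
        # alive as long as the ctypes object does; the checks below show that
        # cast() makes the array and the pointer share this bookkeeping.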
p = cast(array, POINTER(c_char_p))
# array and p share a common _objects attribute
self.assertTrue(p._objects is array._objects)
self.assertEqual(array._objects, {'0': "foo bar", id(array): array})
p[0] = "spam spam"
self.assertEqual(p._objects, {'0': "spam spam", id(array): array})
self.assertTrue(array._objects is p._objects)
p[1] = "foo bar"
self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array})
self.assertTrue(array._objects is p._objects)
def test_other(self):
p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
self.assertEqual(p[:4], [1,2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
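        # Pointer slicing supports start:stop:step; a negative step reads
        # backwards from the start index, and later writes through the pointer
        # are visible in every slice form.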
c_int()
self.assertEqual(p[:4], [1, 2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
p[2] = 96
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
def test_char_p(self):
# This didn't work: bad argument to internal function
s = c_char_p("hiho")
self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
"hiho")
try:
c_wchar_p
except NameError:
pass
else:
def test_wchar_p(self):
s = c_wchar_p("hiho")
self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
"hiho")
if __name__ == "__main__":
unittest.main()
| bsd-2-clause |
zuku1985/scikit-learn | sklearn/utils/tests/test_multiclass.py | 58 | 14316 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import _safe_split
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn import datasets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
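# EXAMPLES maps each label-type name returned by type_of_target to inputs that
# should be classified as that type; the collections below hold inputs that
# must be rejected or reported as legacy formats.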
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
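    # The CSC arrays encode the same matrix as y: column 0 stores all six
    # entries, column 1 has an explicit zero at row 0 and an implicit zero at
    # row 4, column 2 is entirely implicit zeros, and column 3 is all ones.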
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
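    # The expected priors above are the per-column label frequencies, e.g.
    # column 0 contains label 1 three times, 2 twice and 4 once out of six rows.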
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
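    # With sample weights [1, 2, 1, 2, 1, 2] the expected column 0 priors above
    # come from weighted counts of 4 (label 1), 3 (label 2) and 2 (label 4)
    # out of a total weight of 9.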
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = datasets.load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
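    # With a precomputed kernel the "features" are the Gram matrix X.X^T, so
    # _safe_split must slice its rows by the split indices and its columns by
    # the training indices, as the assertions below verify.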
cv = ShuffleSplit(test_size=0.25, random_state=0)
train, test = list(cv.split(X))[0]
X_train, y_train = _safe_split(clf, X, y, train)
K_train, y_train2 = _safe_split(clfp, K, y, train)
assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
assert_array_almost_equal(y_train, y_train2)
X_test, y_test = _safe_split(clf, X, y, test, train)
K_test, y_test2 = _safe_split(clfp, K, y, test, train)
assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
assert_array_almost_equal(y_test, y_test2)
| bsd-3-clause |
bformet/django-admin-bootstrapped | django_admin_bootstrapped/renderers.py | 20 | 2302 | from __future__ import absolute_import
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
from django.contrib.admin.widgets import (AdminDateWidget, AdminTimeWidget,
AdminSplitDateTime, RelatedFieldWidgetWrapper)
from django.forms import (FileInput, CheckboxInput, RadioSelect, CheckboxSelectMultiple)
from bootstrap3 import renderers
try:
from bootstrap3.utils import add_css_class
except ImportError:
from bootstrap3.html import add_css_class
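# add_css_class has lived in different modules across django-bootstrap3
# releases, hence the fallback import above.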
from bootstrap3.text import text_value
class BootstrapFieldRenderer(renderers.FieldRenderer):
"""
A django-bootstrap3 field renderer that renders just the field
"""
def render(self):
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
return html
def add_class_attrs(self, widget=None):
if not widget:
widget = self.widget
# for multiwidgets we recursively update classes for each sub-widget
if isinstance(widget, AdminSplitDateTime):
for w in widget.widgets:
self.add_class_attrs(w)
return
classes = widget.attrs.get('class', '')
if isinstance(widget, ReadOnlyPasswordHashWidget):
classes = add_css_class(classes, 'form-control-static', prepend=True)
elif isinstance(widget, (AdminDateWidget,
AdminTimeWidget,
RelatedFieldWidgetWrapper)):
# for some admin widgets we don't want the input to take full horizontal space
classes = add_css_class(classes, 'form-control form-control-inline', prepend=True)
elif not isinstance(widget, (CheckboxInput,
RadioSelect,
CheckboxSelectMultiple,
FileInput)):
classes = add_css_class(classes, 'form-control', prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
widget.attrs['class'] = classes
| apache-2.0 |