repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 156 | 10588 | from __future__ import absolute_import, division, unicode_literals
from . import base
class Filter(base.Filter):
"""Removes optional tags from the token stream"""
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
if previous1 is not None:
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname in 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
| apache-2.0 |
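The filter above is the one html5lib's serializer runs when `omit_optional_tags` is enabled. A minimal sketch of driving it that way, assuming the standalone `html5lib` package rather than pip's vendored copy:

```python
# Sketch only: parse a document, walk it, and serialize with optional tags
# omitted, which routes the token stream through the Filter defined above.
import html5lib
from html5lib import serializer, treewalkers

doc = html5lib.parse("<html><head></head><body><p>one<p>two</body></html>")
walker = treewalkers.getTreeWalker("etree")
ser = serializer.HTMLSerializer(omit_optional_tags=True)
print("".join(ser.serialize(walker(doc))))
```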
asajeffrey/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py | 7 | 4885 | """
Provides interface to deal with pytest.
Usage::
session = webdriver.client.Session("127.0.0.1", "4444", "/")
harness_result = ("OK", None)
subtest_results = pytestrunner.run("/path/to/test", session.url)
return (harness_result, subtest_results)
"""
import errno
import json
import os
import shutil
import tempfile
pytest = None
def do_delayed_imports():
global pytest
import pytest
def run(path, server_config, session_config, timeout=0):
"""
Run Python test at ``path`` in pytest. The provided ``session``
is exposed as a fixture available in the scope of the test functions.
:param path: Path to the test file.
    :param session_config: dictionary of host, port, capabilities parameters
to pass through to the webdriver session
:param timeout: Duration before interrupting potentially hanging
tests. If 0, there is no timeout.
:returns: (<harness result>, [<subtest result>, ...]),
where <subtest result> is (test id, status, message, stacktrace).
"""
if pytest is None:
do_delayed_imports()
os.environ["WD_HOST"] = session_config["host"]
os.environ["WD_PORT"] = str(session_config["port"])
os.environ["WD_CAPABILITIES"] = json.dumps(session_config["capabilities"])
os.environ["WD_SERVER_CONFIG"] = json.dumps(server_config.as_dict_for_wd_env_variable())
harness = HarnessResultRecorder()
subtests = SubtestResultRecorder()
with TemporaryDirectory() as cache:
try:
pytest.main(["--strict", # turn warnings into errors
"-vv", # show each individual subtest and full failure logs
"--capture", "no", # enable stdout/stderr from tests
"--basetemp", cache, # temporary directory
"--showlocals", # display contents of variables in local scope
"-p", "no:mozlog", # use the WPT result recorder
"-p", "no:cacheprovider", # disable state preservation across invocations
"-o=console_output_style=classic", # disable test progress bar
path],
plugins=[harness, subtests])
except Exception as e:
harness.outcome = ("INTERNAL-ERROR", str(e))
return (harness.outcome, subtests.results)
class HarnessResultRecorder(object):
outcomes = {
"failed": "ERROR",
"passed": "OK",
"skipped": "SKIP",
}
def __init__(self):
# we are ok unless told otherwise
self.outcome = ("OK", None)
def pytest_collectreport(self, report):
harness_result = self.outcomes[report.outcome]
self.outcome = (harness_result, None)
class SubtestResultRecorder(object):
def __init__(self):
self.results = []
def pytest_runtest_logreport(self, report):
if report.passed and report.when == "call":
self.record_pass(report)
elif report.failed:
if report.when != "call":
self.record_error(report)
else:
self.record_fail(report)
elif report.skipped:
self.record_skip(report)
def record_pass(self, report):
self.record(report.nodeid, "PASS")
def record_fail(self, report):
# pytest outputs the stacktrace followed by an error message prefixed
# with "E ", e.g.
#
# def test_example():
# > assert "fuu" in "foobar"
        #     E   AssertionError: assert 'fuu' in 'foobar'
message = ""
for line in report.longreprtext.splitlines():
if line.startswith("E "):
message = line[1:].strip()
break
self.record(report.nodeid, "FAIL", message=message, stack=report.longrepr)
def record_error(self, report):
# error in setup/teardown
if report.when != "call":
message = "%s error" % report.when
self.record(report.nodeid, "ERROR", message, report.longrepr)
def record_skip(self, report):
self.record(report.nodeid, "ERROR",
"In-test skip decorators are disallowed, "
"please use WPT metadata to ignore tests.")
def record(self, test, status, message=None, stack=None):
if stack is not None:
stack = str(stack)
new_result = (test.split("::")[-1], status, message, stack)
self.results.append(new_result)
class TemporaryDirectory(object):
def __enter__(self):
self.path = tempfile.mkdtemp(prefix="pytest-")
return self.path
def __exit__(self, *args):
try:
shutil.rmtree(self.path)
except OSError as e:
# no such file or directory
if e.errno != errno.ENOENT:
raise
| mpl-2.0 |
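`HarnessResultRecorder` and `SubtestResultRecorder` work because `pytest.main()` accepts plain objects as plugins and calls their `pytest_*` methods as hooks. A stripped-down, self-contained sketch of that mechanism (the test path is illustrative):

```python
# Sketch of the plugin-object hook mechanism used above.
import pytest

class Recorder(object):
    def __init__(self):
        self.results = []

    def pytest_runtest_logreport(self, report):
        # Called once per setup/call/teardown phase of every test.
        if report.when == "call":
            self.results.append((report.nodeid, report.outcome))

recorder = Recorder()
pytest.main(["-q", "test_example.py"], plugins=[recorder])  # illustrative path
print(recorder.results)
```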
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/scipy/weave/tests/test_build_tools.py | 96 | 2480 | from __future__ import absolute_import, print_function
# still needed
# tests for MingW32Compiler
# don't know how to test gcc_exists() and msvc_exists()...
import os
import sys
import tempfile
import warnings
from numpy.testing import TestCase, assert_, run_module_suite
from scipy.weave import build_tools
# filter warnings generated by checking for bad paths
warnings.filterwarnings('ignore',
message="specified build_dir",
module='scipy.weave')
def is_writable(val):
return os.access(val,os.W_OK)
class TestConfigureBuildDir(TestCase):
def test_default(self):
# default behavior is to return current directory
d = build_tools.configure_build_dir()
if is_writable('.'):
assert_(d == os.path.abspath('.'))
assert_(is_writable(d))
def test_curdir(self):
# make sure it handles relative values.
d = build_tools.configure_build_dir('.')
if is_writable('.'):
assert_(d == os.path.abspath('.'))
assert_(is_writable(d))
def test_pardir(self):
# make sure it handles relative values
d = build_tools.configure_build_dir('..')
if is_writable('..'):
assert_(d == os.path.abspath('..'))
assert_(is_writable(d))
def test_bad_path(self):
# bad path should return same as default (and warn)
d = build_tools.configure_build_dir('_bad_path_')
d2 = build_tools.configure_build_dir()
assert_(d == d2)
assert_(is_writable(d))
class TestConfigureTempDir(TestConfigureBuildDir):
def test_default(self):
# default behavior returns tempdir
# Note: this'll fail if the temp directory isn't writable.
d = build_tools.configure_temp_dir()
assert_(d == tempfile.gettempdir())
assert_(is_writable(d))
class TestConfigureSysArgv(TestCase):
def test_simple(self):
build_dir = 'build_dir'
temp_dir = 'temp_dir'
compiler = 'compiler'
pre_argv = sys.argv[:]
build_tools.configure_sys_argv(compiler,temp_dir,build_dir)
argv = sys.argv[:]
bd = argv[argv.index('--build-lib')+1]
assert_(bd == build_dir)
td = argv[argv.index('--build-temp')+1]
assert_(td == temp_dir)
argv.index('--compiler='+compiler)
build_tools.restore_sys_argv()
assert_(pre_argv == sys.argv[:])
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
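`TestConfigureSysArgv` above pins down a small idiom: stash `sys.argv`, append distutils-style build flags, then restore it. The same idiom reduced to a standalone sketch:

```python
# Sketch only: the argv stash/rewrite/restore pattern the test exercises.
import sys

saved_argv = sys.argv[:]
sys.argv += ['--build-lib', 'build_dir', '--build-temp', 'temp_dir']
assert sys.argv[sys.argv.index('--build-lib') + 1] == 'build_dir'
assert sys.argv[sys.argv.index('--build-temp') + 1] == 'temp_dir'
sys.argv = saved_argv          # plays the role of restore_sys_argv()
assert sys.argv == saved_argv
```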
katsikas/gnuradio | grc/grc_gnuradio/blks2/packet.py | 1 | 8921 | # Copyright 2008, 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital
from gnuradio.digital import packet_utils
import gnuradio.gr.gr_threading as _threading
##payload length in bytes
DEFAULT_PAYLOAD_LEN = 512
##how many messages in a queue
DEFAULT_MSGQ_LIMIT = 2
##threshold for unmaking packets
DEFAULT_THRESHOLD = 12
##################################################
## Options Class for OFDM
##################################################
class options(object):
def __init__(self, **kwargs):
for key, value in kwargs.iteritems(): setattr(self, key, value)
##################################################
## Packet Encoder
##################################################
class _packet_encoder_thread(_threading.Thread):
def __init__(self, msgq, payload_length, send):
self._msgq = msgq
self._payload_length = payload_length
self._send = send
_threading.Thread.__init__(self)
self.setDaemon(1)
self.keep_running = True
self.start()
def run(self):
sample = '' #residual sample
while self.keep_running:
msg = self._msgq.delete_head() #blocking read of message queue
sample = sample + msg.to_string() #get the body of the msg as a string
while len(sample) >= self._payload_length:
payload = sample[:self._payload_length]
sample = sample[self._payload_length:]
self._send(payload)
class packet_encoder(gr.hier_block2):
"""
Hierarchical block for wrapping packet-based modulators.
"""
def __init__(self, samples_per_symbol, bits_per_symbol, access_code='', pad_for_usrp=True):
"""
packet_mod constructor.
@param samples_per_symbol number of samples per symbol
@param bits_per_symbol number of bits per symbol
@param access_code AKA sync vector
@param pad_for_usrp If true, packets are padded such that they end up a multiple of 128 samples
@param payload_length number of bytes in a data-stream slice
"""
#setup parameters
self._samples_per_symbol = samples_per_symbol
self._bits_per_symbol = bits_per_symbol
self._pad_for_usrp = pad_for_usrp
if not access_code: #get access code
access_code = packet_utils.default_access_code
if not packet_utils.is_1_0_string(access_code):
raise ValueError, "Invalid access_code %r. Must be string of 1's and 0's" % (access_code,)
self._access_code = access_code
self._pad_for_usrp = pad_for_usrp
#create blocks
msg_source = gr.message_source(gr.sizeof_char, DEFAULT_MSGQ_LIMIT)
self._msgq_out = msg_source.msgq()
#initialize hier2
gr.hier_block2.__init__(
self,
"packet_encoder",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(1, 1, gr.sizeof_char) # Output signature
)
#connect
self.connect(msg_source, self)
def send_pkt(self, payload):
"""
Wrap the payload in a packet and push onto the message queue.
@param payload string, data to send
"""
packet = packet_utils.make_packet(
payload,
self._samples_per_symbol,
self._bits_per_symbol,
self._access_code,
self._pad_for_usrp
)
msg = gr.message_from_string(packet)
self._msgq_out.insert_tail(msg)
##################################################
## Packet Decoder
##################################################
class _packet_decoder_thread(_threading.Thread):
def __init__(self, msgq, callback):
_threading.Thread.__init__(self)
self.setDaemon(1)
self._msgq = msgq
self.callback = callback
self.keep_running = True
self.start()
def run(self):
while self.keep_running:
msg = self._msgq.delete_head()
ok, payload = packet_utils.unmake_packet(msg.to_string(), int(msg.arg1()))
if self.callback:
self.callback(ok, payload)
class packet_decoder(gr.hier_block2):
"""
Hierarchical block for wrapping packet-based demodulators.
"""
def __init__(self, access_code='', threshold=-1, callback=None):
"""
packet_demod constructor.
@param access_code AKA sync vector
@param threshold detect access_code with up to threshold bits wrong (0 -> use default)
@param callback a function of args: ok, payload
"""
#access code
if not access_code: #get access code
access_code = packet_utils.default_access_code
if not packet_utils.is_1_0_string(access_code):
raise ValueError, "Invalid access_code %r. Must be string of 1's and 0's" % (access_code,)
self._access_code = access_code
#threshold
if threshold < 0: threshold = DEFAULT_THRESHOLD
self._threshold = threshold
#blocks
msgq = gr.msg_queue(DEFAULT_MSGQ_LIMIT) #holds packets from the PHY
correlator = digital.correlate_access_code_bb(self._access_code, self._threshold)
framer_sink = gr.framer_sink_1(msgq)
#initialize hier2
gr.hier_block2.__init__(
self,
"packet_decoder",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(0, 0, 0) # Output signature
)
#connect
self.connect(self, correlator, framer_sink)
#start thread
_packet_decoder_thread(msgq, callback)
##################################################
## Packet Mod for OFDM Mod and Packet Encoder
##################################################
class packet_mod_base(gr.hier_block2):
"""
Hierarchical block for wrapping packet source block.
"""
def __init__(self, packet_source=None, payload_length=0):
if not payload_length: #get payload length
payload_length = DEFAULT_PAYLOAD_LEN
if payload_length%self._item_size_in != 0: #verify that packet length is a multiple of the stream size
            raise ValueError, 'The payload length: "%d" is not a multiple of the stream size: "%d".'%(payload_length, self._item_size_in)
#initialize hier2
gr.hier_block2.__init__(
self,
"ofdm_mod",
gr.io_signature(1, 1, self._item_size_in), # Input signature
gr.io_signature(1, 1, packet_source._hb.output_signature().sizeof_stream_item(0)) # Output signature
)
#create blocks
msgq = gr.msg_queue(DEFAULT_MSGQ_LIMIT)
msg_sink = gr.message_sink(self._item_size_in, msgq, False) #False -> blocking
#connect
self.connect(self, msg_sink)
self.connect(packet_source, self)
#start thread
_packet_encoder_thread(msgq, payload_length, packet_source.send_pkt)
class packet_mod_b(packet_mod_base): _item_size_in = gr.sizeof_char
class packet_mod_s(packet_mod_base): _item_size_in = gr.sizeof_short
class packet_mod_i(packet_mod_base): _item_size_in = gr.sizeof_int
class packet_mod_f(packet_mod_base): _item_size_in = gr.sizeof_float
class packet_mod_c(packet_mod_base): _item_size_in = gr.sizeof_gr_complex
##################################################
## Packet Demod for OFDM Demod and Packet Decoder
##################################################
class packet_demod_base(gr.hier_block2):
"""
Hierarchical block for wrapping packet sink block.
"""
def __init__(self, packet_sink=None):
#we want outputs of different types
# NOTE: The output signature depends on the number of the subcarriers
signature_sizes = [self._item_size_out, gr.sizeof_gr_complex * packet_sink._occupied_tones]
#initialize hier2
gr.hier_block2.__init__(
self,
"ofdm_demod",
gr.io_signature(1, 1, packet_sink._hb.input_signature().sizeof_stream_item(0)), # Input signature
gr.io_signaturev(2, 2, signature_sizes) # Output signature
)
#create blocks
msg_source = gr.message_source(self._item_size_out, DEFAULT_MSGQ_LIMIT)
self._msgq_out = msg_source.msgq()
#connect
self.connect(self, packet_sink)
self.connect(msg_source, self)
# For the vector analyzer connection
self.connect((packet_sink, 1), (self, 1))
if packet_sink._hb.output_signature().sizeof_stream_item(0):
self.connect(packet_sink, gr.null_sink(packet_sink._hb.output_signature().sizeof_stream_item(0)))
def recv_pkt(self, ok, payload):
msg = gr.message_from_string(payload, 0, self._item_size_out, len(payload)/self._item_size_out)
if ok: self._msgq_out.insert_tail(msg)
class packet_demod_b(packet_demod_base): _item_size_out = gr.sizeof_char
class packet_demod_s(packet_demod_base): _item_size_out = gr.sizeof_short
class packet_demod_i(packet_demod_base): _item_size_out = gr.sizeof_int
class packet_demod_f(packet_demod_base): _item_size_out = gr.sizeof_float
class packet_demod_c(packet_demod_base): _item_size_out = gr.sizeof_gr_complex
| gpl-3.0 |
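`_packet_encoder_thread` is a plain producer/consumer loop: block on a message queue, accumulate bytes, and emit fixed-size payload slices. The same shape with only the Python 2 standard library (the queue and `send` callback stand in for `gr.msg_queue` and `send_pkt`):

```python
# Sketch only: the slicing loop from _packet_encoder_thread without GNU Radio.
import threading
import Queue  # Python 2 standard library

class EncoderThread(threading.Thread):
    def __init__(self, msgq, payload_length, send):
        threading.Thread.__init__(self)
        self.msgq = msgq
        self.payload_length = payload_length
        self.send = send
        self.setDaemon(True)
        self.start()

    def run(self):
        sample = ''                           # residual bytes between messages
        while True:
            sample += self.msgq.get()         # blocking read, like delete_head()
            while len(sample) >= self.payload_length:
                self.send(sample[:self.payload_length])
                sample = sample[self.payload_length:]

q = Queue.Queue()
EncoderThread(q, 4, lambda payload: None)     # send callback is a no-op here
q.put('abcdefgh')                             # yields two 4-byte payloads
```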
AlexProfi/django-cms | cms/test_utils/project/placeholderapp/views.py | 37 | 3569 | from django.http import HttpResponse
from django.shortcuts import render
from django.template import RequestContext
from django.template.base import Template
from django.views.generic import DetailView
from cms.test_utils.project.placeholderapp.models import (
Example1, MultilingualExample1, CharPksExample)
from cms.utils import get_language_from_request
def example_view(request):
context = {}
context['examples'] = Example1.objects.all()
return render(request, 'placeholderapp.html', context)
def _base_detail(request, instance, template_name='detail.html',
item_name="char_1", template_string='',):
context = {}
context['instance'] = instance
context['instance_class'] = instance.__class__()
context['item_name'] = item_name
if hasattr(request, 'toolbar'):
request.toolbar.set_object(instance)
if template_string:
template = Template(template_string)
return HttpResponse(template.render(RequestContext(request=request, dict_=context)))
else:
return render(request, template_name, context)
def list_view_multi(request):
context = {}
context['examples'] = MultilingualExample1.objects.language(
get_language_from_request(request)).all()
context['instance_class'] = MultilingualExample1
return render(request, 'list.html', context)
def detail_view_multi(request, pk, template_name='detail_multi.html',
item_name="char_1", template_string='',):
instance = MultilingualExample1.objects.language(
get_language_from_request(request)).get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
def detail_view_multi_unfiltered(request, pk, template_name='detail_multi.html',
item_name="char_1", template_string='',):
instance = MultilingualExample1.objects.get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
def list_view(request):
context = {}
context['examples'] = Example1.objects.all()
context['instance_class'] = Example1
return render(request, 'list.html', context)
def detail_view(request, pk, template_name='detail.html', item_name="char_1",
template_string='',):
if request.user.is_staff and request.toolbar:
instance = Example1.objects.get(pk=pk)
else:
instance = Example1.objects.get(pk=pk, publish=True)
return _base_detail(request, instance, template_name, item_name,
template_string)
def detail_view_char(request, pk, template_name='detail.html', item_name="char_1",
template_string='',):
instance = CharPksExample.objects.get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
class ClassDetail(DetailView):
model = Example1
template_name = "detail.html"
template_string = ''
def render_to_response(self, context, **response_kwargs):
if self.template_string:
context = RequestContext(self.request, context)
template = Template(self.template_string)
return HttpResponse(template.render(context))
else:
return super(ClassDetail, self).render_to_response(context, **response_kwargs)
def get_context_data(self, **kwargs):
context = super(ClassDetail, self).get_context_data(**kwargs)
context['instance_class'] = self.model
return context
| bsd-3-clause |
psolbach/metadoc | setup.py | 1 | 2387 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os.path
import sys
import re
from subprocess import call
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
from setuptools.command.sdist import sdist as _sdist
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
with open('./README.md') as f:
long_description = f.read()
requirements_txt = open("./requirements.txt").read()
main_py = open('metadoc/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
def _post_install():
from metadoc.install import install_nltk_sets
install_nltk_sets()
class DevInstall(_install):
def run(self):
call(["pip install -r ./requirements-dev.txt --no-clean"], shell=True)
self.execute(_post_install, (), msg="Installing nltk sets!")
_install.run(self)
class CustomInstall(_sdist):
def run(self):
call(["pip install -r ./requirements.txt --no-clean"], shell=True)
self.execute(_post_install, (), msg="Installing nltk sets!")
_sdist.run(self)
class BdistEggInstall(_bdist_wheel):
def run(self):
call(["pip install -r ./requirements.txt --no-clean"], shell=True)
self.execute(_post_install, (), msg="Installing nltk sets!")
_bdist_wheel.run(self)
setup(
name='metadoc',
version=metadata["version"],
description="Post-truth era news article metadata service.",
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Programming Language :: Python :: 3.5",
"Topic :: Internet :: WWW/HTTP",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Operating System :: POSIX :: Linux",
"Environment :: Web Environment",
],
keywords=["scraping", "metadata", "news article"],
author=metadata["author"],
author_email='[email protected]',
url='https://github.com/praise-internet/metadoc',
license=metadata["license"],
cmdclass={'sdist': CustomInstall, 'develop': DevInstall},
packages=find_packages(exclude=['tests']),
install_requires=requirements_txt.strip().split("\n"),
include_package_data=True,
zip_safe=False
)
| mit |
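The `metadata` lookup above relies on a common packaging idiom: regex-scan the package's `__init__.py` for dunder assignments instead of importing it. A self-contained check of that idiom:

```python
# Sketch only: extracting __version__/__author__ style metadata with a regex.
import re

main_py = "__version__ = '1.2.3'\n__author__ = 'someone'\n__license__ = 'MIT'\n"
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
assert metadata == {'version': '1.2.3', 'author': 'someone', 'license': 'MIT'}
```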
idosekely/python-lessons | lesson_5/client.py | 1 | 1263 | import socket
import sys
__author__ = 'sekely'
class SimpleClient(object):
def __init__(self, addr='localhost', port=50000, buf=1024):
self.buf = buf
server_address = (addr, port)
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('connecting to %s port %s' % server_address)
self.sock.connect(server_address)
def send(self, message=None):
try:
# Send data
message = message or 'This is the default message. It will be repeated.'
print('sending "%s"' % message)
self.sock.sendall(message)
# Look for the response
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
data = self.sock.recv(self.buf)
amount_received += len(data)
print('received "%s"' % data)
except Exception as e:
print('caught exception: %s' % str(e))
self.sock.close()
def close(self):
print('closing socket')
self.sock.close()
if __name__ == '__main__':
client = SimpleClient()
msg = sys.argv[1]
client.send(msg)
client.close()
| mit |
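`SimpleClient.send()` keeps reading until it has received as many bytes as it sent, so it needs a peer that echoes data back. A throwaway echo server to exercise it locally (same default host and port as the client):

```python
# Sketch only: one-shot echo server for testing SimpleClient on localhost:50000.
import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('localhost', 50000))
srv.listen(1)
conn, addr = srv.accept()          # blocks until the client connects
data = conn.recv(1024)
conn.sendall(data)                 # echo back so the client's read loop finishes
conn.close()
srv.close()
```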
pivanof/vitess | examples/local/client.py | 13 | 2555 | #!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample Vitess client in Python.
This is a sample for using the Python Vitess client.
It's a script that inserts some random messages on random pages of the
guestbook sample app.
Before running this, start up a local example cluster as described in the
README.md file.
Then run client.sh, which sets up PYTHONPATH before running client.py:
vitess/examples/local$ ./client.sh
"""
import argparse
import random
import time
from vtdb import vtgate_client
# register the python gRPC client upon import
from vtdb import grpc_vtgate_client # pylint: disable=unused-import
# Parse args
parser = argparse.ArgumentParser()
parser.add_argument('--server', dest='server', default='localhost:15991')
parser.add_argument('--timeout', dest='timeout', type=float, default='10.0')
args = parser.parse_args()
# Connect
conn = vtgate_client.connect('grpc', args.server, args.timeout)
try:
# Insert some messages on random pages.
print 'Inserting into master...'
cursor = conn.cursor(tablet_type='master', writable=True)
for i in range(3):
page = random.randint(1, 100)
cursor.begin()
cursor.execute(
'INSERT INTO messages (page, time_created_ns, message)'
' VALUES (:page, :time_created_ns, :message)',
{
'page': page,
'time_created_ns': int(time.time() * 1e9),
'message': 'V is for speed',
})
cursor.commit()
# Read it back from the master.
print 'Reading from master...'
cursor.execute('SELECT page, time_created_ns, message FROM messages', {})
for row in cursor.fetchall():
print row
cursor.close()
# Read from a replica.
# Note that this may be behind master due to replication lag.
print 'Reading from replica...'
cursor = conn.cursor(tablet_type='replica')
cursor.execute('SELECT page, time_created_ns, message FROM messages', {})
for row in cursor.fetchall():
print row
cursor.close()
finally:
# Clean up
conn.close()
| apache-2.0 |
scailer/drf-docs-from-tests | docsfromtests/collector.py | 1 | 1878 | # -*- coding: utf-8 -*-
import os
import json
import codecs
from collections import defaultdict
from django.template.loader import render_to_string
from . import conf
def json_pretty_print(data):
if not data:
return u''
if isinstance(data, (dict, list)):
return json.dumps(data, sort_keys=True,
indent=4, separators=(',', ': '))
try:
return json.dumps(json.loads(data), sort_keys=True,
indent=4, separators=(',', ': '))
except ValueError:
return u''
class Singleton(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)
return cls._instance
class DocCollector(Singleton):
_data = defaultdict(lambda: defaultdict(lambda: []))
def add(self, testcase, text):
self._data[testcase.__class__][testcase._testMethodName].append(text)
def _make_name(self, testcase_class, file_format=conf.FILE_FORMAT):
return u'{}_{}.{}'.format(
testcase_class.__module__.replace('.', '_'),
testcase_class.__name__, file_format)
def render_to_file(self, testcase_class):
name = self._make_name(testcase_class)
path = os.path.join(conf.DOCS_PATH, name)
context = {
'this': self,
'file_name': name,
'caption': testcase_class.caption,
'description': testcase_class.description,
'testcase_class': testcase_class,
'data': dict(self._data[testcase_class]),
}
with codecs.open(path, 'w', encoding="utf-8") as f:
f.write(render_to_string(conf.BASE_TEMPLATE, context))
def render(self):
for testcase_class in self._data:
self.render_to_file(testcase_class)
| mit |
GoSteven/Diary | django/contrib/gis/tests/layermap/models.py | 12 | 2245 | from django.contrib.gis.db import models
class State(models.Model):
name = models.CharField(max_length=20)
objects = models.GeoManager()
class County(models.Model):
name = models.CharField(max_length=25)
state = models.ForeignKey(State)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
objects = models.GeoManager()
class CountyFeat(models.Model):
name = models.CharField(max_length=25)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
class City(models.Model):
name = models.CharField(max_length=25)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
objects = models.GeoManager()
class Interstate(models.Model):
name = models.CharField(max_length=20)
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
objects = models.GeoManager()
# Same as `City` above, but for testing model inheritance.
class CityBase(models.Model):
name = models.CharField(max_length=25)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
objects = models.GeoManager()
class ICity1(CityBase):
dt = models.DateField()
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
# Mapping dictionaries for the models above.
co_mapping = {'name' : 'Name',
'state' : {'name' : 'State'}, # ForeignKey's use another mapping dictionary for the _related_ Model (State in this case).
'mpoly' : 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name' : 'Name',
'poly' : 'POLYGON',
}
city_mapping = {'name' : 'Name',
'population' : 'Population',
'density' : 'Density',
'dt' : 'Created',
'point' : 'POINT',
}
inter_mapping = {'name' : 'Name',
'length' : 'Length',
'path' : 'LINESTRING',
}
| bsd-3-clause |
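The mapping dictionaries at the bottom of the file are meant to be fed to `django.contrib.gis.utils.LayerMapping`, which matches shapefile fields to model fields. A sketch of that call (the shapefile path is illustrative, and `County`/`co_mapping` refer to the module above):

```python
# Sketch only: consuming co_mapping with LayerMapping.
from django.contrib.gis.utils import LayerMapping

lm = LayerMapping(County, '/data/counties.shp', co_mapping)  # illustrative path
lm.save(verbose=True)  # populates County rows, resolving the state FK via the nested mapping
```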
kivatu/kivy-bak | kivy/uix/sandbox.py | 9 | 5648 | '''
Sandbox
=======
.. versionadded:: 1.8.0
.. warning::
This is experimental and subject to change as long as this warning notice
is present.
This is a widget that runs itself and all of its children in a Sandbox. That
means if a child raises an Exception, it will be caught. The Sandbox
itself runs its own Clock, Cache, etc.
The SandBox widget is still experimental and required for the Kivy designer.
When the user designs their own widget, if they do something wrong (wrong size
value,
invalid python code), it will be caught correctly without breaking the whole
application. Because it has been designed that way, we are still
enhancing this widget and the :mod:`kivy.context` module.
Don't use it unless you know what you are doing :)
'''
__all__ = ('Sandbox', )
from kivy.context import Context
from kivy.base import ExceptionManagerBase
from kivy.clock import Clock
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.lang import Builder
def sandbox(f):
def _f2(self, *args, **kwargs):
ret = None
with self:
ret = f(self, *args, **kwargs)
return ret
return _f2
class SandboxExceptionManager(ExceptionManagerBase):
def __init__(self, sandbox):
ExceptionManagerBase.__init__(self)
self.sandbox = sandbox
def handle_exception(self, e):
if not self.sandbox.on_exception(e):
return ExceptionManagerBase.RAISE
return ExceptionManagerBase.PASS
class SandboxContent(RelativeLayout):
pass
class Sandbox(FloatLayout):
'''Sandbox widget, used to trap all the exceptions raised by child
widgets.
'''
def __init__(self, **kwargs):
self._context = Context(init=True)
self._context['ExceptionManager'] = SandboxExceptionManager(self)
self._context.sandbox = self
self._context.push()
self.on_context_created()
self._container = None
super(Sandbox, self).__init__(**kwargs)
self._container = SandboxContent(size=self.size, pos=self.pos)
super(Sandbox, self).add_widget(self._container)
self._context.pop()
# force SandboxClock's scheduling
Clock.schedule_interval(self._clock_sandbox, 0)
Clock.schedule_once(self._clock_sandbox_draw, -1)
self.main_clock = object.__getattribute__(Clock, '_obj')
def __enter__(self):
self._context.push()
def __exit__(self, _type, value, traceback):
self._context.pop()
if _type is not None:
return self.on_exception(value, _traceback=traceback)
def on_context_created(self):
'''Override this method in order to load your kv file or do anything
else with the newly created context.
'''
pass
def on_exception(self, exception, _traceback=None):
'''Override this method in order to catch all the exceptions from
children.
If you return True, it will not reraise the exception.
If you return False, the exception will be raised to the parent.
'''
import traceback
traceback.print_tb(_traceback)
return True
on_touch_down = sandbox(Widget.on_touch_down)
on_touch_move = sandbox(Widget.on_touch_move)
on_touch_up = sandbox(Widget.on_touch_up)
@sandbox
def add_widget(self, *args, **kwargs):
self._container.add_widget(*args, **kwargs)
@sandbox
def remove_widget(self, *args, **kwargs):
self._container.remove_widget(*args, **kwargs)
@sandbox
def clear_widgets(self, *args, **kwargs):
self._container.clear_widgets()
@sandbox
def on_size(self, *args):
if self._container:
self._container.size = self.size
@sandbox
def on_pos(self, *args):
if self._container:
self._container.pos = self.pos
@sandbox
def _clock_sandbox(self, dt):
#import pdb; pdb.set_trace()
Clock.tick()
Builder.sync()
@sandbox
def _clock_sandbox_draw(self, dt):
Clock.tick_draw()
Builder.sync()
self.main_clock.schedule_once(self._call_draw, 0)
def _call_draw(self, dt):
self.main_clock.schedule_once(self._clock_sandbox_draw, -1)
if __name__ == '__main__':
from kivy.base import runTouchApp
from kivy.uix.button import Button
class TestButton(Button):
def on_touch_up(self, touch):
#raise Exception('fdfdfdfdfdfdfd')
return super(TestButton, self).on_touch_up(touch)
def on_touch_down(self, touch):
#raise Exception('')
return super(TestButton, self).on_touch_down(touch)
s = Sandbox()
with s:
Builder.load_string('''
<TestButton>:
canvas:
Color:
rgb: (.3, .2, 0) if self.state == 'normal' else (.7, .7, 0)
Rectangle:
pos: self.pos
size: self.size
Color:
rgb: 1, 1, 1
Rectangle:
size: self.texture_size
pos: self.center_x - self.texture_size[0] / 2.,\
self.center_y - self.texture_size[1] / 2.
texture: self.texture
# invalid... for testing.
#on_touch_up: root.d()
#on_touch_down: root.f()
on_release: root.args()
#on_press: root.args()
''')
b = TestButton(text='Hello World')
s.add_widget(b)
# this exception is within the "with" block, but will be ignored by
# default because the sandbox on_exception will return True
raise Exception('hello')
runTouchApp(s)
| mit |
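The `@sandbox` decorator above is simply a decorator wrapped around the widget's own context manager: every wrapped call runs inside `__enter__`/`__exit__`, and `__exit__` decides whether the exception is swallowed. The pattern in isolation:

```python
# Sketch only: decorator + context-manager exception trapping, as used by Sandbox.
def sandbox(f):
    def _f2(self, *args, **kwargs):
        with self:
            return f(self, *args, **kwargs)
    return _f2

class Box(object):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        if exc_type is not None:
            print('caught: %s' % exc)
            return True            # swallow the exception, like on_exception()

    @sandbox
    def poke(self):
        raise ValueError('boom')

Box().poke()                       # prints "caught: boom" instead of raising
```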
unicefuganda/uSurvey | survey/tests/forms/test_batch_form.py | 1 | 2440 | from django.test import TestCase
from survey.forms.question_set import BatchForm
from survey.models.locations import *
from survey.models import EnumerationArea
from survey.models import Interviewer
from survey.models.access_channels import *
from survey.models.batch import Batch
from survey.models.surveys import Survey
class BatchFormTest(TestCase):
def test_valid(self):
self.country = LocationType.objects.create(
name='Country', slug='country')
self.africa = Location.objects.create(name='Africa', type=self.country)
self.city_ea = EnumerationArea.objects.create(name="CITY EA")
self.city_ea.locations.add(self.africa)
self.investigator_1 = Interviewer.objects.create(name="Investigator",
ea=self.city_ea,
gender='1', level_of_education='Primary',
language='Eglish', weights=0)
odk = ODKAccess.objects.create(interviewer=self.investigator_1, user_identifier='Test', is_active=True, reponse_timeout=1000,
duration='H', odk_token='Test')
form_data = {
'name': 'Batch 1',
'description': 'description goes here',
}
batch_form = BatchForm(form_data)
self.assertFalse(batch_form.is_valid())
def test_invalid(self):
form_data = {
'name': 'test',
'description': 'description goes here',
}
batch_form = BatchForm(form_data)
self.assertFalse(batch_form.is_valid())
def test_field_required(self):
data = {'name': '', 'description': ''}
batch_form = BatchForm(data)
self.assertFalse(batch_form.is_valid())
self.assertEqual(['This field is required.'],
batch_form.errors['name'])
def test_form_should_be_invalid_if_name_already_exists_on_the_same_survey(self):
survey = Survey.objects.create(name="very fast")
Batch.objects.create(survey=survey, name='Batch A',
description='description')
form_data = {
'name': 'Batch A',
'description': 'description goes here',
}
batch_form = BatchForm(data=form_data, instance=Batch(survey=survey))
self.assertFalse(batch_form.is_valid()) | bsd-3-clause |
aimas/TuniErp-8.0 | openerp/tools/misc.py | 62 | 45907 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping, OrderedDict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
try:
return which(name, path=os.pathsep.join(path))
except IOError:
return None
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
""" On systems where pg_restore/pg_dump require an explicit password (i.e.
on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if not env.get('PGPASSWORD') and openerp.tools.config['db_password']:
env['PGPASSWORD'] = openerp.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
>>> file_open('hr/report/timesheer.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
return self.dict.update(E, F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
    'fr_CA': u'French (CA) / Français (CA)',
    'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'ka_GE': u'Georgian / ქართული ენა',
'kab_DZ': u'Kabyle / Taqbaylit',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Dutch (BE) / Nederlands (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
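# Worked example of the recursive modulo-10 checksum (as used, for instance, in
# Swiss payment reference numbers): for mod10r('12') the carry evolves as
#   '1': report = codec[(1 + 0) % 10] = 9
#   '2': report = codec[(2 + 9) % 10] = 9
# and the appended check digit is (10 - 9) % 10 = 1, giving '121'.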
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as a basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
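# A minimal client-side sketch of this convention (the timezone names below are
# placeholders; a real client reads the server value from common/timezone_get()
# and its own timezone from the user context):
#   >>> import pytz
#   >>> from datetime import datetime
#   >>> naive = datetime.strptime('2014-01-01 12:00:00', '%Y-%m-%d %H:%M:%S')
#   >>> server_tz = pytz.timezone('UTC')
#   >>> server_tz.localize(naive).astimezone(pytz.timezone('Europe/Brussels'))
#   # -> 2014-01-01 13:00 local time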
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
tz_value = False
try:
f = open("/etc/timezone")
tz_value = f.read(128).strip()
except Exception:
pass
finally:
f.close()
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is no tzinfo in a datetime value (e.g. when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes a UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns use letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
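# Example conversions (the locale is only consulted for %x and %X; any babel
# locale such as Locale.parse('en_US') would do for these patterns):
#   posix_to_ldml('%Y-%m-%d', locale) -> 'yyyy-MM-dd'
#   posix_to_ldml('%d %b %Y', locale) -> 'dd MMM yyyy'
#   posix_to_ldml('%Hh', locale)      -> "HH'h'"   (literal letters get quoted)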
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
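# Typical call (assuming pytz is installed and the client context timezone is,
# say, 'Europe/Brussels'); with the server timezone fixed to UTC the naive
# server value is shifted by the destination offset:
#   server_to_local_timestamp('2014-01-01 12:00:00', DEFAULT_SERVER_DATETIME_FORMAT,
#                             DEFAULT_SERVER_DATETIME_FORMAT, 'Europe/Brussels')
#   -> '2014-01-01 13:00:00'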
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
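# For instance:
#   list(split_every(2, range(5)))                   -> [(0, 1), (2, 3), (4,)]
#   list(split_every(2, range(5), piece_maker=list)) -> [[0, 1], [2, 3], [4]]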
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
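# Shape of the result with hypothetical records: grouping partner ids 10, 11
# and 12 by 'company_id' could return {1: [10, 11], 2: [12]}; many2one values
# are read as (id, name) tuples, hence the key[0] extraction above.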
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
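# Dotted-notation examples:
#   >>> from collections import namedtuple
#   >>> Point = namedtuple('Point', 'x y')
#   >>> Line = namedtuple('Line', 'start end')
#   >>> line = Line(Point(0, 1), Point(2, 3))
#   >>> attrgetter('start.x')(line)
#   0
#   >>> attrgetter('start.x', 'end.y')(line)
#   (0, 3)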
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
do_suff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
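# Usage sketch: the index trails the iteration, starting one before the first
# element and ending one past the last:
#   >>> s = CountingStream(['a', 'b'])
#   >>> s.index
#   -1
#   >>> next(s), s.index
#   ('a', 0)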
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful as a default value for methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
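# Every lookup returns the same constant, e.g.:
#   >>> m = ConstantMapping(42)
#   >>> m['anything'], m['else']
#   (42, 42)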
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
class OrderedSet(OrderedDict):
""" A simple collection that remembers the elements insertion order. """
def __init__(self, seq=()):
super(OrderedSet, self).__init__([(x, None) for x in seq])
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
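# Insertion order is preserved and duplicates are collapsed, e.g.:
#   >>> s = OrderedSet(['b', 'a', 'b'])
#   >>> list(s)
#   ['b', 'a']
#   >>> s.add('c'); s.discard('a'); list(s)
#   ['b', 'c']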
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
phenoxim/nova | nova/tests/unit/api/openstack/compute/test_server_external_events.py | 3 | 9010 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import server_external_events \
as server_external_events_v21
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel as uuids
fake_instances = {
'00000000-0000-0000-0000-000000000001': objects.Instance(id=1,
uuid='00000000-0000-0000-0000-000000000001', host='host1'),
'00000000-0000-0000-0000-000000000002': objects.Instance(id=2,
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': objects.Instance(id=3,
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
'00000000-0000-0000-0000-000000000004': objects.Instance(id=4,
uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
fake_cells = [objects.CellMapping(uuid=uuids.cell1, database_connection="db1"),
objects.CellMapping(uuid=uuids.cell2, database_connection="db2")]
fake_instance_mappings = [
objects.InstanceMapping(cell_mapping=fake_cells[instance.id % 2],
instance_uuid=instance.uuid)
for instance in fake_instances.values()]
@classmethod
def fake_get_by_filters(cls, context, filters, expected_attrs=None):
if expected_attrs:
# This is a regression check for bug 1645479.
expected_attrs_set = set(expected_attrs)
full_expected_attrs_set = set(instance_obj.INSTANCE_OPTIONAL_ATTRS)
assert expected_attrs_set.issubset(full_expected_attrs_set), \
('%s is not a subset of %s' % (expected_attrs_set,
full_expected_attrs_set))
l = objects.InstanceList(objects=[
inst for inst in fake_instances.values()
if inst.uuid in filters['uuid']])
return l
@classmethod
def fake_get_by_instance_uuids(cls, context, uuids):
mappings = [im for im in fake_instance_mappings
if im.instance_uuid in uuids]
return objects.InstanceMappingList(objects=mappings)
@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids',
fake_get_by_instance_uuids)
@mock.patch('nova.objects.InstanceList.get_by_filters',
fake_get_by_filters)
class ServerExternalEventsTestV21(test.NoDBTestCase):
server_external_events = server_external_events_v21
invalid_error = exception.ValidationError
wsgi_api_version = '2.1'
def setUp(self):
super(ServerExternalEventsTestV21, self).setUp()
self.api = \
self.server_external_events.ServerExternalEventsController()
self.event_1 = {'name': 'network-vif-plugged',
'tag': 'foo',
'server_uuid': fake_instance_uuids[0],
'status': 'completed'}
self.event_2 = {'name': 'network-changed',
'server_uuid': fake_instance_uuids[1]}
self.default_body = {'events': [self.event_1, self.event_2]}
self.resp_event_1 = dict(self.event_1)
self.resp_event_1['code'] = 200
self.resp_event_2 = dict(self.event_2)
self.resp_event_2['code'] = 200
self.resp_event_2['status'] = 'completed'
self.default_resp_body = {'events': [self.resp_event_1,
self.resp_event_2]}
self.req = fakes.HTTPRequest.blank('', use_admin_context=True,
version=self.wsgi_api_version)
def _assert_call(self, body, expected_uuids, expected_events):
with mock.patch.object(self.api.compute_api,
'external_instance_event') as api_method:
response = self.api.create(self.req, body=body)
result = response.obj
code = response._code
self.assertEqual(1, api_method.call_count)
call = api_method.call_args_list[0]
args = call[0]
call_instances = args[1]
call_events = args[2]
self.assertEqual(set(expected_uuids),
set([instance.uuid for instance in call_instances]))
self.assertEqual(len(expected_uuids), len(call_instances))
self.assertEqual(set(expected_events),
set([event.name for event in call_events]))
self.assertEqual(len(expected_events),
len(call_events))
return result, code
def test_create(self):
result, code = self._assert_call(self.default_body,
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
body = self.default_body
body['events'][1]['server_uuid'] = MISSING_UUID
result, code = self._assert_call(body, [fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_event_instance_has_no_host(self):
body = self.default_body
body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
# the instance without host should not be passed to the compute layer
result, code = self._assert_call(body,
[fake_instance_uuids[1]],
['network-changed'])
self.assertEqual(422, result['events'][0]['code'])
self.assertEqual('failed', result['events'][0]['status'])
self.assertEqual(200, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
body['events'][1]['server_uuid'] = MISSING_UUID
self.assertRaises(webob.exc.HTTPNotFound,
self.api.create, self.req, body=body)
def test_create_bad_status(self):
body = self.default_body
body['events'][1]['status'] = 'foo'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_extra_gorp(self):
body = self.default_body
body['events'][0]['foobar'] = 'bad stuff'
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_events(self):
body = {'events': 'foo'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_bad_body(self):
body = {'foo': 'bar'}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
def test_create_unknown_events(self):
self.event_1['name'] = 'unkown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids',
fake_get_by_instance_uuids)
@mock.patch('nova.objects.InstanceList.get_by_filters',
fake_get_by_filters)
class ServerExternalEventsTestV251(ServerExternalEventsTestV21):
wsgi_api_version = '2.51'
def test_create_with_missing_tag(self):
body = self.default_body
body['events'][1]['name'] = 'volume-extended'
result, code = self._assert_call(body,
[fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual('completed', result['events'][0]['status'])
self.assertEqual(400, result['events'][1]['code'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(207, code)
| apache-2.0 |
Jorge-Rodriguez/ansible | lib/ansible/module_utils/network/ftd/configuration.py | 21 | 24087 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import copy
from functools import partial
from ansible.module_utils.network.ftd.common import HTTPMethod, equal_objects, FtdConfigurationError, \
FtdServerError, ResponseParams, copy_identity_properties, FtdUnexpectedResponse
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError
from ansible.module_utils.six import iteritems
DEFAULT_PAGE_SIZE = 10
DEFAULT_OFFSET = 0
UNPROCESSABLE_ENTITY_STATUS = 422
INVALID_UUID_ERROR_MESSAGE = "Validation failed due to an invalid UUID"
DUPLICATE_NAME_ERROR_MESSAGE = "Validation failed due to a duplicate name"
MULTIPLE_DUPLICATES_FOUND_ERROR = (
"Cannot add a new object. An object(s) with the same attributes exists."
"Multiple objects returned according to filters being specified. "
"Please specify more specific filters which can find exact object that caused duplication error")
class OperationNamePrefix:
ADD = 'add'
EDIT = 'edit'
GET = 'get'
DELETE = 'delete'
UPSERT = 'upsert'
class QueryParams:
FILTER = 'filter'
class ParamName:
QUERY_PARAMS = 'query_params'
PATH_PARAMS = 'path_params'
DATA = 'data'
FILTERS = 'filters'
class CheckModeException(Exception):
pass
class FtdInvalidOperationNameError(Exception):
def __init__(self, operation_name):
super(FtdInvalidOperationNameError, self).__init__(operation_name)
self.operation_name = operation_name
class OperationChecker(object):
@classmethod
def is_add_operation(cls, operation_name, operation_spec):
"""
Check if operation defined with 'operation_name' is add object operation according to 'operation_spec'.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:param operation_spec: specification of the operation being called by the user
:type operation_spec: dict
:return: True if the called operation is add object operation, otherwise False
:rtype: bool
"""
# Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method
return operation_name.startswith(OperationNamePrefix.ADD) and is_post_request(operation_spec)
@classmethod
def is_edit_operation(cls, operation_name, operation_spec):
"""
Check if operation defined with 'operation_name' is edit object operation according to 'operation_spec'.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:param operation_spec: specification of the operation being called by the user
:type operation_spec: dict
:return: True if the called operation is edit object operation, otherwise False
:rtype: bool
"""
# Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method
return operation_name.startswith(OperationNamePrefix.EDIT) and is_put_request(operation_spec)
@classmethod
def is_delete_operation(cls, operation_name, operation_spec):
"""
Check if operation defined with 'operation_name' is delete object operation according to 'operation_spec'.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:param operation_spec: specification of the operation being called by the user
:type operation_spec: dict
:return: True if the called operation is delete object operation, otherwise False
:rtype: bool
"""
# Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method
return operation_name.startswith(OperationNamePrefix.DELETE) \
and operation_spec[OperationField.METHOD] == HTTPMethod.DELETE
@classmethod
def is_get_list_operation(cls, operation_name, operation_spec):
"""
Check if operation defined with 'operation_name' is get list of objects operation according to 'operation_spec'.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:param operation_spec: specification of the operation being called by the user
:type operation_spec: dict
:return: True if the called operation is get a list of objects operation, otherwise False
:rtype: bool
"""
return operation_spec[OperationField.METHOD] == HTTPMethod.GET \
and operation_spec[OperationField.RETURN_MULTIPLE_ITEMS]
@classmethod
def is_get_operation(cls, operation_name, operation_spec):
"""
Check if operation defined with 'operation_name' is get objects operation according to 'operation_spec'.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:param operation_spec: specification of the operation being called by the user
:type operation_spec: dict
:return: True if the called operation is get object operation, otherwise False
:rtype: bool
"""
return operation_spec[OperationField.METHOD] == HTTPMethod.GET \
and not operation_spec[OperationField.RETURN_MULTIPLE_ITEMS]
@classmethod
def is_upsert_operation(cls, operation_name):
"""
Check if operation defined with 'operation_name' is upsert objects operation according to 'operation_name'.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:return: True if the called operation is upsert object operation, otherwise False
:rtype: bool
"""
return operation_name.startswith(OperationNamePrefix.UPSERT)
@classmethod
def is_find_by_filter_operation(cls, operation_name, params, operation_spec):
"""
Checks whether the called operation is 'find by filter'. This operation fetches all objects and finds
the matching ones by the given filter. As filtering is done on the client side, this operation should be used
only when selected filters are not implemented on the server side.
:param operation_name: name of the operation being called by the user
:type operation_name: str
:param operation_spec: specification of the operation being called by the user
:type operation_spec: dict
:param params: params - params should contain 'filters'
:return: True if the called operation is find by filter, otherwise False
:rtype: bool
"""
is_get_list = cls.is_get_list_operation(operation_name, operation_spec)
return is_get_list and ParamName.FILTERS in params and params[ParamName.FILTERS]
@classmethod
def is_upsert_operation_supported(cls, operations):
"""
Checks if all operations required for upsert object operation are defined in 'operations'.
:param operations: specification of the operations supported by model
:type operations: dict
:return: True if all criteria required to provide requested called operation are satisfied, otherwise False
:rtype: bool
"""
amount_operations_need_for_upsert_operation = 3
amount_supported_operations = 0
for operation_name, operation_spec in operations.items():
if cls.is_add_operation(operation_name, operation_spec) \
or cls.is_edit_operation(operation_name, operation_spec) \
or cls.is_get_list_operation(operation_name, operation_spec):
amount_supported_operations += 1
return amount_supported_operations == amount_operations_need_for_upsert_operation
class BaseConfigurationResource(object):
def __init__(self, conn, check_mode=False):
self._conn = conn
self.config_changed = False
self._operation_spec_cache = {}
self._models_operations_specs_cache = {}
self._check_mode = check_mode
self._operation_checker = OperationChecker
def execute_operation(self, op_name, params):
"""
Allow user request execution of simple operations(natively supported by API provider) as well as complex
operations(operations that are implemented as a set of simple operations).
:param op_name: name of the operation being called by the user
:type op_name: str
:param params: definition of the params that operation should be executed with
:type params: dict
:return: Result of the operation being executed
:rtype: dict
"""
if self._operation_checker.is_upsert_operation(op_name):
return self.upsert_object(op_name, params)
else:
return self.crud_operation(op_name, params)
def crud_operation(self, op_name, params):
"""
Allow user request execution of simple operations(natively supported by API provider) only.
:param op_name: name of the operation being called by the user
:type op_name: str
:param params: definition of the params that operation should be executed with
:type params: dict
:return: Result of the operation being executed
:rtype: dict
"""
op_spec = self.get_operation_spec(op_name)
if op_spec is None:
raise FtdInvalidOperationNameError(op_name)
if self._operation_checker.is_add_operation(op_name, op_spec):
resp = self.add_object(op_name, params)
elif self._operation_checker.is_edit_operation(op_name, op_spec):
resp = self.edit_object(op_name, params)
elif self._operation_checker.is_delete_operation(op_name, op_spec):
resp = self.delete_object(op_name, params)
elif self._operation_checker.is_find_by_filter_operation(op_name, params, op_spec):
resp = list(self.get_objects_by_filter(op_name, params))
else:
resp = self.send_general_request(op_name, params)
return resp
def get_operation_spec(self, operation_name):
if operation_name not in self._operation_spec_cache:
self._operation_spec_cache[operation_name] = self._conn.get_operation_spec(operation_name)
return self._operation_spec_cache[operation_name]
def get_operation_specs_by_model_name(self, model_name):
if model_name not in self._models_operations_specs_cache:
model_op_specs = self._conn.get_operation_specs_by_model_name(model_name)
self._models_operations_specs_cache[model_name] = model_op_specs
for op_name, op_spec in iteritems(model_op_specs):
self._operation_spec_cache.setdefault(op_name, op_spec)
return self._models_operations_specs_cache[model_name]
def get_objects_by_filter(self, operation_name, params):
def transform_filters_to_query_param(filter_params):
return ';'.join(['%s:%s' % (key, val) for key, val in sorted(iteritems(filter_params))])
def match_filters(filter_params, obj):
for k, v in iteritems(filter_params):
if k not in obj or obj[k] != v:
return False
return True
dummy, query_params, path_params = _get_user_params(params)
# copy required params to avoid mutation of passed `params` dict
get_list_params = {ParamName.QUERY_PARAMS: dict(query_params), ParamName.PATH_PARAMS: dict(path_params)}
filters = params.get(ParamName.FILTERS) or {}
if filters:
get_list_params[ParamName.QUERY_PARAMS][QueryParams.FILTER] = transform_filters_to_query_param(filters)
item_generator = iterate_over_pageable_resource(
partial(self.send_general_request, operation_name=operation_name), get_list_params
)
return (i for i in item_generator if match_filters(filters, i))
def add_object(self, operation_name, params):
def is_duplicate_name_error(err):
return err.code == UNPROCESSABLE_ENTITY_STATUS and DUPLICATE_NAME_ERROR_MESSAGE in str(err)
try:
return self.send_general_request(operation_name, params)
except FtdServerError as e:
if is_duplicate_name_error(e):
return self._check_if_the_same_object(operation_name, params, e)
else:
raise e
def _check_if_the_same_object(self, operation_name, params, e):
"""
Special check used in the scope of the 'add_object' operation, which can be requested as a standalone operation or
as part of an 'upsert_object' operation. It is executed when 'add_object' fails and tries to find the object that
caused the "object duplicate" error. If a single object is found and it equals the one we are trying to create,
the existing object is returned (making the add action idempotent). If the API returns more than one object,
an exact duplicate cannot be identified reliably, so an exception is raised.
"""
model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
get_list_operation = self._find_get_list_operation(model_name)
if get_list_operation:
data = params[ParamName.DATA]
if not params.get(ParamName.FILTERS):
params[ParamName.FILTERS] = {'name': data['name']}
existing_obj = None
existing_objs = self.get_objects_by_filter(get_list_operation, params)
for i, obj in enumerate(existing_objs):
if i > 0:
raise FtdConfigurationError(MULTIPLE_DUPLICATES_FOUND_ERROR)
existing_obj = obj
if existing_obj is not None:
if equal_objects(existing_obj, data):
return existing_obj
else:
raise FtdConfigurationError(
'Cannot add new object. '
'An object with the same name but different parameters already exists.',
existing_obj)
raise e
def _find_get_list_operation(self, model_name):
operations = self.get_operation_specs_by_model_name(model_name) or {}
return next((
op for op, op_spec in operations.items()
if self._operation_checker.is_get_list_operation(op, op_spec)), None)
def _find_get_operation(self, model_name):
operations = self.get_operation_specs_by_model_name(model_name) or {}
return next((
op for op, op_spec in operations.items()
if self._operation_checker.is_get_operation(op, op_spec)), None)
def delete_object(self, operation_name, params):
def is_invalid_uuid_error(err):
return err.code == UNPROCESSABLE_ENTITY_STATUS and INVALID_UUID_ERROR_MESSAGE in str(err)
try:
return self.send_general_request(operation_name, params)
except FtdServerError as e:
if is_invalid_uuid_error(e):
return {'status': 'Referenced object does not exist'}
else:
raise e
def edit_object(self, operation_name, params):
data, dummy, path_params = _get_user_params(params)
model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
get_operation = self._find_get_operation(model_name)
if get_operation:
existing_object = self.send_general_request(get_operation, {ParamName.PATH_PARAMS: path_params})
if not existing_object:
raise FtdConfigurationError('Referenced object does not exist')
elif equal_objects(existing_object, data):
return existing_object
return self.send_general_request(operation_name, params)
def send_general_request(self, operation_name, params):
self.validate_params(operation_name, params)
if self._check_mode:
raise CheckModeException()
data, query_params, path_params = _get_user_params(params)
op_spec = self.get_operation_spec(operation_name)
url, method = op_spec[OperationField.URL], op_spec[OperationField.METHOD]
return self._send_request(url, method, data, path_params, query_params)
def _send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None):
def raise_for_failure(resp):
if not resp[ResponseParams.SUCCESS]:
raise FtdServerError(resp[ResponseParams.RESPONSE], resp[ResponseParams.STATUS_CODE])
response = self._conn.send_request(url_path=url_path, http_method=http_method, body_params=body_params,
path_params=path_params, query_params=query_params)
raise_for_failure(response)
if http_method != HTTPMethod.GET:
self.config_changed = True
return response[ResponseParams.RESPONSE]
def validate_params(self, operation_name, params):
report = {}
op_spec = self.get_operation_spec(operation_name)
data, query_params, path_params = _get_user_params(params)
def validate(validation_method, field_name, user_params):
key = 'Invalid %s provided' % field_name
try:
is_valid, validation_report = validation_method(operation_name, user_params)
if not is_valid:
report[key] = validation_report
except Exception as e:
report[key] = str(e)
return report
validate(self._conn.validate_query_params, ParamName.QUERY_PARAMS, query_params)
validate(self._conn.validate_path_params, ParamName.PATH_PARAMS, path_params)
if is_post_request(op_spec) or is_put_request(op_spec):
validate(self._conn.validate_data, ParamName.DATA, data)
if report:
raise ValidationError(report)
def is_upsert_operation_supported(self, op_name):
"""
Checks if all operations required for the upsert object operation are supported for the model referenced by 'op_name'.
:param op_name: upsert operation name
:type op_name: str
:return: True if all criteria required to provide requested called operation are satisfied, otherwise False
:rtype: bool
"""
model_name = _extract_model_from_upsert_operation(op_name)
operations = self.get_operation_specs_by_model_name(model_name)
return self._operation_checker.is_upsert_operation_supported(operations)
@staticmethod
def _get_operation_name(checker, operations):
for operation_name, op_spec in operations.items():
if checker(operation_name, op_spec):
return operation_name
raise FtdConfigurationError("Operation is not supported")
def _add_upserted_object(self, model_operations, params):
add_op_name = self._get_operation_name(self._operation_checker.is_add_operation, model_operations)
return self.add_object(add_op_name, params)
def _edit_upserted_object(self, model_operations, existing_object, params):
edit_op_name = self._get_operation_name(self._operation_checker.is_edit_operation, model_operations)
_set_default(params, 'path_params', {})
_set_default(params, 'data', {})
params['path_params']['objId'] = existing_object['id']
copy_identity_properties(existing_object, params['data'])
return self.edit_object(edit_op_name, params)
def upsert_object(self, op_name, params):
"""
A wrapper on top of the add object, get list of objects and edit object operations that together implement
the upsert object operation. The object is created if it does not exist; if a single object matching the
requested 'params' exists, it is updated; otherwise, an exception is raised.
:param op_name: upsert operation name
:type op_name: str
:param params: params that upsert operation should be executed with
:type params: dict
:return: upserted object representation
:rtype: dict
"""
if not self.is_upsert_operation_supported(op_name):
raise FtdInvalidOperationNameError(op_name)
model_name = _extract_model_from_upsert_operation(op_name)
model_operations = self.get_operation_specs_by_model_name(model_name)
try:
return self._add_upserted_object(model_operations, params)
except FtdConfigurationError as e:
if e.obj:
return self._edit_upserted_object(model_operations, e.obj, params)
raise e
def _set_default(params, field_name, value):
if field_name not in params or params[field_name] is None:
params[field_name] = value
def is_post_request(operation_spec):
return operation_spec[OperationField.METHOD] == HTTPMethod.POST
def is_put_request(operation_spec):
return operation_spec[OperationField.METHOD] == HTTPMethod.PUT
def _extract_model_from_upsert_operation(op_name):
return op_name[len(OperationNamePrefix.UPSERT):]
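# The model name is simply whatever follows the 'upsert' prefix; for a
# hypothetical operation name 'upsertNetworkObject' this returns 'NetworkObject'.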
def _get_user_params(params):
return params.get(ParamName.DATA) or {}, params.get(ParamName.QUERY_PARAMS) or {}, params.get(
ParamName.PATH_PARAMS) or {}
def iterate_over_pageable_resource(resource_func, params):
"""
A generator function that iterates over a resource that supports pagination and lazily returns present items
one by one.
:param resource_func: function that receives `params` argument and returns a page of objects
:type resource_func: callable
:param params: initial dictionary of parameters that will be passed to the resource_func.
Should contain `query_params` inside.
:type params: dict
:return: an iterator containing returned items
:rtype: iterator of dict
"""
# creating a copy not to mutate passed dict
params = copy.deepcopy(params)
params[ParamName.QUERY_PARAMS].setdefault('limit', DEFAULT_PAGE_SIZE)
params[ParamName.QUERY_PARAMS].setdefault('offset', DEFAULT_OFFSET)
limit = int(params[ParamName.QUERY_PARAMS]['limit'])
def received_less_items_than_requested(items_in_response, items_expected):
if items_in_response == items_expected:
return False
elif items_in_response < items_expected:
return True
raise FtdUnexpectedResponse(
"Get List of Objects Response from the server contains more objects than requested. "
"There are {0} item(s) in the response while {1} was(ere) requested".format(items_in_response,
items_expected)
)
while True:
result = resource_func(params=params)
for item in result['items']:
yield item
if received_less_items_than_requested(len(result['items']), limit):
break
# creating a copy not to mutate existing dict
params = copy.deepcopy(params)
query_params = params[ParamName.QUERY_PARAMS]
query_params['offset'] = int(query_params['offset']) + limit
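# Paging sketch: with the default limit of 10 the generator requests offsets
# 0, 10, 20, ... and stops as soon as a page contains fewer items than the
# limit, so a resource of 25 items results in three requests (10 + 10 + 5).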
| gpl-3.0 |
miipl-naveen/optibizz | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Fields.py | 384 | 12340 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.functions import *
from lib.error import ErrorDialog
from LoginTest import *
from lib.logreport import *
from lib.rpc import *
database="report"
uid = 3
class Fields(unohelper.Base, XJobExecutor ):
def __init__(self, sVariable="", sFields="", sDisplayName="", bFromModify=False):
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.logobj=Logger()
self.win = DBModalDialog(60, 50, 200, 225, "Field Builder")
self.win.addFixedText("lblVariable", 27, 12, 60, 15, "Variable :")
self.win.addComboBox("cmbVariable", 180-120-2, 10, 130, 15,True, itemListenerProc=self.cmbVariable_selected)
self.insVariable = self.win.getControl( "cmbVariable" )
self.win.addFixedText("lblFields", 10, 32, 60, 15, "Variable Fields :")
self.win.addComboListBox("lstFields", 180-120-2, 30, 130, 150, False,True,itemListenerProc=self.lstbox_selected)
self.insField = self.win.getControl( "lstFields" )
self.win.addFixedText("lblUName", 8, 187, 60, 15, "Displayed name :")
self.win.addEdit("txtUName", 180-120-2, 185, 130, 15,)
self.win.addButton('btnOK',-5 ,-5,45,15,'Ok' ,actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-5 - 45 - 5 ,-5,45,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
self.sValue=None
self.sObj=None
self.aSectionList=[]
self.sGDisplayName=sDisplayName
self.aItemList=[]
self.aComponentAdd=[]
self.aObjectList=[]
self.aListFields=[]
self.aVariableList=[]
EnumDocument(self.aItemList,self.aComponentAdd)
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.sMyHost= ""
if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
self.sMyHost = docinfo.getUserFieldValue(0)
self.count = 0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
self.count += 1
getList(self.aObjectList, self.sMyHost,self.count)
cursor = doc.getCurrentController().getViewCursor()
text = cursor.getText()
tcur = text.createTextCursorByRange(cursor)
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == "Objects", self.aObjectList ) )
for i in range(len(self.aItemList)):
try:
anItem = self.aItemList[i][1]
component = self.aComponentAdd[i]
if component == "Document":
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextSection:
getRecersiveSection(tcur.TextSection,self.aSectionList)
if component in self.aSectionList:
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextTable:
if not component == "Document" and component[component.rfind(".")+1:] == tcur.TextTable.Name:
VariableScope(tcur, self.aVariableList, self.aObjectList, self.aComponentAdd, self.aItemList, component)
except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('Fields', LOG_ERROR, info)
self.bModify=bFromModify
if self.bModify==True:
sItem=""
for anObject in self.aObjectList:
if anObject[:anObject.find("(")] == sVariable:
sItem = anObject
self.insVariable.setText(sItem)
genTree(
sItem[sItem.find("(")+1:sItem.find(")")],
self.aListFields,
self.insField,
self.sMyHost,
2,
ending_excl=['one2many','many2one','many2many','reference'],
recur=['many2one']
)
self.sValue= self.win.getListBoxItem("lstFields",self.aListFields.index(sFields))
for var in self.aVariableList:
self.model_ids =self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
fields=['name','model']
self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
if self.model_res <> []:
self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
else:
self.insVariable.addItem(var ,self.insVariable.getItemCount())
self.win.doModalDialog("lstFields",self.sValue)
else:
ErrorDialog("Please insert user define field Field-1 or Field-4","Just go to File->Properties->User Define \nField-1 E.g. http://localhost:8069 \nOR \nField-4 E.g. account.invoice")
self.win.endExecute()
def lstbox_selected(self, oItemEvent):
try:
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
sItem= self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:var.find("(")+1]==sItem[:sItem.find("(")+1]:
sItem = var
sMain=self.aListFields[self.win.getListBoxSelectedItemPos("lstFields")]
sObject=self.getRes(self.sock,sItem[sItem.find("(")+1:-1],sMain[1:])
ids = self.sock.execute(database, uid, self.password, sObject , 'search', [])
res = self.sock.execute(database, uid, self.password, sObject , 'read',[ids[0]])
self.win.setEditText("txtUName",res[0][sMain[sMain.rfind("/")+1:]])
except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('Fields', LOG_ERROR, info)
self.win.setEditText("txtUName","TTT")
if self.bModify:
self.win.setEditText("txtUName",self.sGDisplayName)
def getRes(self, sock, sObject, sVar):
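        # Descriptive note: this walks a '/'-separated field path (for example
        # 'partner_id/name', a hypothetical path) through fields_get(),
        # following many2one relations, and returns the name of the model that
        # owns the final field in the path.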
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
res = sock.execute(database, uid, self.password, sObject , 'fields_get')
key = res.keys()
key.sort()
myval=None
if not sVar.find("/")==-1:
myval=sVar[:sVar.find("/")]
else:
myval=sVar
if myval in key:
if (res[myval]['type'] in ['many2one']):
sObject = res[myval]['relation']
return self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
else:
return sObject
def cmbVariable_selected(self, oItemEvent):
if self.count > 0 :
try:
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
self.aListFields=[]
tempItem = self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:var.find("(")] == tempItem[:tempItem.find("(")]:
sItem=var
genTree(
sItem[sItem.find("(")+1:sItem.find(")")],
self.aListFields,
self.insField,
self.sMyHost,
2,
ending_excl=['one2many','many2one','many2many','reference'],
recur=['many2one']
)
except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('Fields', LOG_ERROR, info)
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
cursor = doc.getCurrentController().getViewCursor()
for i in self.win.getListBoxSelectedItemsPos("lstFields"):
itemSelected = self.aListFields[i]
itemSelectedPos = i
txtUName=self.win.getEditText("txtUName")
sKey=u""+txtUName
if itemSelected != "" and txtUName != "" and self.bModify==True :
txtUName=self.sGDisplayName
sKey=u""+txtUName
txtUName=self.sGDisplayName
oCurObj=cursor.TextField
sObjName=self.insVariable.getText()
sObjName=sObjName[:sObjName.find("(")]
sValue=u"[[ " + sObjName + self.aListFields[itemSelectedPos].replace("/",".") + " ]]"
oCurObj.Items = (sKey,sValue)
oCurObj.update()
self.win.endExecute()
elif itemSelected != "" and txtUName != "" :
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
sObjName=self.win.getComboBoxText("cmbVariable")
sObjName=sObjName[:sObjName.find("(")]
widget = ( cursor.TextTable and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text )
sValue = u"[[ " + sObjName + self.aListFields[itemSelectedPos].replace("/",".") + " ]]"
oInputList.Items = (sKey,sValue)
widget.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in Name field \nor select particular value from the list of fields.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
Fields()
elif __name__=="package":
g_ImplementationHelper.addImplementation( Fields, "org.openoffice.openerp.report.fields", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fivejjs/python-linkedin | linkedin/utils.py | 7 | 2291 | # -*- coding: utf-8 -*-
import requests
from .exceptions import LinkedInError, get_exception_for_error_code
import sys
from io import StringIO
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
import json
if sys.version_info < (3,):
import __builtin__
def to_utf8(x):
return __builtin__.unicode(x)
def to_string(x):
return str(x)
else:
def to_utf8(x):
return x
def to_string(x):
return x
def enum(enum_type='enum', base_classes=None, methods=None, **attrs):
"""
    Generates an enumeration with the given attributes.
"""
    # Enumerations cannot be initialized as new instances
def __init__(instance, *args, **kwargs):
raise RuntimeError('%s types can not be initialized.' % enum_type)
if base_classes is None:
base_classes = ()
if methods is None:
methods = {}
base_classes = base_classes + (object,)
for k, v in methods.items():
methods[k] = classmethod(v)
attrs['enums'] = attrs.copy()
methods.update(attrs)
methods['__init__'] = __init__
return type(to_string(enum_type), base_classes, methods)
def raise_for_error(response):
try:
response.raise_for_status()
except (requests.HTTPError, requests.ConnectionError) as error:
try:
if len(response.content) == 0:
# There is nothing we can do here since LinkedIn has neither sent
# us a 2xx response nor a response content.
return
response = response.json()
if ('error' in response) or ('errorCode' in response):
message = '%s: %s' % (response.get('error', str(error)),
response.get('message', 'Unknown Error'))
error_code = response.get('status')
ex = get_exception_for_error_code(error_code)
raise ex(message)
else:
raise LinkedInError(error.message)
except (ValueError, TypeError):
raise LinkedInError(error.message)
HTTP_METHODS = enum('HTTPMethod', GET='GET', POST='POST',
PUT='PUT', DELETE='DELETE', PATCH='PATCH')
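# Illustrative note on the enum() helper above: HTTP_METHODS.GET == 'GET',
# HTTP_METHODS.enums maps each member name to its value, and calling
# HTTP_METHODS() raises RuntimeError, as enforced by the generated __init__.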
| mit |
brianhouse/housepy | lib/beanstalkc.py | 1 | 11489 | #!/usr/bin/env python
"""beanstalkc - A beanstalkd Client Library for Python"""
import logging
import socket
import sys
__license__ = '''
Copyright (C) 2008-2015 Andreas Bolka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__version__ = '0.4.0'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 11300
DEFAULT_PRIORITY = 2 ** 31
DEFAULT_TTR = 120
PY3 = sys.version_info[0] > 2
if PY3:
b = lambda x: isinstance(x, bytes) and x or bytes(x, 'us-ascii')
s = lambda x: x.decode('us-ascii')
else:
b = lambda x: x
s = lambda x: x
class BeanstalkcException(Exception): pass
class UnexpectedResponse(BeanstalkcException): pass
class CommandFailed(BeanstalkcException): pass
class DeadlineSoon(BeanstalkcException): pass
class SocketError(BeanstalkcException):
@staticmethod
def wrap(wrapped_function, *args, **kwargs):
try:
return wrapped_function(*args, **kwargs)
except socket.error:
err = sys.exc_info()[1]
raise SocketError(err)
class Connection(object):
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, parse_yaml=True,
connect_timeout=socket.getdefaulttimeout(), encoding=sys.getdefaultencoding()):
if parse_yaml is True:
try:
parse_yaml = __import__('yaml').load
except ImportError:
logging.error('Failed to load PyYAML, will not parse YAML')
parse_yaml = False
self._connect_timeout = connect_timeout
self._parse_yaml = parse_yaml or (lambda x: x)
self._encoding = encoding
self.host = host
self.port = port
self.connect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def connect(self):
"""Connect to beanstalkd server."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._connect_timeout)
SocketError.wrap(self._socket.connect, (self.host, self.port))
self._socket.settimeout(None)
self._socket_file = self._socket.makefile('rb')
def close(self):
"""Close connection to server."""
try:
self._socket.sendall(b('quit\r\n'))
except socket.error:
pass
try:
self._socket.close()
except socket.error:
pass
def reconnect(self):
"""Re-connect to server."""
self.close()
self.connect()
def _interact(self, command, expected_ok, expected_err=[]):
SocketError.wrap(self._socket.sendall, b(command))
status, results = self._read_response()
if status in expected_ok:
return results
elif status in expected_err:
raise CommandFailed(command.split()[0], status, results)
else:
raise UnexpectedResponse(command.split()[0], status, results)
def _read_response(self):
line = SocketError.wrap(self._socket_file.readline)
if not line:
raise SocketError()
response = s(line).split()
return response[0], response[1:]
def _read_body(self, size):
body = SocketError.wrap(self._socket_file.read, size)
SocketError.wrap(self._socket_file.read, 2) # trailing crlf
if size > 0 and not body:
raise SocketError()
# if PY3 and self._encoding: ## bh
# body = body.decode(self._encoding)
return body
def _interact_value(self, command, expected_ok, expected_err=[]):
return self._interact(command, expected_ok, expected_err)[0]
def _interact_job(self, command, expected_ok, expected_err, reserved=True):
jid, size = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return Job(self, int(jid), body, reserved)
def _interact_yaml(self, command, expected_ok, expected_err=[]):
size, = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return self._parse_yaml(body)
def _interact_peek(self, command):
try:
return self._interact_job(command, ['FOUND'], ['NOT_FOUND'], False)
except CommandFailed:
return None
# -- public interface --
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
"""Put a job into the current tube. Returns job id."""
if not isinstance(body, str) and not isinstance(body, bytes):
raise ValueError('Job body must be a str or bytes instance')
if PY3 and isinstance(body, str):
if not self._encoding:
raise ValueError('Job body must be a bytes instance when no encoding is specified')
body = bytes(body, self._encoding)
jid = self._interact_value(b('put %d %d %d %d\r\n' % (priority, delay, ttr, len(body))) +
body + b('\r\n'),
['INSERTED'],
['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
return int(jid)
def reserve(self, timeout=None):
"""Reserve a job from one of the watched tubes, with optional timeout
in seconds. Returns a Job object, or None if the request times out."""
if timeout is not None:
command = 'reserve-with-timeout %d\r\n' % timeout
else:
command = 'reserve\r\n'
try:
return self._interact_job(command,
['RESERVED'],
['DEADLINE_SOON', 'TIMED_OUT'])
except CommandFailed:
exc = sys.exc_info()[1]
_, status, results = exc.args
if status == 'TIMED_OUT':
return None
elif status == 'DEADLINE_SOON':
raise DeadlineSoon(results)
def kick(self, bound=1):
"""Kick at most bound jobs into the ready queue."""
return int(self._interact_value('kick %d\r\n' % bound, ['KICKED']))
def kick_job(self, jid):
"""Kick a specific job into the ready queue."""
self._interact('kick-job %d\r\n' % jid, ['KICKED'], ['NOT_FOUND'])
def peek(self, jid):
"""Peek at a job. Returns a Job, or None."""
return self._interact_peek('peek %d\r\n' % jid)
def peek_ready(self):
"""Peek at next ready job. Returns a Job, or None."""
return self._interact_peek('peek-ready\r\n')
def peek_delayed(self):
"""Peek at next delayed job. Returns a Job, or None."""
return self._interact_peek('peek-delayed\r\n')
def peek_buried(self):
"""Peek at next buried job. Returns a Job, or None."""
return self._interact_peek('peek-buried\r\n')
def tubes(self):
"""Return a list of all existing tubes."""
return self._interact_yaml('list-tubes\r\n', ['OK'])
def using(self):
"""Return the tube currently being used."""
return self._interact_value('list-tube-used\r\n', ['USING'])
def use(self, name):
"""Use a given tube."""
return self._interact_value('use %s\r\n' % name, ['USING'])
def watching(self):
"""Return a list of all tubes being watched."""
return self._interact_yaml('list-tubes-watched\r\n', ['OK'])
def watch(self, name):
"""Watch a given tube."""
return int(self._interact_value('watch %s\r\n' % name, ['WATCHING']))
def ignore(self, name):
"""Stop watching a given tube."""
try:
return int(self._interact_value('ignore %s\r\n' % name,
['WATCHING'],
['NOT_IGNORED']))
except CommandFailed:
return 1
def stats(self):
"""Return a dict of beanstalkd statistics."""
return self._interact_yaml('stats\r\n', ['OK'])
def stats_tube(self, name):
"""Return a dict of stats about a given tube."""
return self._interact_yaml('stats-tube %s\r\n' % name,
['OK'],
['NOT_FOUND'])
def pause_tube(self, name, delay):
"""Pause a tube for a given delay time, in seconds."""
self._interact('pause-tube %s %d\r\n' % (name, delay),
['PAUSED'],
['NOT_FOUND'])
# -- job interactors --
def delete(self, jid):
"""Delete a job, by job id."""
self._interact('delete %d\r\n' % jid, ['DELETED'], ['NOT_FOUND'])
def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
"""Release a reserved job back into the ready queue."""
self._interact('release %d %d %d\r\n' % (jid, priority, delay),
['RELEASED', 'BURIED'],
['NOT_FOUND'])
def bury(self, jid, priority=DEFAULT_PRIORITY):
"""Bury a job, by job id."""
self._interact('bury %d %d\r\n' % (jid, priority),
['BURIED'],
['NOT_FOUND'])
def touch(self, jid):
"""Touch a job, by job id, requesting more time to work on a reserved
job before it expires."""
self._interact('touch %d\r\n' % jid, ['TOUCHED'], ['NOT_FOUND'])
def stats_job(self, jid):
"""Return a dict of stats about a job, by job id."""
return self._interact_yaml('stats-job %d\r\n' % jid,
['OK'],
['NOT_FOUND'])
class Job(object):
def __init__(self, conn, jid, body, reserved=True):
self.conn = conn
self.jid = jid
self.body = body
self.reserved = reserved
def _priority(self):
stats = self.stats()
if isinstance(stats, dict):
return stats['pri']
return DEFAULT_PRIORITY
# -- public interface --
def delete(self):
"""Delete this job."""
self.conn.delete(self.jid)
self.reserved = False
def release(self, priority=None, delay=0):
"""Release this job back into the ready queue."""
if self.reserved:
self.conn.release(self.jid, priority or self._priority(), delay)
self.reserved = False
def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False
def kick(self):
"""Kick this job alive."""
self.conn.kick_job(self.jid)
def touch(self):
"""Touch this reserved job, requesting more time to work on it before
it expires."""
if self.reserved:
self.conn.touch(self.jid)
def stats(self):
"""Return a dict of stats about this job."""
return self.conn.stats_job(self.jid)
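# Illustrative usage sketch (not part of the library; it assumes a beanstalkd
# server on the default localhost:11300, and the tube name and job body below
# are made up):
#
#     conn = Connection()
#     conn.use('emails')
#     jid = conn.put('hello world')
#     conn.watch('emails')
#     job = conn.reserve(timeout=5)   # returns None if nothing arrives
#     if job is not None:
#         print(job.body)
#         job.delete()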
if __name__ == '__main__':
import nose
nose.main(argv=['nosetests', '-c', '.nose.cfg']) | mit |
zuotingbing/spark | python/pyspark/ml/param/_shared_params_code_gen.py | 8 | 9216 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
header = """#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#"""
# Code generator for shared params (shared.py). Run under this folder with:
# python _shared_params_code_gen.py > shared.py
def _gen_param_header(name, doc, defaultValueStr, typeConverter):
"""
Generates the header part for shared variables
:param name: param name
:param doc: param doc
"""
template = '''class Has$Name(Params):
"""
Mixin for param $name: $doc
"""
$name = Param(Params._dummy(), "$name", "$doc", typeConverter=$typeConverter)
def __init__(self):
super(Has$Name, self).__init__()'''
if defaultValueStr is not None:
template += '''
self._setDefault($name=$defaultValueStr)'''
Name = name[0].upper() + name[1:]
if typeConverter is None:
typeConverter = str(None)
return template \
.replace("$name", name) \
.replace("$Name", Name) \
.replace("$doc", doc) \
.replace("$defaultValueStr", str(defaultValueStr)) \
.replace("$typeConverter", typeConverter)
def _gen_param_code(name, doc, defaultValueStr):
"""
Generates Python code for a shared param class.
:param name: param name
:param doc: param doc
:param defaultValueStr: string representation of the default value
:return: code string
"""
# TODO: How to correctly inherit instance attributes?
template = '''
def get$Name(self):
"""
Gets the value of $name or its default value.
"""
return self.getOrDefault(self.$name)'''
Name = name[0].upper() + name[1:]
return template \
.replace("$name", name) \
.replace("$Name", Name) \
.replace("$doc", doc) \
.replace("$defaultValueStr", str(defaultValueStr))
if __name__ == "__main__":
print(header)
print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
print("from pyspark.ml.param import *\n\n")
shared = [
("maxIter", "max number of iterations (>= 0).", None, "TypeConverters.toInt"),
("regParam", "regularization parameter (>= 0).", None, "TypeConverters.toFloat"),
("featuresCol", "features column name.", "'features'", "TypeConverters.toString"),
("labelCol", "label column name.", "'label'", "TypeConverters.toString"),
("predictionCol", "prediction column name.", "'prediction'", "TypeConverters.toString"),
("probabilityCol", "Column name for predicted class conditional probabilities. " +
"Note: Not all models output well-calibrated probability estimates! These probabilities " +
"should be treated as confidences, not precise probabilities.", "'probability'",
"TypeConverters.toString"),
("rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", "'rawPrediction'",
"TypeConverters.toString"),
("inputCol", "input column name.", None, "TypeConverters.toString"),
("inputCols", "input column names.", None, "TypeConverters.toListString"),
("outputCol", "output column name.", "self.uid + '__output'", "TypeConverters.toString"),
("outputCols", "output column names.", None, "TypeConverters.toListString"),
("numFeatures", "Number of features. Should be greater than 0.", "262144",
"TypeConverters.toInt"),
("checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). " +
"E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: " +
"this setting will be ignored if the checkpoint directory is not set in the SparkContext.",
None, "TypeConverters.toInt"),
("seed", "random seed.", "hash(type(self).__name__)", "TypeConverters.toInt"),
("tol", "the convergence tolerance for iterative algorithms (>= 0).", None,
"TypeConverters.toFloat"),
("relativeError", "the relative target precision for the approximate quantile " +
"algorithm. Must be in the range [0, 1]", "0.001", "TypeConverters.toFloat"),
("stepSize", "Step size to be used for each iteration of optimization (>= 0).", None,
"TypeConverters.toFloat"),
("handleInvalid", "how to handle invalid entries. Options are skip (which will filter " +
"out rows with bad values), or error (which will throw an error). More options may be " +
"added later.", None, "TypeConverters.toString"),
("elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, " +
"the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", "0.0",
"TypeConverters.toFloat"),
("fitIntercept", "whether to fit an intercept term.", "True", "TypeConverters.toBoolean"),
("standardization", "whether to standardize the training features before fitting the " +
"model.", "True", "TypeConverters.toBoolean"),
("thresholds", "Thresholds in multi-class classification to adjust the probability of " +
"predicting each class. Array must have length equal to the number of classes, with " +
"values > 0, excepting that at most one value may be 0. " +
"The class with largest value p/t is predicted, where p is the original " +
"probability of that class and t is the class's threshold.", None,
"TypeConverters.toListFloat"),
("threshold", "threshold in binary classification prediction, in range [0, 1]",
"0.5", "TypeConverters.toFloat"),
("weightCol", "weight column name. If this is not set or empty, we treat " +
"all instance weights as 1.0.", None, "TypeConverters.toString"),
("solver", "the solver algorithm for optimization. If this is not set or empty, " +
"default value is 'auto'.", "'auto'", "TypeConverters.toString"),
("varianceCol", "column name for the biased sample variance of prediction.",
None, "TypeConverters.toString"),
("aggregationDepth", "suggested depth for treeAggregate (>= 2).", "2",
"TypeConverters.toInt"),
("parallelism", "the number of threads to use when running parallel algorithms (>= 1).",
"1", "TypeConverters.toInt"),
("collectSubModels", "Param for whether to collect a list of sub-models trained during " +
"tuning. If set to false, then only the single best sub-model will be available after " +
"fitting. If set to true, then all sub-models will be available. Warning: For large " +
"models, collecting all sub-models can cause OOMs on the Spark driver.",
"False", "TypeConverters.toBoolean"),
("loss", "the loss function to be optimized.", None, "TypeConverters.toString"),
("distanceMeasure", "the distance measure. Supported options: 'euclidean' and 'cosine'.",
"'euclidean'", "TypeConverters.toString"),
("validationIndicatorCol", "name of the column that indicates whether each row is for " +
"training or for validation. False indicates training; true indicates validation.",
None, "TypeConverters.toString"),
("blockSize", "block size for stacking input data in matrices. Data is stacked within "
"partitions. If block size is more than remaining data in a partition then it is "
"adjusted to the size of this data.", None, "TypeConverters.toInt")]
code = []
for name, doc, defaultValueStr, typeConverter in shared:
param_code = _gen_param_header(name, doc, defaultValueStr, typeConverter)
code.append(param_code + "\n" + _gen_param_code(name, doc, defaultValueStr))
print("\n\n\n".join(code))
| apache-2.0 |
MrLoick/python-for-android | python3-alpha/python3-src/Lib/encodings/undefined.py | 860 | 1299 | """ Python 'undefined' Codec
This codec will always raise a ValueError exception when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
def decode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
raise UnicodeError("undefined encoding")
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
raise UnicodeError("undefined encoding")
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='undefined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| apache-2.0 |
belmiromoreira/nova | nova/cells/weights/__init__.py | 141 | 1284 | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell Scheduler weights
"""
from nova import weights
class WeightedCell(weights.WeighedObject):
def __repr__(self):
return "WeightedCell [cell: %s, weight: %s]" % (
self.obj.name, self.weight)
class BaseCellWeigher(weights.BaseWeigher):
"""Base class for cell weights."""
pass
class CellWeightHandler(weights.BaseWeightHandler):
object_class = WeightedCell
def __init__(self):
super(CellWeightHandler, self).__init__(BaseCellWeigher)
def all_weighers():
"""Return a list of weight plugin classes found in this directory."""
return CellWeightHandler().get_all_classes()
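# Illustrative sketch of a custom weigher (hypothetical class; it assumes the
# BaseWeigher contract of implementing _weigh_object(), which is how cell
# weighers plug into CellWeightHandler):
#
#     class RamFreeWeigher(BaseCellWeigher):
#         def _weigh_object(self, cell, weight_properties):
#             return cell.capacities.get('ram_free', {}).get('total_mb', 0)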
| apache-2.0 |
scripnichenko/nova | nova/tests/functional/api_sample_tests/test_cells.py | 17 | 4818 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from six.moves import range
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
extension_name = "os-cells"
def _get_flags(self):
f = super(CellsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.cells.Cells')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.cell_capacities.Cell_capacities')
return f
def setUp(self):
# db_check_interval < 0 makes cells manager always hit the DB
self.flags(enable=True, db_check_interval=-1, group='cells')
super(CellsSampleJsonTest, self).setUp()
self._stub_cells()
def _stub_cells(self, num_cells=5):
self.cell_list = []
self.cells_next_id = 1
def _fake_cell_get_all(context):
return self.cell_list
def _fake_cell_get(inst, context, cell_name):
for cell in self.cell_list:
if cell['name'] == cell_name:
return cell
raise exception.CellNotFound(cell_name=cell_name)
for x in range(num_cells):
cell = models.Cell()
our_id = self.cells_next_id
self.cells_next_id += 1
cell.update({'id': our_id,
'name': 'cell%s' % our_id,
'transport_url': 'rabbit://username%s@/' % our_id,
'is_parent': our_id % 2 == 0})
self.cell_list.append(cell)
self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
def test_cells_empty_list(self):
# Override this
self._stub_cells(num_cells=0)
response = self._do_get('os-cells')
subs = self._get_regexes()
self._verify_response('cells-list-empty-resp', subs, response, 200)
def test_cells_list(self):
response = self._do_get('os-cells')
subs = self._get_regexes()
self._verify_response('cells-list-resp', subs, response, 200)
def test_cells_get(self):
response = self._do_get('os-cells/cell3')
subs = self._get_regexes()
self._verify_response('cells-get-resp', subs, response, 200)
def test_get_cell_capacity(self):
self._mock_cell_capacity()
state_manager = state.CellStateManager()
my_state = state_manager.get_my_state()
response = self._do_get('os-cells/%s/capacities' %
my_state.name)
subs = self._get_regexes()
return self._verify_response('cells-capacities-resp',
subs, response, 200)
def test_get_all_cells_capacity(self):
self._mock_cell_capacity()
response = self._do_get('os-cells/capacities')
subs = self._get_regexes()
return self._verify_response('cells-capacities-resp',
subs, response, 200)
def _mock_cell_capacity(self):
self.mox.StubOutWithMock(self.cells.manager.state_manager,
'get_our_capacities')
response = {"ram_free":
{"units_by_mb": {"8192": 0, "512": 13,
"4096": 1, "2048": 3, "16384": 0},
"total_mb": 7680},
"disk_free":
{"units_by_mb": {"81920": 11, "20480": 46,
"40960": 23, "163840": 5, "0": 0},
"total_mb": 1052672}
}
self.cells.manager.state_manager.get_our_capacities(). \
AndReturn(response)
self.mox.ReplayAll()
| apache-2.0 |
LabAdvComp/tukey_middleware | tukey_middleware/couch.py | 1 | 2583 | # Copyright 2013 Open Cloud Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Couchdb wrapper wrapper '''
import couchdb
import json
import requests
from .flask_utils import NotFound
class Couch(object):
    '''Implementation of a persistent key-value store interface with unique
    id generation'''
def __init__(self, db_name=None, url=None):
'''url is the url of couchdb like: 'http://localhost:5984/'. Default
is 'http://localhost:5984/' '''
self.url = 'http://localhost:5984'
if url:
self.couch = couchdb.Server(url=url)
self.url = url
else:
self.couch = couchdb.Server()
self.db_name = db_name
if self.db_name and self.db_name not in self.couch:
raise NotFound("db %s does not exist" % self.db_name)
def new_id(self):
''' Generate and return new id '''
return self.couch.uuids()[0]
def _as_documents(self, text):
'''Deserialize and return the documents'''
return [item["doc"] for item in json.loads(text)["rows"]]
def __getitem__(self, key):
''' Provides dict interface get '''
if type(key) is list:
resp = requests.post("%s/%s/_all_docs?include_docs=true" % (
self.url, self.db_name), data=json.dumps({"keys": key}))
return self._as_documents(resp.text)
else:
return self.couch[self.db_name][key]
def __setitem__(self, key, value):
''' Provides dict interface set '''
self.couch[self.db_name][key] = value
def save(self, doc):
''' Wrapper to couchdb.save() '''
return self.couch[self.db_name].save(doc)[0]
def raw_db(self):
''' Access the raw couchdb.Server methods '''
return self.couch[self.db_name]
def list_all(self):
''' List all databases stored in this CouchDB server'''
resp = requests.get("%s/%s/_all_docs?include_docs=true" % (self.url,
self.db_name))
return self._as_documents(resp.text)
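# Illustrative usage sketch (not part of the module; the database name and
# document are made up, and it assumes CouchDB is running on the default
# http://localhost:5984 with that database already created):
#
#     store = Couch(db_name='metadata')
#     doc_id = store.new_id()
#     store.save({'_id': doc_id, 'kind': 'example'})
#     print(store[doc_id])
#     print(store.list_all())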
| apache-2.0 |
jonashaag/django-nonrel-nohistory | tests/regressiontests/utils/feedgenerator.py | 51 | 2526 | import datetime
from django.utils import feedgenerator, tzinfo, unittest
class FeedgeneratorTest(unittest.TestCase):
"""
Tests for the low-level syndication feed framework.
"""
def test_get_tag_uri(self):
"""
Test get_tag_uri() correctly generates TagURIs.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
u'tag:example.org,2004-10-25:/foo/bar/headline')
def test_get_tag_uri_with_port(self):
"""
Test that get_tag_uri() correctly generates TagURIs from URLs with port
numbers.
"""
self.assertEqual(
feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)),
u'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')
def test_rfc2822_date(self):
"""
Test rfc2822_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"Fri, 14 Nov 2008 13:37:00 -0000"
)
def test_rfc2822_date_with_timezone(self):
"""
Test rfc2822_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=60)))),
"Fri, 14 Nov 2008 13:37:00 +0100"
)
def test_rfc3339_date(self):
"""
Test rfc3339_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"2008-11-14T13:37:00Z"
)
def test_rfc3339_date_with_timezone(self):
"""
Test rfc3339_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tzinfo.FixedOffset(datetime.timedelta(minutes=120)))),
"2008-11-14T13:37:00+02:00"
)
def test_atom1_mime_type(self):
"""
Test to make sure Atom MIME type has UTF8 Charset parameter set
"""
atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
self.assertEqual(
atom_feed.mime_type, "application/atom+xml; charset=utf8"
)
| bsd-3-clause |
campbe13/openhatch | vendor/packages/Django/tests/regressiontests/inline_formsets/tests.py | 51 | 6211 | from __future__ import absolute_import, unicode_literals
from django.forms.models import inlineformset_factory
from django.test import TestCase
from django.utils import six
from .models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': 'test',
'poem_set-0-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': '',
'poem_set-0-poem': '1',
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': six.text_type(poem.id),
'poem_set-0-poem': six.text_type(poem.id),
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name='test')
mother = Parent.objects.create(name='mother')
father = Parent.objects.create(name='father')
data = {
'child_set-TOTAL_FORMS': '1',
'child_set-INITIAL_FORMS': '0',
'child_set-MAX_NUM_FORMS': '0',
'child_set-0-name': 'child',
}
formset = ChildFormSet(data, instance=school)
self.assertEqual(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name='mother')
inlineformset_factory(Parent, Child, fk_name='father')
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
six.assertRaisesRegex(self, Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has more than 1 ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child
)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
self.assertRaises(Exception,
"fk_name 'school' is not a ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child, fk_name='school'
)
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
six.assertRaisesRegex(self, Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has no field named 'test'",
inlineformset_factory, Parent, Child, fk_name='test'
)
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(
Parent, Child, exclude=['school'], fk_name='mother'
)
inlineformset_factory(
Parent, Child, exclude=('school',), fk_name='mother'
)
| agpl-3.0 |
Vicente-Cheng/ceph-deploy | ceph_deploy/install.py | 1 | 19366 | import argparse
import logging
import os
from ceph_deploy import hosts
from ceph_deploy.cliutil import priority
from ceph_deploy.lib import remoto
from ceph_deploy.util.constants import default_components
from ceph_deploy.util.paths import gpg
LOG = logging.getLogger(__name__)
def sanitize_args(args):
"""
args may need a bunch of logic to set proper defaults that argparse is
not well suited for.
"""
if args.release is None:
args.release = 'jewel'
args.default_release = True
# XXX This whole dance is because --stable is getting deprecated
if args.stable is not None:
LOG.warning('the --stable flag is deprecated, use --release instead')
args.release = args.stable
# XXX Tango ends here.
return args
def detect_components(args, distro):
"""
    Since the package split, there are several different Ceph components to
    install, such as:
    * ceph
    * ceph-mon
    * ceph-osd
    * ceph-mds
    This helper parses the args that may contain specifics about these
    component flags and returns the default if none are passed in (which is to
    install everything).
"""
# the flag that prevents all logic here is the `--repo` flag which is used
# when no packages should be installed, just the repo files, so check for
    # that here and return an empty list (which is equivalent to saying 'no
# packages should be installed')
if args.repo:
return []
flags = {
'install_osd': 'ceph-osd',
'install_rgw': 'ceph-radosgw',
'install_mds': 'ceph-mds',
'install_mon': 'ceph-mon',
'install_common': 'ceph-common',
'install_tests': 'ceph-test',
}
if distro.is_rpm:
defaults = default_components.rpm
else:
defaults = default_components.deb
# different naming convention for deb than rpm for radosgw
flags['install_rgw'] = 'radosgw'
if args.install_all:
return defaults
else:
components = []
for k, v in flags.items():
if getattr(args, k, False):
components.append(v)
# if we have some components selected from flags then return that,
# otherwise return defaults because no flags and no `--repo` means we
# should get all of them by default
return components or defaults
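# Illustrative example (hypothetical argparse namespace): on an RPM-based
# distro, an args object with install_mon=True and install_osd=True and no
# --repo flag makes detect_components() return ['ceph-mon', 'ceph-osd'] (in
# the iteration order of the flags dict, so ordering is not guaranteed); with
# no component flags at all it falls back to default_components.rpm.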
def install(args):
args = sanitize_args(args)
if args.repo:
return install_repo(args)
if args.version_kind == 'stable':
version = args.release
else:
version = getattr(args, args.version_kind)
version_str = args.version_kind
if version:
version_str += ' version {version}'.format(version=version)
LOG.debug(
'Installing %s on cluster %s hosts %s',
version_str,
args.cluster,
' '.join(args.host),
)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
# XXX this should get removed once ceph packages are split for
# upstream. If default_release is True, it means that the user is
# trying to install on a RHEL machine and should expect to get RHEL
# packages. Otherwise, it will need to specify either a specific
# version, or repo, or a development branch. Other distro users
# should not see any differences.
use_rhceph=args.default_release,
)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
components = detect_components(args, distro)
if distro.init == 'sysvinit' and args.cluster != 'ceph':
LOG.error('refusing to install on host: %s, with custom cluster name: %s' % (
hostname,
args.cluster,
)
)
LOG.error('custom cluster names are not supported on sysvinit hosts')
continue
rlogger = logging.getLogger(hostname)
rlogger.info('installing Ceph on %s' % hostname)
cd_conf = getattr(args, 'cd_conf', None)
# custom repo arguments
repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url
gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url
gpg_fallback = gpg.url('release')
if gpg_url is None and repo_url:
LOG.warning('--gpg-url was not used, will fallback')
LOG.warning('using GPG fallback: %s', gpg_fallback)
gpg_url = gpg_fallback
if args.local_mirror:
if args.username:
hostname = "%s@%s" % (args.username, hostname)
remoto.rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True)
repo_url = 'file:///opt/ceph-deploy/repo'
gpg_url = 'file:///opt/ceph-deploy/repo/release.asc'
if repo_url: # triggers using a custom repository
# the user used a custom repo url, this should override anything
# we can detect from the configuration, so warn about it
if cd_conf:
if cd_conf.get_default_repo():
rlogger.warning('a default repo was found but it was \
overridden on the CLI')
if args.release in cd_conf.get_repos():
rlogger.warning('a custom repo was found but it was \
overridden on the CLI')
rlogger.info('using custom repository location: %s', repo_url)
distro.mirror_install(
distro,
repo_url,
gpg_url,
args.adjust_repos,
components=components,
)
# Detect and install custom repos here if needed
elif should_use_custom_repo(args, cd_conf, repo_url):
LOG.info('detected valid custom repositories from config file')
custom_repo(distro, args, cd_conf, rlogger)
else: # otherwise a normal installation
distro.install(
distro,
args.version_kind,
version,
args.adjust_repos,
components=components,
)
# Check the ceph version we just installed
hosts.common.ceph_version(distro.conn)
distro.conn.exit()
def should_use_custom_repo(args, cd_conf, repo_url):
"""
A boolean to determine the logic needed to proceed with a custom repo
    installation instead of cramming everything next to the logic operator.
"""
if repo_url:
# repo_url signals a CLI override, return False immediately
return False
if cd_conf:
if cd_conf.has_repos:
has_valid_release = args.release in cd_conf.get_repos()
has_default_repo = cd_conf.get_default_repo()
if has_valid_release or has_default_repo:
return True
return False
def custom_repo(distro, args, cd_conf, rlogger, install_ceph=None):
"""
A custom repo install helper that will go through config checks to retrieve
repos (and any extra repos defined) and install those
``cd_conf`` is the object built from argparse that holds the flags and
information needed to determine what metadata from the configuration to be
used.
"""
default_repo = cd_conf.get_default_repo()
components = detect_components(args, distro)
if args.release in cd_conf.get_repos():
LOG.info('will use repository from conf: %s' % args.release)
default_repo = args.release
elif default_repo:
LOG.info('will use default repository: %s' % default_repo)
# At this point we know there is a cd_conf and that it has custom
# repos make sure we were able to detect and actual repo
if not default_repo:
LOG.warning('a ceph-deploy config was found with repos \
but could not default to one')
else:
options = dict(cd_conf.items(default_repo))
options['install_ceph'] = False if install_ceph is False else True
extra_repos = cd_conf.get_list(default_repo, 'extra-repos')
rlogger.info('adding custom repository file')
try:
distro.repo_install(
distro,
default_repo,
options.pop('baseurl'),
options.pop('gpgkey'),
components=components,
**options
)
except KeyError as err:
raise RuntimeError('missing required key: %s in config section: %s' % (err, default_repo))
for xrepo in extra_repos:
rlogger.info('adding extra repo file: %s.repo' % xrepo)
options = dict(cd_conf.items(xrepo))
try:
distro.repo_install(
distro,
xrepo,
options.pop('baseurl'),
options.pop('gpgkey'),
components=components,
**options
)
except KeyError as err:
raise RuntimeError('missing required key: %s in config section: %s' % (err, xrepo))
def install_repo(args):
"""
For a user that only wants to install the repository only (and avoid
installing Ceph and its dependencies).
"""
cd_conf = getattr(args, 'cd_conf', None)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
# XXX this should get removed once Ceph packages are split for
# upstream. If default_release is True, it means that the user is
# trying to install on a RHEL machine and should expect to get RHEL
# packages. Otherwise, it will need to specify either a specific
# version, or repo, or a development branch. Other distro users should
# not see any differences.
use_rhceph=args.default_release,
)
rlogger = logging.getLogger(hostname)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
def remove(args, purge):
LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm')
LOG.info('like: librbd1 and librados2')
remove_action = 'Uninstalling'
if purge:
remove_action = 'Purging'
LOG.debug(
'%s on cluster %s hosts %s',
remove_action,
args.cluster,
' '.join(args.host),
)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
use_rhceph=True)
LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(hostname)
rlogger.info('%s Ceph on %s' % (remove_action, hostname))
distro.uninstall(distro, purge=purge)
distro.conn.exit()
def uninstall(args):
remove(args, False)
def purge(args):
remove(args, True)
def purgedata(args):
LOG.debug(
'Purging data from cluster %s hosts %s',
args.cluster,
' '.join(args.host),
)
installed_hosts = []
for hostname in args.host:
distro = hosts.get(hostname, username=args.username)
ceph_is_installed = distro.conn.remote_module.which('ceph')
if ceph_is_installed:
installed_hosts.append(hostname)
distro.conn.exit()
if installed_hosts:
LOG.error("Ceph is still installed on: %s", installed_hosts)
raise RuntimeError("refusing to purge data while Ceph is still installed")
for hostname in args.host:
distro = hosts.get(hostname, username=args.username)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
rlogger = logging.getLogger(hostname)
rlogger.info('purging data on %s' % hostname)
# Try to remove the contents of /var/lib/ceph first, don't worry
# about errors here, we deal with them later on
remoto.process.check(
distro.conn,
[
'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
]
)
# If we failed in the previous call, then we probably have OSDs
# still mounted, so we unmount them here
if distro.conn.remote_module.path_exists('/var/lib/ceph'):
rlogger.warning(
'OSDs may still be mounted, trying to unmount them'
)
remoto.process.run(
distro.conn,
[
'find', '/var/lib/ceph',
'-mindepth', '1',
'-maxdepth', '2',
'-type', 'd',
'-exec', 'umount', '{}', ';',
]
)
# And now we try again to remove the contents, since OSDs should be
# unmounted, but this time we do check for errors
remoto.process.run(
distro.conn,
[
'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
]
)
remoto.process.run(
distro.conn,
[
'rm', '-rf', '--one-file-system', '--', '/etc/ceph/',
]
)
distro.conn.exit()
class StoreVersion(argparse.Action):
"""
Like ``"store"`` but also remember which one of the exclusive
options was set.
There are three kinds of versions: stable, testing and dev.
This sets ``version_kind`` to be the right one of the above.
This kludge essentially lets us differentiate explicitly set
values from defaults.
"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
if self.dest == 'release':
self.dest = 'stable'
namespace.version_kind = self.dest
@priority(20)
def make(parser):
"""
Install Ceph packages on remote hosts.
"""
version = parser.add_mutually_exclusive_group()
# XXX deprecated in favor of release
version.add_argument(
'--stable',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='[DEPRECATED] install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--release',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--testing',
nargs=0,
action=StoreVersion,
help='install the latest development release',
)
version.add_argument(
'--dev',
nargs='?',
action=StoreVersion,
const='master',
metavar='BRANCH_OR_TAG',
help='install a bleeding edge build from Git branch\
or tag (default: %(default)s)',
)
version.add_argument(
'--dev-commit',
nargs='?',
action=StoreVersion,
metavar='COMMIT',
help='install a bleeding edge build from Git commit',
)
version.set_defaults(
stable=None, # XXX deprecated in favor of release
release=None, # Set the default release in sanitize_args()
dev='master',
version_kind='stable',
)
parser.add_argument(
'--mon',
dest='install_mon',
action='store_true',
help='install the mon component only',
)
parser.add_argument(
'--mds',
dest='install_mds',
action='store_true',
help='install the mds component only',
)
parser.add_argument(
'--rgw',
dest='install_rgw',
action='store_true',
help='install the rgw component only',
)
parser.add_argument(
'--osd',
dest='install_osd',
action='store_true',
help='install the osd component only',
)
parser.add_argument(
'--tests',
dest='install_tests',
action='store_true',
help='install the testing components',
)
parser.add_argument(
'--cli', '--common',
dest='install_common',
action='store_true',
help='install the common component only',
)
parser.add_argument(
'--all',
dest='install_all',
action='store_true',
help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default',
)
repo = parser.add_mutually_exclusive_group()
repo.add_argument(
'--adjust-repos',
dest='adjust_repos',
action='store_true',
help='install packages modifying source repos',
)
repo.add_argument(
'--no-adjust-repos',
dest='adjust_repos',
action='store_false',
help='install packages without modifying source repos',
)
repo.add_argument(
'--repo',
action='store_true',
help='install repo files only (skips package installation)',
)
repo.set_defaults(
adjust_repos=True,
)
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to install on',
)
parser.add_argument(
'--local-mirror',
nargs='?',
const='PATH',
default=None,
help='Fetch packages and push them to hosts for a local repo mirror',
)
parser.add_argument(
'--repo-url',
nargs='?',
dest='repo_url',
help='specify a repo URL that mirrors/contains Ceph packages',
)
parser.add_argument(
'--gpg-url',
nargs='?',
dest='gpg_url',
help='specify a GPG key URL to be used with custom repos\
(defaults to ceph.com)'
)
parser.set_defaults(
func=install,
)
@priority(80)
def make_uninstall(parser):
"""
Remove Ceph packages from remote hosts.
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to uninstall Ceph from',
)
parser.set_defaults(
func=uninstall,
)
@priority(80)
def make_purge(parser):
"""
Remove Ceph packages from remote hosts and purge all data.
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to purge Ceph from',
)
parser.set_defaults(
func=purge,
)
@priority(80)
def make_purge_data(parser):
"""
Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to purge Ceph data from',
)
parser.set_defaults(
func=purgedata,
)
| mit |
Tagar/incubator-airflow | airflow/sensors/sql_sensor.py | 5 | 2072 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from airflow.hooks.base_hook import BaseHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class SqlSensor(BaseSensorOperator):
"""
    Runs a SQL statement until a criterion is met. It will keep trying while
    the SQL returns no row, or while the first cell is in (0, '0', '').
:param conn_id: The connection to run the sensor against
:type conn_id: string
:param sql: The sql to run. To pass, it needs to return at least one cell
        that contains a non-zero / non-empty string value.
"""
template_fields = ('sql',)
template_ext = ('.hql', '.sql',)
ui_color = '#7c7287'
@apply_defaults
def __init__(self, conn_id, sql, *args, **kwargs):
self.sql = sql
self.conn_id = conn_id
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
hook = BaseHook.get_connection(self.conn_id).get_hook()
self.log.info('Poking: %s', self.sql)
records = hook.get_records(self.sql)
if not records:
return False
else:
if str(records[0][0]) in ('0', '',):
return False
else:
return True
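# --- Illustrative usage (a sketch, not part of the original module) ---
# The connection id 'my_db' and the DAG below are hypothetical; they assume an
# Airflow connection named 'my_db' pointing at the database to poll.
#
# from datetime import datetime
# from airflow import DAG
# from airflow.sensors.sql_sensor import SqlSensor
#
# dag = DAG('wait_for_rows', start_date=datetime(2018, 1, 1),
#           schedule_interval='@daily')
# wait = SqlSensor(
#     task_id='wait_for_rows',
#     conn_id='my_db',   # hypothetical connection id
#     sql="SELECT count(*) FROM staging.events WHERE ds = '{{ ds }}'",
#     poke_interval=60,
#     dag=dag,
# )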
| apache-2.0 |
weisongchen/flaskapp | venv/lib/python2.7/site-packages/click/parser.py | 199 | 15510 | # -*- coding: utf-8 -*-
"""
click.parser
~~~~~~~~~~~~
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a different module and not optparse from the stdlib
is that there are differences in 2.x and 3.x about the error messages
generated and optparse in the stdlib uses gettext for no good reason
and might cause us issues.
"""
import re
from collections import deque
from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
BadArgumentUsage
def _unpack_args(args, nargs_spec):
"""Given an iterable of arguments and an iterable of nargs specifications,
it returns a tuple with all the unpacked arguments at the first index
and all remaining arguments as the second.
The nargs specification is the number of arguments that should be consumed
or `-1` to indicate that this position should eat up all the remainders.
Missing items are filled with `None`.
"""
args = deque(args)
nargs_spec = deque(nargs_spec)
rv = []
spos = None
def _fetch(c):
try:
if spos is None:
return c.popleft()
else:
return c.pop()
except IndexError:
return None
while nargs_spec:
nargs = _fetch(nargs_spec)
if nargs == 1:
rv.append(_fetch(args))
elif nargs > 1:
x = [_fetch(args) for _ in range(nargs)]
# If we're reversed, we're pulling in the arguments in reverse,
# so we need to turn them around.
if spos is not None:
x.reverse()
rv.append(tuple(x))
elif nargs < 0:
if spos is not None:
raise TypeError('Cannot have two nargs < 0')
spos = len(rv)
rv.append(None)
# spos is the position of the wildcard (star). If it's not `None`,
# we fill it with the remainder.
if spos is not None:
rv[spos] = tuple(args)
args = []
rv[spos + 1:] = reversed(rv[spos + 1:])
return tuple(rv), list(args)
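# Worked example (not part of the original module): with one wildcard slot,
# the fixed positions are filled from both ends and the wildcard soaks up the
# remaining arguments.
#
#     >>> _unpack_args(['a', 'b', 'c', 'd'], [1, -1, 1])
#     (('a', ('b', 'c'), 'd'), [])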
def _error_opt_args(nargs, opt):
if nargs == 1:
raise BadOptionUsage('%s option requires an argument' % opt)
raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs))
def split_opt(opt):
first = opt[:1]
if first.isalnum():
return '', opt
if opt[1:2] == first:
return opt[:2], opt[2:]
return first, opt[1:]
def normalize_opt(opt, ctx):
if ctx is None or ctx.token_normalize_func is None:
return opt
prefix, opt = split_opt(opt)
return prefix + ctx.token_normalize_func(opt)
def split_arg_string(string):
"""Given an argument string this attempts to split it into small parts."""
rv = []
for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
r'|\S+)\s*', string, re.S):
arg = match.group().strip()
if arg[:1] == arg[-1:] and arg[:1] in '"\'':
arg = arg[1:-1].encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
try:
arg = type(string)(arg)
except UnicodeError:
pass
rv.append(arg)
return rv
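# Example behaviour (not part of the original module): quoted sections are
# kept together and their quotes stripped.
#
#     >>> split_arg_string("hello 'my world'")
#     ['hello', 'my world']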
class Option(object):
def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError('Invalid start character for option (%s)'
% opt)
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = 'store'
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self):
return self.action in ('store', 'append')
def process(self, value, state):
if self.action == 'store':
state.opts[self.dest] = value
elif self.action == 'store_const':
state.opts[self.dest] = self.const
elif self.action == 'append':
state.opts.setdefault(self.dest, []).append(value)
elif self.action == 'append_const':
state.opts.setdefault(self.dest, []).append(self.const)
elif self.action == 'count':
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
else:
raise ValueError('unknown action %r' % self.action)
state.order.append(self.obj)
class Argument(object):
def __init__(self, dest, nargs=1, obj=None):
self.dest = dest
self.nargs = nargs
self.obj = obj
def process(self, value, state):
if self.nargs > 1:
holes = sum(1 for x in value if x is None)
if holes == len(value):
value = None
elif holes != 0:
raise BadArgumentUsage('argument %s takes %d values'
% (self.dest, self.nargs))
state.opts[self.dest] = value
state.order.append(self.obj)
class ParsingState(object):
def __init__(self, rargs):
self.opts = {}
self.largs = []
self.rargs = rargs
self.order = []
class OptionParser(object):
"""The option parser is an internal class that is ultimately used to
parse options and arguments. It's modelled after optparse and brings
a similar but vastly simplified API. It should generally not be used
directly as the high level Click classes wrap it for you.
It's not nearly as extensible as optparse or argparse as it does not
implement features that are implemented on a higher level (such as
types or defaults).
:param ctx: optionally the :class:`~click.Context` that this parser
should work with.
"""
def __init__(self, ctx=None):
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt = {}
self._long_opt = {}
self._opt_prefixes = set(['-', '--'])
self._args = []
def add_option(self, opts, dest, action=None, nargs=1, const=None,
obj=None):
"""Adds a new option named `dest` to the parser. The destination
is not inferred (unlike with optparse) and needs to be explicitly
provided. Action can be any of ``store``, ``store_const``,
``append``, ``append_const`` or ``count``.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(opts, dest, action=action, nargs=nargs,
const=const, obj=obj)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(self, dest, nargs=1, obj=None):
"""Adds a positional argument named `dest` to the parser.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
def parse_args(self, args):
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state):
pargs, args = _unpack_args(state.largs + state.rargs,
[x.nargs for x in self._args])
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state):
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == '--':
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt, explicit_value, state):
if opt not in self._long_opt:
possibilities = [word for word in self._long_opt
if word.startswith(opt)]
raise NoSuchOption(opt, possibilities=possibilities)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
elif explicit_value is not None:
raise BadOptionUsage('%s option does not take a value' % opt)
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg, state):
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(prefix + ch, self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
else:
value = None
option.process(value, state)
if stop:
break
# If we got any unknown options we recombine the remaining option
# characters into a string, re-attach the prefix, and report that to
# the state as a new larg. This way basic option combining keeps
# working while unknown arguments are still ignored.
if self.ignore_unknown_options and unknown_options:
state.largs.append(prefix + ''.join(unknown_options))
def _process_opts(self, arg, state):
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if '=' in arg:
long_opt, explicit_value = arg.split('=', 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
# At this point the long option matching failed, and we need
# to try with short options. However there is a special rule
# which says, that if we have a two character options prefix
# (applies to "--foo" for instance), we do not dispatch to the
# short option code and will instead raise the no option
# error.
if arg[:2] not in self._opt_prefixes:
return self._match_short_opt(arg, state)
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
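# Minimal self-contained sketch (not part of the original module) exercising
# the parser API defined above; the option and argument names are made up.
if __name__ == '__main__':
    _parser = OptionParser()
    _parser.add_option(['-n', '--name'], dest='name')
    _parser.add_argument('src', nargs=1)
    _opts, _leftovers, _order = _parser.parse_args(['--name', 'demo', 'input.txt'])
    assert _opts == {'name': 'demo', 'src': 'input.txt'}
    assert _leftovers == []
    assert _order == ['name', 'src']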
| mit |
mathspace/django | tests/queryset_pickle/models.py | 281 | 1904 | import datetime
from django.db import DJANGO_VERSION_PICKLE_KEY, models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
def standalone_number():
return 1
class Numbers(object):
@staticmethod
def get_static_number():
return 2
class PreviousDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super(PreviousDjangoVersionQuerySet, self).__getstate__()
state[DJANGO_VERSION_PICKLE_KEY] = '1.0'
return state
class MissingDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super(MissingDjangoVersionQuerySet, self).__getstate__()
del state[DJANGO_VERSION_PICKLE_KEY]
return state
class Group(models.Model):
name = models.CharField(_('name'), max_length=100)
objects = models.Manager()
previous_django_version_objects = PreviousDjangoVersionQuerySet.as_manager()
missing_django_version_objects = MissingDjangoVersionQuerySet.as_manager()
class Event(models.Model):
title = models.CharField(max_length=100)
group = models.ForeignKey(Group, models.CASCADE)
class Happening(models.Model):
when = models.DateTimeField(blank=True, default=datetime.datetime.now)
name = models.CharField(blank=True, max_length=100, default="test")
number1 = models.IntegerField(blank=True, default=standalone_number)
if six.PY3:
# default serializable on Python 3 only
number2 = models.IntegerField(blank=True, default=Numbers.get_static_number)
class Container(object):
# To test pickling we need a class that isn't defined at module level, but
# is still available from the app cache. So the Container class moves
# SomeModel out of module scope.
class SomeModel(models.Model):
somefield = models.IntegerField()
class M2MModel(models.Model):
groups = models.ManyToManyField(Group)
| bsd-3-clause |
hosford42/xcs | xcs/__init__.py | 1 | 6684 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# xcs
# ---
# Accuracy-based Classifier Systems for Python 3
#
# http://hosford42.github.io/xcs/
#
# (c) Aaron Hosford 2015, all rights reserved
# Revised (3 Clause) BSD License
#
# Implements the XCS (Accuracy-based Classifier System) algorithm,
# as described in the 2001 paper, "An Algorithmic Description of XCS,"
# by Martin Butz and Stewart Wilson.
#
# -------------------------------------------------------------------------
"""
Accuracy-based Classifier Systems for Python 3
This package implements the XCS (Accuracy-based Classifier System)
algorithm, as described in the 2001 paper, "An Algorithmic Description of
XCS," by Martin Butz and Stewart Wilson.[1] The module also provides a
framework for implementing and experimenting with learning classifier
systems in general.
Usage:
import logging
from xcs import XCSAlgorithm
from xcs.scenarios import MUXProblem, ScenarioObserver
# Create a scenario instance, either by instantiating one of the
# predefined scenarios provided in xcs.scenarios, or by creating your
# own subclass of the xcs.scenarios.Scenario base class and
# instantiating it.
scenario = MUXProblem(training_cycles=50000)
# If you want to log the process of the run as it proceeds, set the
# logging level with the built-in logging module, and wrap the
# scenario with an OnLineObserver.
logging.root.setLevel(logging.INFO)
scenario = ScenarioObserver(scenario)
# Instantiate the algorithm and set the parameters to values that are
# appropriate for the scenario. Calling help(XCSAlgorithm) will give
# you a description of each parameter's meaning.
algorithm = XCSAlgorithm()
algorithm.exploration_probability = .1
algorithm.do_ga_subsumption = True
algorithm.do_action_set_subsumption = True
# Create a classifier set from the algorithm, tailored for the
# scenario you have selected.
model = algorithm.new_model(scenario)
# Run the classifier set in the scenario, optimizing it as the
# scenario unfolds.
model.run(scenario, learn=True)
# Use the built-in pickle module to save/reload your model for reuse.
import pickle
pickle.dump(model, open('model.bin', 'wb'))
reloaded_model = pickle.load(open('model.bin', 'rb'))
# Or just print the results out.
print(model)
# Or get a quick list of the best classifiers discovered.
for rule in model:
if rule.fitness <= .5 or rule.experience < 10:
continue
print(rule.condition, '=>', rule.action, ' [%.5f]' % rule.fitness)
A quick explanation of the XCS algorithm:
The XCS algorithm attempts to solve the reinforcement learning
problem, which is to maximize a reward signal by learning the optimal
mapping from inputs to outputs, where inputs are represented as
sequences of bits and outputs are selected from a finite set of
predetermined actions. It does so by using a genetic algorithm to
evolve a competing population of classifier rules, of the form
condition => action => prediction
where the condition is a bit template (a string of 1s, 0s, and
wildcards, represented as #s) which matches against one or more
possible inputs, and the prediction is a floating point value that
indicates the observed reward level when the condition matches the
input and the indicated action is selected. The fitness of each rule in
the classifier set is determined not by the size of the prediction, but
by its observed accuracy, as well as by the degree to which the rule
fills a niche that many other rules do not already fill. The reason for
using accuracy rather than reward is that it was found that using r
eward destabilizes the population.
More extensive help is available online at https://pythonhosted.org/xcs/
References:
[1] Butz, M. and Wilson, S. (2001). An algorithmic description of XCS. In
Lanzi, P., Stolzmann, W., and Wilson, S., editors, Advances in
Learning Classifier Systems: Proceedings of the Third International
Workshop, volume 1996 of Lecture Notes in Artificial Intelligence,
pages 253–272. Springer-Verlag Berlin Heidelberg.
Copyright (c) 2015, Aaron Hosford
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of xcs nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# Attempt to import numpy. If unsuccessful, set numpy = None.
try:
import numpy
except ImportError:
numpy = None
else:
# This is necessary because sometimes the empty numpy folder is left in
# place when it is uninstalled.
try:
numpy.ndarray
except AttributeError:
numpy = None
from . import bitstrings, scenarios
from .framework import ActionSet, ClassifierRule, ClassifierSet, LCSAlgorithm, MatchSet
from .algorithms.xcs import XCSClassifierRule, XCSAlgorithm
from .testing import test
__author__ = 'Aaron Hosford'
__version__ = '1.0.0'
__all__ = [
# Module Metadata
'__author__',
'__version__',
# Preloaded Submodules
'bitstrings',
'scenarios',
# Classes
'ActionSet',
'ClassifierRule',
'XCSClassifierRule',
'ClassifierSet',
'LCSAlgorithm',
'XCSAlgorithm',
'MatchSet',
# Functions
'test',
]
| bsd-3-clause |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/tests/submissions_test.py | 1 | 7953 | """Tests for eval_lib.submissions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from six import assertCountEqual
from eval_lib import submissions
from eval_lib.tests import fake_cloud_client
ROUND_NAME = "round-name"
class ParticipantFromSubmissionPathTest(unittest.TestCase):
def test_team_id(self):
self.assertDictEqual(
{"team_id": 42}, submissions.participant_from_submission_path("path/42.zip")
)
def test_baseline_id(self):
self.assertDictEqual(
{"baseline_id": "a_1"},
submissions.participant_from_submission_path("path/baseline_a_1.zip"),
)
def test_tar_extension(self):
self.assertDictEqual(
{"team_id": 42}, submissions.participant_from_submission_path("path/42.tar")
)
def test_tar_gz_extension(self):
self.assertDictEqual(
{"team_id": 42},
submissions.participant_from_submission_path("path/42.tar.gz"),
)
class SubmissionsTest(unittest.TestCase):
def setUp(self):
storage_blobs = [
ROUND_NAME + "/submissions/nontargeted/1.zip",
ROUND_NAME + "/submissions/nontargeted/baseline_nt.zip",
ROUND_NAME + "/submissions/targeted/1.zip",
ROUND_NAME + "/submissions/targeted/2.zip",
ROUND_NAME + "/submissions/defense/3.zip",
ROUND_NAME + "/submissions/defense/baseline_adv_train.zip",
]
self.storage_client = fake_cloud_client.FakeStorageClient(storage_blobs)
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
self.submissions = submissions.CompetitionSubmissions(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
round_name=ROUND_NAME,
)
def verify_submissions(self):
assertCountEqual(
self,
[
submissions.SubmissionDescriptor(
path=(ROUND_NAME + "/submissions/nontargeted/1.zip"),
participant_id={"team_id": 1},
),
submissions.SubmissionDescriptor(
path=(ROUND_NAME + "/submissions/nontargeted/baseline_nt.zip"),
participant_id={"baseline_id": "nt"},
),
],
self.submissions.attacks.values(),
)
assertCountEqual(
self,
[
submissions.SubmissionDescriptor(
path=(ROUND_NAME + "/submissions/targeted/1.zip"),
participant_id={"team_id": 1},
),
submissions.SubmissionDescriptor(
path=(ROUND_NAME + "/submissions/targeted/2.zip"),
participant_id={"team_id": 2},
),
],
self.submissions.targeted_attacks.values(),
)
assertCountEqual(
self,
[
submissions.SubmissionDescriptor(
path=(ROUND_NAME + "/submissions/defense/3.zip"),
participant_id={"team_id": 3},
),
submissions.SubmissionDescriptor(
path=(ROUND_NAME + "/submissions/defense/baseline_adv_train.zip"),
participant_id={"baseline_id": "adv_train"},
),
],
self.submissions.defenses.values(),
)
self.assertEqual(
len(self.submissions.attacks)
+ len(self.submissions.targeted_attacks)
+ len(self.submissions.defenses),
len(
set(self.submissions.attacks.keys())
| set(self.submissions.targeted_attacks.keys())
| set(self.submissions.defenses.keys())
),
)
def verify_datastore_entities(self):
# Verify 'SubmissionType' entities
assertCountEqual(
self,
[
self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey("SubmissionType", "Attacks")
),
self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey(
"SubmissionType", "TargetedAttacks"
)
),
self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey("SubmissionType", "Defenses")
),
],
self.datastore_client.query_fetch(kind="SubmissionType"),
)
# Verify 'Submission' entities
expected_submission_entities = []
for key_prefix, submission_entries in [
(("SubmissionType", "Attacks"), self.submissions.attacks),
(("SubmissionType", "TargetedAttacks"), self.submissions.targeted_attacks),
(("SubmissionType", "Defenses"), self.submissions.defenses),
]:
for k, v in submission_entries.items():
entity = self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey(
*(key_prefix + ("Submission", k))
)
)
entity["submission_path"] = v.path
entity.update(v.participant_id)
expected_submission_entities.append(entity)
assertCountEqual(
self,
expected_submission_entities,
self.datastore_client.query_fetch(kind="Submission"),
)
def test_init_from_storage(self):
self.submissions.init_from_storage_write_to_datastore()
self.verify_submissions()
self.verify_datastore_entities()
def test_init_from_datastore(self):
# first we need to populate datastore
self.submissions.init_from_storage_write_to_datastore()
# now reset submission class and load data from datastore
self.submissions = submissions.CompetitionSubmissions(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
round_name=ROUND_NAME,
)
self.assertFalse(self.submissions.attacks)
self.assertFalse(self.submissions.targeted_attacks)
self.assertFalse(self.submissions.defenses)
self.submissions.init_from_datastore()
self.verify_submissions()
def test_get_all_attacks_ids(self):
self.submissions.init_from_storage_write_to_datastore()
# total will be two targeted and two not-targeted attacks,
# their IDs are generated sequentially
assertCountEqual(
self,
["SUBA000", "SUBA001", "SUBT000", "SUBT001"],
self.submissions.get_all_attack_ids(),
)
def test_find_by_id(self):
self.submissions.init_from_storage_write_to_datastore()
self.assertEqual(
self.submissions.attacks["SUBA000"], self.submissions.find_by_id("SUBA000")
)
self.assertEqual(
self.submissions.targeted_attacks["SUBT001"],
self.submissions.find_by_id("SUBT001"),
)
self.assertEqual(
self.submissions.defenses["SUBD001"], self.submissions.find_by_id("SUBD001")
)
def test_get_external_id(self):
self.submissions.init_from_storage_write_to_datastore()
assertCountEqual(
self,
[3, "baseline_adv_train"],
[
self.submissions.get_external_id("SUBD000"),
self.submissions.get_external_id("SUBD001"),
],
)
assertCountEqual(
self,
[1, "baseline_nt"],
[
self.submissions.get_external_id("SUBA000"),
self.submissions.get_external_id("SUBA001"),
],
)
if __name__ == "__main__":
unittest.main()
| mit |
morganbengtsson/io_mos | mos/materials.py | 1 | 5751 | import bpy
import json
from shutil import copyfile
from .common import *
def copy_linked_map(input_name, directory, blender_material, node):
if not node:
return None
node_input = node.inputs.get(input_name)
if not node_input:
return None
image_path = None
texture_path = None
if node_input.is_linked:
texture_node = node_input.links[0].from_node
image = texture_node.image
filename = image.name
source_filepath = bpy.path.abspath(image.filepath, library=image.library)
image_path = library_path(blender_material) + "images/" + filename
full_image_path = directory + '/' + image_path
os.makedirs(os.path.dirname(full_image_path), exist_ok=True)
copyfile(source_filepath, full_image_path)
texture_filter = texture_node.interpolation.lower()
if texture_filter not in {"linear", "closest"}:
raise Exception("Interpolation not supported")
texture_wrap = "repeat" if texture_node.extension.lower() == "repeat" else "clamp"
texture = {"filter": texture_filter,
"wrap": texture_wrap,
"image": image_path}
texture_path = library_path(blender_material) + "textures/" + image.name + ".texture"
texture_path = texture_path.strip('/')
path = directory + '/' + texture_path
os.makedirs(os.path.dirname(path), exist_ok=True)
json_file = open(path, 'w')
json.dump(texture, json_file)
json_file.close()
return texture_path
def material_path(blender_material: bpy.types.Material):
path = library_path(blender_material) + "materials/" + blender_material.name + ".material"
return path.strip('/')
def write(report, directory):
blender_materials = bpy.data.materials
for blender_material in blender_materials:
print(blender_material.name)
print(blender_material.use_nodes)
print(blender_material.node_tree)
if blender_material.use_nodes and blender_material.node_tree:
print("WRITING " + str(blender_material.name))
report({'INFO'}, "Writing: " + str(blender_material.name))
try:
node = next(n for n in blender_material.node_tree.nodes.values() if n.bl_idname == "ShaderNodeOutputMaterial").inputs[0].links[0].from_node
if node.bl_idname != "ShaderNodeBsdfPrincipled":
raise Exception("Material node must be Principled.")
albedo_input = node.inputs.get("Base Color")
albedo_map = copy_linked_map("Base Color", directory, blender_material, node)
albedo_value = (0.0, 0.0, 0.0) if not albedo_input.default_value[:3] else albedo_input.default_value[:3]
normal_input = node.inputs.get("Normal")
normal_map_node = normal_input.links[0].from_node if normal_input.is_linked else None
normal_map = copy_linked_map("Color", directory, blender_material, normal_map_node) if normal_map_node else None
metallic_map = copy_linked_map("Metallic", directory, blender_material, node)
metallic_value = node.inputs.get("Metallic").default_value
roughness_map = copy_linked_map("Roughness", directory, blender_material, node)
roughness_value = node.inputs.get("Roughness").default_value
emission_map = copy_linked_map("Emission", directory, blender_material, node)
emission_value = node.inputs.get("Emission").default_value
mos_node = next((n for n in blender_material.node_tree.nodes.values() if n.name == "MOS"), None)
ambient_occlusion_input = mos_node.inputs.get("Ambient Occlusion") if mos_node else None
ambient_occlusion_value = ambient_occlusion_input.default_value if ambient_occlusion_input else 1.0
ambient_occlusion_map = copy_linked_map("Ambient Occlusion", directory, blender_material, mos_node)
alpha = node.inputs.get("Alpha").default_value
index_of_refraction = node.inputs.get("IOR").default_value
transmission = node.inputs.get("Transmission").default_value
material = {"albedo": {"value": tuple(albedo_value), "texture": albedo_map},
"roughness": {"value": float(roughness_value), "texture": roughness_map},
"metallic": {"value": float(metallic_value), "texture": metallic_map},
"emission": {"value": tuple(emission_value), "texture": emission_map},
"ambient_occlusion": {"value": float(ambient_occlusion_value),
"texture": ambient_occlusion_map},
"normal": {"texture": normal_map},
"alpha": float(alpha),
"index_of_refraction": index_of_refraction,
"transmission": transmission,
}
path = directory + '/' + material_path(blender_material)
os.makedirs(os.path.dirname(path), exist_ok=True)
json_file = open(path, 'w')
json.dump(material, json_file)
json_file.close()
except Exception as e:
raise Exception('Error writing material ' + blender_material.name) from e
report({'INFO'}, "Wrote: " + path)
report({'INFO'}, "Wrote material " + blender_material.name)
else:
report({'WARNING'}, "Did not write material {}".format(blender_material.name))
report({'INFO'}, "Wrote all materials.")
| mit |
ProkopHapala/SimpleSimulationEngine | python/pySimE/space/exp/pykep/lambert_MarsEarthDeltaV_2D.py | 1 | 2872 |
from pylab import *
from PyKEP import epoch, DAY2SEC, planet_ss, AU, MU_SUN, lambert_problem
from PyKEP.orbit_plots import plot_planet, plot_lambert
plEarth = planet_ss('earth');
plMars = planet_ss('mars');
def opt_dt(tt1,tt2):
t1 = epoch(tt1)
t2 = epoch(tt2)
#print t1
dt = (t2.mjd2000 - t1.mjd2000) * DAY2SEC
rE, vE = plEarth.eph(t1); vE=array(vE)
rM, vM = plMars .eph(t2); vM=array(vM)
l = lambert_problem(rE,rM,dt,MU_SUN)
vEl = array(l.get_v1()[0]); dvE = (vEl - vE)
vMl = array(l.get_v2()[0]); dvM = (vMl - vM)
dvMTot = linalg.norm(dvM); dvETot= linalg.norm(dvE)
dvTot = dvMTot+dvETot
print " t1 " ,tt1," t2 ", tt2," dt ",(tt2-tt1)," dv ", dvTot
return vE, vM, vEl, vMl
tt1min = 5000
tt1max = tt1min+365*10
#tt1max = 600
dttmin = 100
dttmax = 400
step = 5
dvMapEarth = []
dvMapMars = []
for tt1 in range(tt1min,tt1max,step):
dvRowE = []
dvRowM = []
for dtt in range(dttmin,dttmax,step):
vE, vM, vEl, vMl = opt_dt(tt1,tt1+dtt)
dvRowE.append(linalg.norm(vE-vEl))
dvRowM.append(linalg.norm(vM-vMl))
dvMapEarth.append(dvRowE)
dvMapMars .append(dvRowM)
dvMapEarth = array( dvMapEarth )
dvMapMars = array( dvMapMars )
#print shape(dvMap)
#print dvMap
#figure(figsize=(10,8))
#figure(figsize=(18,3))
figure(figsize=(20,6))
xtcks = range(tt1min,tt1max,365); #print xtcks
xtck_labes = [ str(epoch(tt))[:12] for tt in xtcks ]; #print xtck_labes
subplot (3,1,1)
title('Total Earth to Mars delta-v [km/s]')
FF = transpose((dvMapEarth+dvMapMars)/1000)
imshow( FF, extent=( tt1min,tt1max, dttmin, dttmax ), vmax=10 , origin='image', interpolation='bicubic', aspect='equal');
colorbar(use_gridspec=True, shrink=0.9, pad = 0.005, fraction = 0.005 );
xticks( xtcks, xtck_labes )
xlabel('Departure date')
ylabel('Time of flight [days]')
subplot (3,1,2)
title('Earth departure delta-v [km/s]')
FF = transpose(dvMapEarth/1000)
imshow( FF, extent=( tt1min,tt1max, dttmin, dttmax ), vmax=7 , origin='image', interpolation='bicubic', aspect='equal');
colorbar(use_gridspec=True, shrink=0.9, pad = 0.005, fraction = 0.005 );
xticks( xtcks, xtck_labes )
xlabel('Departure date')
ylabel('Time of flight [days]')
subplot (3,1,3)
title('Mars arrival delta-v [km/s]')
FF = transpose(dvMapMars/1000)
imshow( FF, extent=( tt1min,tt1max, dttmin, dttmax ), vmax=5 , origin='image', interpolation='bicubic', aspect='equal');
colorbar(use_gridspec=True, shrink=0.9, pad = 0.005, fraction = 0.005 );
xticks( xtcks, xtck_labes )
xlabel('Departure date')
ylabel('Time of flight [days]')
#cfig = contour( FF , extent=( tt1min,tt1max, dttmin, dttmax ), colors='black', levels=arange(0,10,0.5))
#clabel(cfig, inline=1, fontsize=10)
tight_layout()
savefig('porkchop_Earth_Mars.png', transparent=True, bbox_inches='tight', pad_inches=0)
plt.show()
| mit |
MeigaraJuma/XQS-Website-Angular | node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | 3662 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
TRYBOTS = [
'linux_try',
'mac_try',
'win_try',
]
def GetPreferredTryMasters(_, change):
return {
'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
}
| mit |
DmitryYurov/BornAgain | Examples/python/simulation/ex01_BasicParticles/TwoTypesOfCylindersWithSizeDistribution.py | 2 | 2580 | """
Mixture cylinder particles with different size distribution
"""
import bornagain as ba
from bornagain import deg, angstrom, nm
def get_sample():
"""
Returns a sample with cylinders in a homogeneous medium ("air").
The cylinders are a 95:5 mixture of two different size distributions.
"""
# defining materials
m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0)
m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles #1
radius1 = 5.0*nm
height1 = radius1
sigma1 = radius1*0.2
cylinder_ff1 = ba.FormFactorCylinder(radius1, height1)
cylinder1 = ba.Particle(m_particle, cylinder_ff1)
gauss_distr1 = ba.DistributionGaussian(radius1, sigma1)
nparticles = 150
sigma_factor = 3.0
# limits will assure, that generated Radius'es are >=0
limits = ba.RealLimits.nonnegative()
par_distr1 = ba.ParameterDistribution(
"/Particle/Cylinder/Radius", gauss_distr1, nparticles, sigma_factor, limits)
part_coll1 = ba.ParticleDistribution(cylinder1, par_distr1)
# collection of particles #2
radius2 = 10.0*nm
height2 = radius2
sigma2 = radius2*0.02
cylinder_ff2 = ba.FormFactorCylinder(radius2, height2)
cylinder2 = ba.Particle(m_particle, cylinder_ff2)
gauss_distr2 = ba.DistributionGaussian(radius2, sigma2)
par_distr2 = ba.ParameterDistribution(
"/Particle/Cylinder/Radius", gauss_distr2, nparticles, sigma_factor, limits)
part_coll2 = ba.ParticleDistribution(cylinder2, par_distr2)
# assembling the sample
particle_layout = ba.ParticleLayout()
particle_layout.addParticle(part_coll1, 0.95)
particle_layout.addParticle(part_coll2, 0.05)
air_layer = ba.Layer(m_air)
air_layer.addLayout(particle_layout)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(air_layer)
return multi_layer
def get_simulation():
"""
Create and return GISAXS simulation with beam and detector defined
"""
simulation = ba.GISASSimulation()
simulation.setDetectorParameters(200, 0.0*deg, 2.0*deg,
200, 0.0*deg, 2.0*deg)
simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg)
return simulation
def run_simulation():
"""
Runs simulation and returns intensity map.
"""
simulation = get_simulation()
simulation.setSample(get_sample())
simulation.runSimulation()
return simulation.result()
if __name__ == '__main__':
result = run_simulation()
ba.plot_simulation_result(result, cmap='jet', aspect='auto')
| gpl-3.0 |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/nose/plugins/prof.py | 106 | 5357 | """This plugin will run tests using the hotshot profiler, which is part
of the standard library. To turn it on, use the ``--with-profile`` option
or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
and the profiler output file may be changed with ``--profile-stats-file``.
See the `hotshot documentation`_ in the standard library documentation for
more details on the various output options.
.. _hotshot documentation: http://docs.python.org/library/hotshot.html
"""
try:
import hotshot
from hotshot import stats
except ImportError:
hotshot, stats = None, None
import logging
import os
import sys
import tempfile
from nose.plugins.base import Plugin
from nose.util import tolist
log = logging.getLogger('nose.plugins')
class Profile(Plugin):
"""
Use this plugin to run tests using the hotshot profiler.
"""
pfile = None
clean_stats_file = False
def options(self, parser, env):
"""Register commandline options.
"""
if not self.available():
return
Plugin.options(self, parser, env)
parser.add_option('--profile-sort', action='store', dest='profile_sort',
default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
metavar="SORT",
help="Set sort order for profiler output")
parser.add_option('--profile-stats-file', action='store',
dest='profile_stats_file',
metavar="FILE",
default=env.get('NOSE_PROFILE_STATS_FILE'),
help='Profiler stats file; default is a new '
'temp file on each run')
parser.add_option('--profile-restrict', action='append',
dest='profile_restrict',
metavar="RESTRICT",
default=env.get('NOSE_PROFILE_RESTRICT'),
help="Restrict profiler output. See help for "
"pstats.Stats for details")
def available(cls):
return hotshot is not None
available = classmethod(available)
def begin(self):
"""Create profile stats file and load profiler.
"""
if not self.available():
return
self._create_pfile()
self.prof = hotshot.Profile(self.pfile)
def configure(self, options, conf):
"""Configure plugin.
"""
if not self.available():
self.enabled = False
return
Plugin.configure(self, options, conf)
self.conf = conf
if options.profile_stats_file:
self.pfile = options.profile_stats_file
self.clean_stats_file = False
else:
self.pfile = None
self.clean_stats_file = True
self.fileno = None
self.sort = options.profile_sort
self.restrict = tolist(options.profile_restrict)
def prepareTest(self, test):
"""Wrap entire test run in :func:`prof.runcall`.
"""
if not self.available():
return
log.debug('preparing test %s' % test)
def run_and_profile(result, prof=self.prof, test=test):
self._create_pfile()
prof.runcall(test, result)
return run_and_profile
def report(self, stream):
"""Output profiler report.
"""
log.debug('printing profiler report')
self.prof.close()
prof_stats = stats.load(self.pfile)
prof_stats.sort_stats(self.sort)
# 2.5 has completely different stream handling from 2.4 and earlier.
# Before 2.5, stats objects have no stream attribute; in 2.5 and later
# a reference to sys.stdout is stored before we can tweak it.
compat_25 = hasattr(prof_stats, 'stream')
if compat_25:
tmp = prof_stats.stream
prof_stats.stream = stream
else:
tmp = sys.stdout
sys.stdout = stream
try:
if self.restrict:
log.debug('setting profiler restriction to %s', self.restrict)
prof_stats.print_stats(*self.restrict)
else:
prof_stats.print_stats()
finally:
if compat_25:
prof_stats.stream = tmp
else:
sys.stdout = tmp
def finalize(self, result):
"""Clean up stats file, if configured to do so.
"""
if not self.available():
return
try:
self.prof.close()
except AttributeError:
# TODO: is this trying to catch just the case where not
# hasattr(self.prof, "close")? If so, the function call should be
# moved out of the try: suite.
pass
if self.clean_stats_file:
if self.fileno:
try:
os.close(self.fileno)
except OSError:
pass
try:
os.unlink(self.pfile)
except OSError:
pass
return None
def _create_pfile(self):
if not self.pfile:
self.fileno, self.pfile = tempfile.mkstemp()
self.clean_stats_file = True
| bsd-3-clause |
BackupTheBerlios/espressopp | src/FixedQuadrupleList.py | 1 | 2673 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*******************************
**espresso.FixedQuadrupleList**
*******************************
"""
from espresso import pmi
import _espresso
import espresso
from espresso.esutil import cxxinit
class FixedQuadrupleListLocal(_espresso.FixedQuadrupleList):
'The (local) fixed quadruple list.'
def __init__(self, storage):
'Local construction of a fixed quadruple list'
if pmi.workerIsActive():
cxxinit(self, _espresso.FixedQuadrupleList, storage)
def add(self, pid1, pid2, pid3, pid4):
'add quadruple to fixed quadruple list'
if pmi.workerIsActive():
return self.cxxclass.add(self, pid1, pid2, pid3, pid4)
def size(self):
'count number of Quadruples in GlobalQuadrupleList, involves global reduction'
if pmi.workerIsActive():
return self.cxxclass.size(self)
def addQuadruples(self, quadruplelist):
"""
Each processor takes the broadcasted quadruplelist and
adds those quadruples whose first particle is owned by
this processor.
"""
if pmi.workerIsActive():
for quadruple in quadruplelist:
pid1, pid2, pid3, pid4 = quadruple
self.cxxclass.add(self, pid1, pid2, pid3, pid4)
def getQuadruples(self):
'return the quadruples of the GlobalQuadrupleList'
if pmi.workerIsActive():
quadruple = self.cxxclass.getQuadruples(self)
return quadruple
if pmi.isController:
class FixedQuadrupleList(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.FixedQuadrupleListLocal',
localcall = [ "add" ],
pmicall = [ "addQuadruples" ],
pmiinvoke = ["getQuadruples", "size"]
)
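# Hypothetical usage sketch (not part of the original module), assuming a
# configured espresso system whose storage already holds the particles:
#
#   fql = espresso.FixedQuadrupleList(system.storage)
#   fql.addQuadruples([(1, 2, 3, 4), (2, 3, 4, 5)])
#   nq = fql.size()   # global quadruple count, reduced over all workers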
| gpl-3.0 |
bigswitch/nova | nova/tests/unit/test_availability_zones.py | 15 | 11973 | # Copyright 2013 Netease Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for availability zones
"""
import mock
import six
from nova import availability_zones as az
import nova.conf
from nova import context
from nova import db
from nova import objects
from nova import test
CONF = nova.conf.CONF
class AvailabilityZoneTestCases(test.TestCase):
"""Test case for aggregate based availability zone."""
def setUp(self):
super(AvailabilityZoneTestCases, self).setUp()
self.host = 'me'
self.availability_zone = 'nova-test'
self.default_az = CONF.default_availability_zone
self.default_in_az = CONF.internal_service_availability_zone
self.context = context.get_admin_context()
self.agg = self._create_az('az_agg', self.availability_zone)
def tearDown(self):
db.aggregate_delete(self.context, self.agg['id'])
super(AvailabilityZoneTestCases, self).tearDown()
def _create_az(self, agg_name, az_name):
agg_meta = {'name': agg_name}
agg = db.aggregate_create(self.context, agg_meta)
metadata = {'availability_zone': az_name}
db.aggregate_metadata_add(self.context, agg['id'], metadata)
return agg
def _update_az(self, aggregate, az_name):
metadata = {'availability_zone': az_name}
db.aggregate_update(self.context, aggregate['id'], metadata)
def _create_service_with_topic(self, topic, host, disabled=False):
values = {
'binary': 'bin',
'host': host,
'topic': topic,
'disabled': disabled,
}
return db.service_create(self.context, values)
def _destroy_service(self, service):
return db.service_destroy(self.context, service['id'])
def _add_to_aggregate(self, service, aggregate):
return db.aggregate_host_add(self.context,
aggregate['id'], service['host'])
def _delete_from_aggregate(self, service, aggregate):
return db.aggregate_host_delete(self.context,
aggregate['id'], service['host'])
def test_rest_availability_zone_reset_cache(self):
az._get_cache().add('cache', 'fake_value')
az.reset_cache()
self.assertIsNone(az._get_cache().get('cache'))
def test_update_host_availability_zone_cache(self):
"""Test availability zone cache could be update."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
cache_key = az._make_cache_key(self.host)
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
az.update_host_availability_zone_cache(self.context, self.host)
self.assertEqual('az1', az._get_cache().get(cache_key))
az.update_host_availability_zone_cache(self.context, self.host, 'az2')
self.assertEqual('az2', az._get_cache().get(cache_key))
def test_set_availability_zone_compute_service(self):
"""Test for compute service get right availability zone."""
service = self._create_service_with_topic('compute', self.host)
services = db.service_get_all(self.context)
# The service is not add into aggregate, so confirm it is default
# availability zone.
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEqual(self.default_az, new_service['availability_zone'])
# The service is added into aggregate, confirm return the aggregate
# availability zone.
self._add_to_aggregate(service, self.agg)
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEqual(self.availability_zone,
new_service['availability_zone'])
self._destroy_service(service)
def test_set_availability_zone_unicode_key(self):
"""Test set availability zone cache key is unicode."""
service = self._create_service_with_topic('network', self.host)
services = db.service_get_all(self.context)
az.set_availability_zones(self.context, services)
self.assertIsInstance(services[0]['host'], six.text_type)
cached_key = az._make_cache_key(services[0]['host'])
self.assertIsInstance(cached_key, str)
self._destroy_service(service)
def test_set_availability_zone_not_compute_service(self):
"""Test not compute service get right availability zone."""
service = self._create_service_with_topic('network', self.host)
services = db.service_get_all(self.context)
new_service = az.set_availability_zones(self.context, services)[0]
self.assertEqual(self.default_in_az, new_service['availability_zone'])
self._destroy_service(service)
def test_get_host_availability_zone(self):
"""Test get right availability zone by given host."""
self.assertEqual(self.default_az,
az.get_host_availability_zone(self.context, self.host))
service = self._create_service_with_topic('compute', self.host)
self._add_to_aggregate(service, self.agg)
self.assertEqual(self.availability_zone,
az.get_host_availability_zone(self.context, self.host))
def test_update_host_availability_zone(self):
"""Test availability zone could be update by given host."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
self.assertEqual(az_name,
az.get_host_availability_zone(self.context, self.host))
# Update AZ
new_az_name = 'az2'
self._update_az(agg_az1, new_az_name)
self.assertEqual(new_az_name,
az.get_host_availability_zone(self.context, self.host))
def test_delete_host_availability_zone(self):
"""Test availability zone could be deleted successfully."""
service = self._create_service_with_topic('compute', self.host)
# Create a new aggregate with an AZ and add the host to the AZ
az_name = 'az1'
agg_az1 = self._create_az('agg-az1', az_name)
self._add_to_aggregate(service, agg_az1)
self.assertEqual(az_name,
az.get_host_availability_zone(self.context, self.host))
# Delete the AZ via deleting the aggregate
self._delete_from_aggregate(service, agg_az1)
self.assertEqual(self.default_az,
az.get_host_availability_zone(self.context, self.host))
def test_get_availability_zones(self):
"""Test get_availability_zones."""
# When the param get_only_available of get_availability_zones is set
# to default False, it returns two lists, zones with at least one
# enabled services, and zones with no enabled services,
# when get_only_available is set to True, only return a list of zones
# with at least one enabled services.
# Use the following test data:
#
# zone host enabled
# nova-test host1 Yes
# nova-test host2 No
# nova-test2 host3 Yes
# nova-test3 host4 No
# <default> host5 No
agg2 = self._create_az('agg-az2', 'nova-test2')
agg3 = self._create_az('agg-az3', 'nova-test3')
service1 = self._create_service_with_topic('compute', 'host1',
disabled=False)
service2 = self._create_service_with_topic('compute', 'host2',
disabled=True)
service3 = self._create_service_with_topic('compute', 'host3',
disabled=False)
service4 = self._create_service_with_topic('compute', 'host4',
disabled=True)
self._create_service_with_topic('compute', 'host5',
disabled=True)
self._add_to_aggregate(service1, self.agg)
self._add_to_aggregate(service2, self.agg)
self._add_to_aggregate(service3, agg2)
self._add_to_aggregate(service4, agg3)
zones, not_zones = az.get_availability_zones(self.context)
self.assertEqual(['nova-test', 'nova-test2'], zones)
self.assertEqual(['nova-test3', 'nova'], not_zones)
zones = az.get_availability_zones(self.context, True)
self.assertEqual(['nova-test', 'nova-test2'], zones)
zones, not_zones = az.get_availability_zones(self.context,
with_hosts=True)
self.assertJsonEqual(zones,
[(u'nova-test2', set([u'host3'])),
(u'nova-test', set([u'host1']))])
self.assertJsonEqual(not_zones,
[(u'nova-test3', set([u'host4'])),
(u'nova', set([u'host5']))])
def test_get_instance_availability_zone_default_value(self):
"""Test get right availability zone by given an instance."""
fake_inst = objects.Instance(host=self.host,
availability_zone=None)
self.assertEqual(self.default_az,
az.get_instance_availability_zone(self.context, fake_inst))
def test_get_instance_availability_zone_from_aggregate(self):
"""Test get availability zone from aggregate by given an instance."""
host = 'host170'
service = self._create_service_with_topic('compute', host)
self._add_to_aggregate(service, self.agg)
fake_inst = objects.Instance(host=host,
availability_zone=self.availability_zone)
self.assertEqual(self.availability_zone,
az.get_instance_availability_zone(self.context, fake_inst))
@mock.patch.object(az._get_cache(), 'get')
def test_get_instance_availability_zone_cache_differs(self, cache_get):
host = 'host170'
service = self._create_service_with_topic('compute', host)
self._add_to_aggregate(service, self.agg)
cache_get.return_value = self.default_az
fake_inst = objects.Instance(host=host,
availability_zone=self.availability_zone)
self.assertEqual(
self.availability_zone,
az.get_instance_availability_zone(self.context, fake_inst))
def test_get_instance_availability_zone_no_host(self):
"""Test get availability zone from instance if host not set."""
fake_inst = objects.Instance(host=None, availability_zone='inst-az')
result = az.get_instance_availability_zone(self.context, fake_inst)
self.assertEqual('inst-az', result)
def test_get_instance_availability_zone_no_host_no_az(self):
"""Test get availability zone if neither host nor az is set."""
fake_inst = objects.Instance(host=None, availability_zone=None)
result = az.get_instance_availability_zone(self.context, fake_inst)
self.assertIsNone(result)
| apache-2.0 |
uclouvain/osis | base/models/learning_unit_enrollment.py | 1 | 3112 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.db import models
from base.models.enums import learning_unit_enrollment_state
from osis_common.models.serializable_model import SerializableModelAdmin, SerializableModel
class LearningUnitEnrollmentAdmin(SerializableModelAdmin):
list_display = ('student', 'learning_unit_year', 'offer', 'date_enrollment', 'enrollment_state', 'changed')
list_filter = ('learning_unit_year__academic_year', 'enrollment_state',)
search_fields = ['learning_unit_year__acronym',
'offer_enrollment__education_group_year__acronym',
'offer_enrollment__student__registration_id',
'offer_enrollment__student__person__first_name',
'offer_enrollment__student__person__last_name']
class LearningUnitEnrollment(SerializableModel):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
changed = models.DateTimeField(null=True, auto_now=True)
date_enrollment = models.DateField()
learning_unit_year = models.ForeignKey('LearningUnitYear', on_delete=models.CASCADE)
offer_enrollment = models.ForeignKey('OfferEnrollment', on_delete=models.PROTECT)
enrollment_state = models.CharField(max_length=20, choices=learning_unit_enrollment_state.STATES, default="")
class Meta:
unique_together = ('offer_enrollment', 'learning_unit_year', 'enrollment_state',)
@property
def student(self):
return self.offer_enrollment.student
@property
def offer(self):
return self.offer_enrollment.education_group_year
def __str__(self):
return u"%s - %s" % (self.learning_unit_year, self.offer_enrollment.student)
def find_by_learning_unit_year(a_learning_unit_year):
return LearningUnitEnrollment.objects.filter(learning_unit_year=a_learning_unit_year)
| agpl-3.0 |
remitamine/youtube-dl | youtube_dl/extractor/douyutv.py | 51 | 6878 | # coding: utf-8
from __future__ import unicode_literals
import time
import hashlib
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unescapeHTML,
unified_strdate,
urljoin,
)
class DouyuTVIE(InfoExtractor):
IE_DESC = '斗鱼'
_VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(?:[^/]+/)*(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'http://www.douyutv.com/iseven',
'info_dict': {
'id': '17732',
'display_id': 'iseven',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyutv.com/85982',
'info_dict': {
'id': '85982',
'display_id': '85982',
'ext': 'flv',
'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:746a2f7a253966a06755a912f0acc0d2',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'douyu小漠',
'is_live': True,
},
'params': {
'skip_download': True,
},
'skip': 'Room not found',
}, {
'url': 'http://www.douyutv.com/17732',
'info_dict': {
'id': '17732',
'display_id': '17732',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyu.com/xiaocang',
'only_matching': True,
}, {
# \"room_id\"
'url': 'http://www.douyu.com/t/lpl',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
if video_id.isdigit():
room_id = video_id
else:
page = self._download_webpage(url, video_id)
room_id = self._html_search_regex(
r'"room_id\\?"\s*:\s*(\d+),', page, 'room id')
# Grab metadata from mobile API
room = self._download_json(
'http://m.douyu.com/html5/live?roomId=%s' % room_id, video_id,
note='Downloading room info')['data']
# 1 = live, 2 = offline
if room.get('show_status') == '2':
raise ExtractorError('Live stream is offline', expected=True)
# Grab the URL from PC client API
# The m3u8 url from mobile API requires re-authentication every 5 minutes
tt = int(time.time())
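        # The PC client API expects an MD5 digest of the request path, query
        # parameters, timestamp and a fixed key, passed in the 'auth' header
        # together with the 'time' and 'aid' headers below.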
signContent = 'lapi/live/thirdPart/getPlay/%s?aid=pcclient&rate=0&time=%d9TUk5fjjUjg9qIMH3sdnh' % (room_id, tt)
sign = hashlib.md5(signContent.encode('ascii')).hexdigest()
video_url = self._download_json(
'http://coapi.douyucdn.cn/lapi/live/thirdPart/getPlay/' + room_id,
video_id, note='Downloading video URL info',
query={'rate': 0}, headers={
'auth': sign,
'time': str(tt),
'aid': 'pcclient'
})['data']['live_url']
title = self._live_title(unescapeHTML(room['room_name']))
description = room.get('show_details')
thumbnail = room.get('room_src')
uploader = room.get('nickname')
return {
'id': room_id,
'display_id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'is_live': True,
}
class DouyuShowIE(InfoExtractor):
_VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)'
_TESTS = [{
'url': 'https://v.douyu.com/show/rjNBdvnVXNzvE2yw',
'md5': '0c2cfd068ee2afe657801269b2d86214',
'info_dict': {
'id': 'rjNBdvnVXNzvE2yw',
'ext': 'mp4',
'title': '陈一发儿:砒霜 我有个室友系列!04-01 22点场',
'duration': 7150.08,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '陈一发儿',
'uploader_id': 'XrZwYelr5wbK',
'uploader_url': 'https://v.douyu.com/author/XrZwYelr5wbK',
'upload_date': '20170402',
},
}, {
'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw',
'only_matching': True,
}]
def _real_extract(self, url):
url = url.replace('vmobile.', 'v.')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
room_info = self._parse_json(self._search_regex(
r'var\s+\$ROOM\s*=\s*({.+});', webpage, 'room info'), video_id)
video_info = None
for trial in range(5):
# Sometimes Douyu rejects our request. Let's try it more times
try:
video_info = self._download_json(
'https://vmobile.douyu.com/video/getInfo', video_id,
query={'vid': video_id},
headers={
'Referer': url,
'x-requested-with': 'XMLHttpRequest',
})
break
except ExtractorError:
self._sleep(1, video_id)
if not video_info:
raise ExtractorError('Can\'t fetch video info')
formats = self._extract_m3u8_formats(
video_info['data']['video_url'], video_id,
entry_protocol='m3u8_native', ext='mp4')
upload_date = unified_strdate(self._html_search_regex(
r'<em>上传时间:</em><span>([^<]+)</span>', webpage,
'upload date', fatal=False))
uploader = uploader_id = uploader_url = None
mobj = re.search(
r'(?m)<a[^>]+href="/author/([0-9a-zA-Z]+)".+?<strong[^>]+title="([^"]+)"',
webpage)
if mobj:
uploader_id, uploader = mobj.groups()
uploader_url = urljoin(url, '/author/' + uploader_id)
return {
'id': video_id,
'title': room_info['name'],
'formats': formats,
'duration': room_info.get('duration'),
'thumbnail': room_info.get('pic'),
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
}
| unlicense |
xxxIsaacPeralxxx/anim-studio-tools | grind/python/util/glWidget.py | 5 | 11356 |
import sys
import math
import ctypes
from pimath import *
from PyQt4 import QtCore, QtGui, QtOpenGL
from camera import Camera
import grind
#-----------------------------------------------------------------------------
from rodin import logging
log = logging.get_logger('grind.mangle.gl_widget')
try:
from OpenGL.GL import *
from OpenGL.GLU import *
except ImportError:
app = QtGui.QApplication(sys.argv)
QtGui.QMessageBox.critical(None, "mangle", "PyOpenGL must be installed to run this example.")
sys.exit(1)
class GLWidget(QtOpenGL.QGLWidget):
xRotationChanged = QtCore.pyqtSignal(int)
yRotationChanged = QtCore.pyqtSignal(int)
zRotationChanged = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(GLWidget, self).__init__(parent)
self.renderable = None
self.object = 0
self.xRot = 0
self.yRot = 0
self.zRot = 0
self.width = 0
self.height = 0
self.display_selection_marker = False
self.selection_marker_bbox = grind.BBox()
self.selection_marker_bbox.set_colour(0xFF0000) # R G B
self.lastPos = QtCore.QPoint()
self.backgroundColour = QtGui.QColor.fromCmykF(0.28, 0.28, 0.28, 0.0)
self.foregroundColour = QtGui.QColor.fromCmykF(0.7, 0.7, 0.7, 0.0)
self.dist = 1.0
self.up = 1.0
self.drawGrid = True
self.drawDefaultObject = True
self.followBBox = False
self.moveGrid = False
self.camera = Camera()
self.frameView()
def setFollowBBox(self,follow):
self.followBBox = follow
self.updateGL()
def setCenterBBox(self,centered):
self.moveGrid = not centered
self.updateGL()
def setRenderable(self,renderable,callframeview=True):
self.renderable = renderable
if callframeview == True:
self.frameView()
self.resizeGL(self.width,self.height)
self.updateGL()
def minimumSizeHint(self):
return QtCore.QSize(50, 50)
def sizeHint(self):
return QtCore.QSize(640, 480)
def frameView(self,update=False):
if self.renderable is None:
self.camera.frame(V3f(0,0,0),1)
if update:
self.updateGL()
return
bb = self.renderable.getBounds()
height = bb.size().y
c = bb.center()
center = V3f(c.x,c.y,c.z)
self.camera.frame(center,height)
self.up = height*1.2
self.dist = self.camera.distanceNeeded(height)
if update:
self.updateGL()
def setXRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.xRotationChanged.emit(angle)
self.updateGL()
def setYRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.yRotationChanged.emit(angle)
self.updateGL()
def setZRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.zRotationChanged.emit(angle)
self.updateGL()
def initializeGL(self):
self.qglClearColor(self.foregroundColour.dark())
self.object = self.makeObject()
self.grid = self.makeGrid()
glShadeModel(GL_FLAT)
glEnable(GL_DEPTH_TEST)
def paintGL(self):
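        # Render order: clear the buffers, place the camera, draw the ground
        # grid (optionally re-centred on the renderable), then either the
        # renderable or the default placeholder object, and finally the
        # selection marker if a pick is pending.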
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glTranslatef( 0, -self.up, -self.dist )
if self.followBBox:
self.frameView(False)
self.camera.update(1/60.0)
if self.drawGrid:
move = self.moveGrid and self.renderable is not None
if move:
glPushMatrix()
center = self.renderable.getBounds().center()
glTranslatef(round(center.x/5)*5,round(center.y/5)*5,round(center.z/5)*5)
glCallList(self.grid)
if move:
glPopMatrix()
if self.renderable is None:
if self.drawDefaultObject:
glCallList(self.object)
else:
self.renderable.update()
self.renderable.render()
if self.display_selection_marker == True:
x = self.lastPos.x()
y = self.height - self.lastPos.y()
z = (GLfloat * 1)(0)
glReadPixels(x, y, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, z)
if z[0] < 1: # ignore void click
proj = (ctypes.c_double*16)()
proj = glGetDoublev(GL_PROJECTION_MATRIX)
model = (ctypes.c_double*16)()
model = glGetDoublev(GL_MODELVIEW_MATRIX)
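                # Unproject the clicked pixel (window x/y plus the sampled
                # depth) back into world space to position the marker bbox
                # under the cursor.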
(wx,wy,wz) = gluUnProject( x,y,z[0], model, proj, (0, 0, self.width, self.height) ) # model proj view
scale = (self.camera.pos - V3f(wx,wy,wz)).length() * 0.0025
self.selection_marker_bbox.min = V3f(wx - scale, wy - scale, wz - scale)
self.selection_marker_bbox.max = V3f(wx + scale, wy + scale, wz + scale)
glDisable(GL_DEPTH_TEST)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
self.selection_marker_bbox.render(1)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glEnable(GL_DEPTH_TEST)
def resizeGL(self, width, height):
self.width = width
self.height = height
side = min(width, height)
if side < 0:
return
self.camera.aspect = float(self.width)/float(self.height)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(35, float(self.width)/float(self.height), 0.01, 100)
glMatrixMode(GL_MODELVIEW)
def target_selection(self):
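        # Re-centre the orbit target on the last picked point while keeping
        # the current viewing direction and orbit distance.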
if self.display_selection_marker == True:
self.camera.lookat = V3f(self.selection_marker_bbox.center().x, self.selection_marker_bbox.center().y, self.selection_marker_bbox.center().z)
newdir = (self.camera.pos - self.camera.lookat).normalized()
self.camera.pos = self.camera.lookat + newdir * self.camera.dist
self.display_selection_marker = False
def mousePressEvent(self, event):
self.lastPos = event.pos()
self.camera.mouseButton(event.button(), True, self.lastPos.x(), self.lastPos.y())
self.updateGL()
def mouseReleaseEvent(self, event):
self.camera.mouseButton(event.button(), False, self.lastPos.x(), self.lastPos.y())
self.updateGL()
def mouseMoveEvent(self, event):
self.camera.mouseMotion(event.x(), event.y())
self.updateGL()
self.lastPos = event.pos()
def wheelEvent(self,event):
self.updateGL()
def makeObject(self):
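        # Default placeholder geometry: a display list of stacked quad rings,
        # each ring rotated slightly relative to the previous one.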
genList = glGenLists(1)
glNewList(genList, GL_COMPILE)
NumSectors = 13
Length = 10.0
LengthSec = 25
Outer = 0.5
Inner = 0.4
ZInner = -Length/2.0
ZOuter = ZInner+0.04
ZInner = 0.01
ZOuter = -0.01
for j in range(LengthSec+1):
glBegin(GL_QUADS)
for i in range(NumSectors):
angle1 = (i * 2 * math.pi) / NumSectors
x5 = Outer * math.sin(angle1)
y5 = Outer * math.cos(angle1)
x6 = Inner * math.sin(angle1)
y6 = Inner * math.cos(angle1)
angle2 = ((i + 1) * 2 * math.pi) / NumSectors
x7 = Inner * math.sin(angle2)
y7 = Inner * math.cos(angle2)
x8 = Outer * math.sin(angle2)
y8 = Outer * math.cos(angle2)
#self.quad(x5, y5, x6, y6, x7, y7, x8, y8, ZOuter, ZInner)
self.extrude(x6, y6, x7, y7, ZOuter, ZInner)
#self.extrude(x8, y8, x5, y5, ZOuter, ZInner)
glEnd()
#glTranslate(0,0,Length/LengthSec)
glRotate(6.8,0,1.91231233,0)
glEndList()
return genList
def quad(self, x1, y1, x2, y2, x3, y3, x4, y4, z1, z2):
self.qglColor(self.backgroundColour)
glVertex3d(x1, y1, z2)
glVertex3d(x2, y2, z2)
glVertex3d(x3, y3, z2)
glVertex3d(x4, y4, z2)
glVertex3d(x4, y4, z1)
glVertex3d(x3, y3, z1)
glVertex3d(x2, y2, z1)
glVertex3d(x1, y1, z1)
def extrude(self, x1, y1, x2, y2, z1, z2):
self.qglColor(self.backgroundColour.dark(250 + int(100 * x1)))
glVertex3d(x1, y1, z1)
glVertex3d(x2, y2, z1)
glVertex3d(x2, y2, z2)
glVertex3d(x1, y1, z2)
def normalizeAngle(self, angle):
while angle < 0:
angle += 360 * 16
while angle > 360 * 16:
angle -= 360 * 16
return angle
def makeGrid(self):
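        # Ground grid display list: bright major lines at 1 unit spacing over
        # a 10x10 area, plus darker minor lines at 0.1 unit spacing that skip
        # positions already covered by the major lines.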
genList = glGenLists(1)
glNewList(genList, GL_COMPILE)
glBegin(GL_LINES)
self.qglColor(self.backgroundColour.dark(150))
self.qglColor(QtGui.QColor(70,70,80))
size = 10.0
count = 10.0
xs = []
ys = []
for x in range(int(count)):
xpos = (x/count-0.5)*size
xs.append(xpos)
for y in range(int(count)):
ypos = (y/count-0.5)*size
ys.append(ypos)
a = ( xpos,0, ypos)
b = ( xpos,0,-ypos)
c = (-xpos,0,-ypos)
d = (-xpos,0, ypos)
glVertex3d(*a)
glVertex3d(*b)
glVertex3d(*d)
glVertex3d(*c)
glVertex3d(*a)
glVertex3d(*d)
glVertex3d(*b)
glVertex3d(*c)
self.qglColor(QtGui.QColor(54,54,54))
size = 10.0
count = 100.0
for x in range(int(count)):
xpos = (x/count-0.5)*size
if xpos in xs: continue
for y in range(int(count)):
ypos = (y/count-0.5)*size
if ypos in ys: continue
a = ( xpos,0, ypos)
b = ( xpos,0,-ypos)
c = (-xpos,0,-ypos)
d = (-xpos,0, ypos)
glVertex3d(*a)
glVertex3d(*b)
glVertex3d(*d)
glVertex3d(*c)
glVertex3d(*a)
glVertex3d(*d)
glVertex3d(*b)
glVertex3d(*c)
glEnd()
glEndList()
return genList
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 |
h2oai/h2o-3 | h2o-py/tests/testdir_algos/naivebayes/pyunit_grid_carsNB.py | 3 | 4260 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
import copy
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from h2o.grid.grid_search import H2OGridSearch
def grid_cars_NB():
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif(seed=42)
train = cars[r > .2]
validation_scheme = random.randint(1,3) # 1:none, 2:cross-validation, 3:validation set
print("Validation scheme: {0}".format(validation_scheme))
if validation_scheme == 2:
nfolds = 2
print("Nfolds: 2")
if validation_scheme == 3:
valid = cars[r <= .2]
grid_space = pyunit_utils.make_random_grid_space(algo="naiveBayes")
print("Grid space: {0}".format(grid_space))
    problem = random.sample(["binomial", "multinomial"], 1)[0]  # take the sampled element, not the one-element list
predictors = ["displacement","power","weight","acceleration","year"]
if problem == "binomial":
response_col = "economy_20mpg"
else:
response_col = "cylinders"
print("Predictors: {0}".format(predictors))
print("Response: {0}".format(response_col))
print("Converting the response column to a factor...")
train[response_col] = train[response_col].asfactor()
if validation_scheme == 3:
valid[response_col] = valid[response_col].asfactor()
print("Grid space: {0}".format(grid_space))
print("Constructing the grid of nb models...")
cars_nb_grid = H2OGridSearch(H2ONaiveBayesEstimator, hyper_params=grid_space)
if validation_scheme == 1:
cars_nb_grid.train(x=predictors,y=response_col,training_frame=train)
elif validation_scheme == 2:
cars_nb_grid.train(x=predictors,y=response_col,training_frame=train,nfolds=nfolds)
else:
cars_nb_grid.train(x=predictors,y=response_col,training_frame=train,validation_frame=valid)
for model in cars_nb_grid:
assert isinstance(model, H2ONaiveBayesEstimator)
print("Performing various checks of the constructed grid...")
print("Check cardinality of grid, that is, the correct number of models have been created...")
size_of_grid_space = 1
print(grid_space)
for v in list(grid_space.values()):
v2 = [v] if type(v) != list else v
size_of_grid_space = size_of_grid_space * len(v2)
actual_size = len(cars_nb_grid)
assert size_of_grid_space == actual_size, "Expected size of grid to be {0}, but got {1}" \
"".format(size_of_grid_space,actual_size)
print("Check correct type value....")
model_type = cars_nb_grid[0].type
true_model_type = "classifier"
assert model_type == true_model_type, "Type of model ({0}) is incorrect, expected value is {1}.".format(model_type, true_model_type)
print("Duplicate-entries-in-grid-space check")
new_grid_space = copy.deepcopy(grid_space)
for name in list(grid_space.keys()):
new_grid_space[name] = grid_space[name] + grid_space[name]
print("The new search space: {0}".format(new_grid_space))
print("Constructing the new grid of nb models...")
cars_nb_grid2 = H2OGridSearch(H2ONaiveBayesEstimator, hyper_params=new_grid_space)
if validation_scheme == 1:
cars_nb_grid2.train(x=predictors,y=response_col,training_frame=train)
elif validation_scheme == 2:
cars_nb_grid2.train(x=predictors,y=response_col,training_frame=train,nfolds=nfolds)
else:
cars_nb_grid2.train(x=predictors,y=response_col,training_frame=train,validation_frame=valid)
actual_size2 = len(cars_nb_grid2)
assert actual_size == actual_size2, "Expected duplicates to be ignored. Without dups grid size: {0}. With dups " \
"size: {1}".format(actual_size, actual_size2)
for model in cars_nb_grid2:
assert isinstance(model, H2ONaiveBayesEstimator)
print("Check that the hyper_params that were passed to grid, were used to construct the models...")
for name in list(grid_space.keys()):
print(name)
pyunit_utils.expect_model_param(cars_nb_grid, name, grid_space[name])
if __name__ == "__main__":
pyunit_utils.standalone_test(grid_cars_NB)
else:
grid_cars_NB()
| apache-2.0 |
sk413025/thug | src/DOM/W3C/HTML/HTMLElement.py | 7 | 2487 | #!/usr/bin/env python
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import bs4 as BeautifulSoup
import logging
from Element import Element
from Style.CSS.ElementCSSInlineStyle import ElementCSSInlineStyle
from .attr_property import attr_property
from .text_property import text_property
log = logging.getLogger("Thug")
class HTMLElement(Element, ElementCSSInlineStyle):
id = attr_property("id")
title = attr_property("title")
lang = attr_property("lang")
dir = attr_property("dir")
className = attr_property("class", default = "")
def getInnerHTML(self):
if not self.hasChildNodes():
return ""
html = StringIO()
for tag in self.tag.contents:
html.write(unicode(tag))
return html.getvalue()
def setInnerHTML(self, html):
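        # Re-parse the assigned markup and append the resulting nodes to this
        # element, dispatching each tag to the matching DFT handler so that
        # dynamically injected content is analysed too.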
self.tag.clear()
soup = BeautifulSoup.BeautifulSoup(html, "html5lib")
for node in list(soup.head.descendants):
self.tag.append(node)
name = getattr(node, 'name', None)
if name is None:
continue
handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
if handler:
handler(node)
for node in list(soup.body.children):
self.tag.append(node)
name = getattr(node, 'name', None)
if name is None:
continue
handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
if handler:
handler(node)
#soup.head.unwrap()
#soup.body.unwrap()
#soup.html.wrap(self.tag)
#self.tag.html.unwrap()
for node in self.tag.descendants:
name = getattr(node, 'name', None)
if not name:
continue
try:
p = getattr(self.doc.window.doc.DFT, 'handle_%s' % (name, ), None)
except:
p = getattr(log.DFT, 'handle_%s' % (name, ), None)
if p:
p(node)
innerHTML = property(getInnerHTML, setInnerHTML)
# WARNING: NOT DEFINED IN W3C SPECS!
def focus(self):
pass
@property
def sourceIndex(self):
return None
| gpl-2.0 |
Khroki/MCEdit-Unified | pymclevel/test/templevel.py | 13 | 1394 | import atexit
import os
from os.path import join
import shutil
import tempfile
from pymclevel import mclevel
__author__ = 'Rio'
tempdir = os.path.join(tempfile.gettempdir(), "pymclevel_test")
if not os.path.exists(tempdir):
os.mkdir(tempdir)
def mktemp(suffix):
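    # Reserve a unique path under the test temp directory without creating it
    # (mkdtemp followed by rmdir), so callers can copy or build a level there.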
td = tempfile.mkdtemp(suffix, dir=tempdir)
os.rmdir(td)
return td
class TempLevel(object):
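    """Copies an existing test level (or builds one via createFunc) into a
    temporary location, opens it with mclevel and removes it again at exit."""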
def __init__(self, filename, createFunc=None):
if not os.path.exists(filename):
filename = join("testfiles", filename)
tmpname = mktemp(os.path.basename(filename))
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.copytree(filename, tmpname)
else:
shutil.copy(filename, tmpname)
elif createFunc:
createFunc(tmpname)
else:
raise IOError("File %s not found." % filename)
self.tmpname = tmpname
self.level = mclevel.fromFile(tmpname)
atexit.register(self.removeTemp)
def __del__(self):
if hasattr(self, 'level'):
self.level.close()
del self.level
self.removeTemp()
def removeTemp(self):
if hasattr(self, 'tmpname'):
filename = self.tmpname
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.unlink(filename)
| isc |
Eaglemania/TOL | pyglet/gl/glxext_mesa.py | 46 | 2050 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''This file is currently hand-coded; I don't have a MESA header file to build
off.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
glXSwapIntervalMESA = _link_function('glXSwapIntervalMESA', c_int, [c_int], 'MESA_swap_control')
| gpl-2.0 |
DirectXMan12/nova-hacking | nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py | 1 | 6490 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
def fake_compute_api(*args, **kwargs):
return True
def fake_compute_api_get(self, context, instance_id):
return {
'id': 1,
'uuid': instance_id,
'vm_state': vm_states.ACTIVE,
'task_state': None, 'host': 'host1'
}
class EvacuateTest(test.TestCase):
_methods = ('resize', 'evacuate')
def setUp(self):
super(EvacuateTest, self).setUp()
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.UUID = uuid.uuid4()
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
def test_evacuate_instance_with_no_target(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
req = webob.Request.blank('/v3/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'onSharedStorage': 'False',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_evacuate_instance_with_target(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my_host',
'onSharedStorage': 'false',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
resp = req.get_response(app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
self.assertEqual("MyNewPass", resp_json['adminPass'])
def test_evacuate_shared_and_pass(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my_host',
'onSharedStorage': 'True',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_evacuate_not_shared_pass_generated(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my_host',
'onSharedStorage': 'False',
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
resp = req.get_response(app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
def test_evacuate_shared(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my_host',
'onSharedStorage': 'True',
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_not_admin(self):
ctxt = context.RequestContext('fake', 'fake', is_admin=False)
app = fakes.wsgi_app_v3(fake_auth_context=ctxt)
uuid1 = self.UUID
req = webob.Request.blank('/v3/servers/%s/action' % uuid1)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'host': 'my_host',
'onSharedStorage': 'True',
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
| apache-2.0 |
SaschaMester/delicium | third_party/python_gflags/gflags.py | 448 | 104236 | #!/usr/bin/env python
#
# Copyright (c) 2002, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the command
line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
gflags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('my_version', 0, 'Version number.')
gflags.DEFINE_string('filename', None, 'Input file name', short_name='f')
gflags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
gflags.MarkFlagAsRequired('filename')
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module)
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', '[email protected]',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulted help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path.
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
This module requires at least python 2.2.1 to run.
"""
import cgi
import getopt
import os
import re
import string
import struct
import sys
# pylint: disable-msg=C6204
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import gflags_validators
# pylint: enable-msg=C6204
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModuleObjectAndName():
"""Returns the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
globals_for_frame = sys._getframe(depth).f_globals
module, module_name = _GetModuleObjectAndName(globals_for_frame)
if module_name is not None:
return module, module_name
raise AssertionError("No module was found")
def _GetCallingModule():
"""Returns the name of the module that's calling into this module."""
return _GetCallingModuleObjectAndName()[1]
def _GetThisModuleObjectAndName():
"""Returns: (module object, module name) for this module."""
return _GetModuleObjectAndName(globals())
# module exceptions:
class FlagsError(Exception):
"""The base class for all flags errors."""
pass
class DuplicateFlag(FlagsError):
"""Raised if there is a flag naming conflict."""
pass
class CantOpenFlagFileError(FlagsError):
"""Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
pass
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
"""Special case of DuplicateFlag -- SWIG flag value can't be set to None.
This can be raised when a duplicate flag is created. Even if allow_override is
True, we still abort if the new value is None, because it's currently
impossible to pass None default value back to SWIG. See FlagValues.SetDefault
for details.
"""
pass
class DuplicateFlagError(DuplicateFlag):
"""A DuplicateFlag whose message cites the conflicting definitions.
A DuplicateFlagError conveys more information than a DuplicateFlag,
namely the modules where the conflicting definitions occur. This
class was created to avoid breaking external modules which depend on
the existing DuplicateFlags interface.
"""
def __init__(self, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
"""
self.flagname = flagname
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
msg = "The flag '%s' is defined twice. First from %s, Second from %s" % (
self.flagname, first_module, second_module)
DuplicateFlag.__init__(self, msg)
class IllegalFlagValue(FlagsError):
"""The flag command line argument is illegal."""
pass
class UnrecognizedFlag(FlagsError):
"""Raised if a flag is unrecognized."""
pass
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
def __init__(self, flagname, flagvalue=''):
self.flagname = flagname
self.flagvalue = flagvalue
UnrecognizedFlag.__init__(
self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
"""Returns: an integer, the width of help lines that is used in TextWrap."""
if (not sys.stdout.isatty()) or (termios is None) or (fcntl is None):
return _help_width
try:
data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
columns = struct.unpack('hh', data)[1]
# Emacs mode returns 0.
# Here we assume that any value below 40 is unreasonable
if columns >= 40:
return columns
# Returning an int as default is fine, int(int) just return the int.
return int(os.getenv('COLUMNS', _help_width))
except (TypeError, IOError, struct.error):
return _help_width
def CutCommonSpacePrefix(text):
"""Removes a common space prefix from the lines of a multiline text.
If the first line does not start with a space, it is left as it is and
only in the remaining lines a common space prefix is being searched
for. That means the first line will stay untouched. This is especially
useful to turn doc strings into help texts. This is because some
people prefer to have the doc comment start already after the
apostrophe and then align the following lines while others have the
apostrophes on a separate line.
The function also drops trailing empty lines and ignores empty lines
following the initial content line while calculating the initial
common whitespace.
Args:
text: text to work on
Returns:
the resulting text
"""
text_lines = text.splitlines()
# Drop trailing empty lines
while text_lines and not text_lines[-1]:
text_lines = text_lines[:-1]
if text_lines:
# We got some content, is the first line starting with a space?
if text_lines[0] and text_lines[0][0].isspace():
text_first_line = []
else:
text_first_line = [text_lines.pop(0)]
# Calculate length of common leading whitespace (only over content lines)
common_prefix = os.path.commonprefix([line for line in text_lines if line])
space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
# If we have a common space prefix, drop it from all lines
if space_prefix_len:
for index in xrange(len(text_lines)):
if text_lines[index]:
text_lines[index] = text_lines[index][space_prefix_len:]
return '\n'.join(text_first_line + text_lines)
return ''
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
"""Wraps a given text to a maximum line length and returns it.
We turn lines that only contain whitespace into empty lines. We keep
new lines and tabs (e.g., we do not treat tabs as spaces).
Args:
text: text to wrap
length: maximum length of a line, includes indentation
if this is None then use GetHelpWidth()
indent: indent for all but first line
firstline_indent: indent for first line; if None, fall back to indent
tabs: replacement for tabs
Returns:
wrapped text
Raises:
FlagsError: if indent not shorter than length
FlagsError: if firstline_indent not shorter than length
"""
# Get defaults where callee used None
if length is None:
length = GetHelpWidth()
if indent is None:
indent = ''
if len(indent) >= length:
raise FlagsError('Indent must be shorter than length')
# In line we will be holding the current line which is to be started
# with indent (or firstline_indent if available) and then appended
# with words.
if firstline_indent is None:
firstline_indent = ''
line = indent
else:
line = firstline_indent
if len(firstline_indent) >= length:
raise FlagsError('First line indent must be shorter than length')
# If the callee does not care about tabs we simply convert them to
# spaces If callee wanted tabs to be single space then we do that
# already here.
if not tabs or tabs == ' ':
text = text.replace('\t', ' ')
else:
tabs_are_whitespace = not tabs.strip()
line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
# Split the text into lines and the lines with the regex above. The
# resulting lines are collected in result[]. For each split we get the
# spaces, the tabs and the next non white space (e.g. next word).
result = []
for text_line in text.splitlines():
# Store result length so we can find out whether processing the next
# line gave any new content
old_result_len = len(result)
# Process next line with line_regex. For optimization we do an rstrip().
# - process tabs (changes either line or word, see below)
# - process word (first try to squeeze on line, then wrap or force wrap)
# Spaces found on the line are ignored, they get added while wrapping as
# needed.
for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
# If tabs weren't converted to spaces, handle them now
if current_tabs:
# If the last thing we added was a space anyway then drop
# it. But let's not get rid of the indentation.
if (((result and line != indent) or
(not result and line != firstline_indent)) and line[-1] == ' '):
line = line[:-1]
        # Add the tabs. If that means adding whitespace, just append it to
        # the line; the rstrip() code will shorten the line down if
        # necessary.
if tabs_are_whitespace:
line += tabs * len(current_tabs)
else:
# if not all tab replacement is whitespace we prepend it to the word
word = tabs * len(current_tabs) + word
# Handle the case where word cannot be squeezed onto current last line
if len(line) + len(word) > length and len(indent) + len(word) <= length:
result.append(line.rstrip())
line = indent + word
word = ''
# No space left on line or can we append a space?
if len(line) + 1 >= length:
result.append(line.rstrip())
line = indent
else:
line += ' '
# Add word and shorten it up to allowed line length. Restart next
# line with indent and repeat, or add a space if we're done (word
# finished) This deals with words that cannot fit on one line
# (e.g. indent + word longer than allowed line length).
while len(line) + len(word) >= length:
line += word
result.append(line[:length])
word = line[length:]
line = indent
# Default case, simply append the word and a space
if word:
line += word + ' '
# End of input line. If we have content we finish the line. If the
# current line is just the indent but we had content in during this
# original line then we need to add an empty line.
if (result and line != indent) or (not result and line != firstline_indent):
result.append(line.rstrip())
elif len(result) == old_result_len:
result.append('')
line = indent
return '\n'.join(result)
def DocToHelp(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings
doc = CutCommonSpacePrefix(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space
# 1) keep double new lines
# 2) keep ws after new lines if not empty line
# 3) all other new lines shall be changed to a space
# Solution: Match new lines between non white space and replace with space.
doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M)
return doc
def _GetModuleObjectAndName(globals_dict):
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A pair consisting of (1) module object and (2) module name (a
string). Returns (None, None) if the module could not be
identified.
"""
# The use of .items() (instead of .iteritems()) is NOT a mistake: if
# a parallel thread imports a module while we iterate over
# .iteritems() (not nice, but possible), we get a RuntimeError ...
# Hence, we use the slightly slower but safer .items().
for name, module in sys.modules.items():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
# Pick a more informative name for the main module.
name = sys.argv[0]
return (module, name)
return (None, None)
def _GetMainModule():
"""Returns: string, name of the module from which execution started."""
# First, try to use the same logic used by _GetCallingModuleObjectAndName(),
# i.e., call _GetModuleObjectAndName(). For that we first need to
# find the dictionary that the main module uses to store the
# globals.
#
# That's (normally) the same dictionary object that the deepest
# (oldest) stack frame is using for globals.
deepest_frame = sys._getframe(0)
while deepest_frame.f_back is not None:
deepest_frame = deepest_frame.f_back
globals_for_main_module = deepest_frame.f_globals
main_module_name = _GetModuleObjectAndName(globals_for_main_module)[1]
# The above strategy fails in some cases (e.g., tools that compute
# code coverage by redefining, among other things, the main module).
# If so, just use sys.argv[0]. We can probably always do this, but
# it's safest to try to use the same logic as _GetCallingModuleObjectAndName()
if main_module_name is None:
main_module_name = sys.argv[0]
return main_module_name
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
  objects through the __call__ method. Unparsed arguments, including
  argv[0] (e.g. the program name), are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module id (int) -> list of Flag objects that are defined by
# that module.
self.__dict__['__flags_by_module_id'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
"""Use GNU-style scanning. Allows mixing of flag and non-flag arguments.
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt
Args:
      use_gnu_getopt: whether or not to use GNU style scanning.
"""
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def FlagsByModuleIdDict(self):
"""Returns the dictionary of module_id -> list of defined flags.
Returns:
A dictionary. Its keys are module IDs (ints). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module_id']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterFlagByModuleId(self, module_id, flag):
"""Records the module that defines a specific flag.
Args:
module_id: An int, the ID of the Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module_id = self.FlagsByModuleIdDict()
flags_by_module_id.setdefault(module_id, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
      A new list of Flag objects. Callers may update this list as they
      wish: none of those changes will affect the internals of this
      FlagValues object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
      A new list of Flag objects. Callers may update this list as they
      wish: none of those changes will affect the internals of this
      FlagValues object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
      flagname: Name of the flag to look up.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module, flags in self.FlagsByModuleDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module
return default
def FindModuleIdDefiningFlag(self, flagname, default=None):
"""Return the ID of the module defining this flag, or default.
Args:
      flagname: Name of the flag to look up.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module_id, flags in self.FlagsByModuleIdDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module_id
return default
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().iteritems():
      # Each flag with a short name appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
try:
self[flag_name] = flag
except DuplicateFlagError:
raise DuplicateFlagError(flag_name, self,
other_flag_values=flag_values)
def RemoveFlagValues(self, flag_values):
"""Remove flags that were previously appended from another FlagValues.
Args:
flag_values: registry containing flags to remove.
"""
for flag_name in flag_values.FlagDict():
self.__delattr__(flag_name)
def __setitem__(self, name, flag):
"""Registers a new flag variable."""
fl = self.FlagDict()
if not isinstance(flag, Flag):
raise IllegalFlagValue(flag)
if not isinstance(name, type("")):
raise FlagsError("Flag name must be a string")
if len(name) == 0:
raise FlagsError("Flag name cannot be empty")
# If running under pychecker, duplicate keys are likely to be
# defined. Disable check for duplicate keys when pycheck'ing.
if (name in fl and not flag.allow_override and
not fl[name].allow_override and not _RUNNING_PYCHECKER):
module, module_name = _GetCallingModuleObjectAndName()
if (self.FindModuleDefiningFlag(name) == module_name and
id(module) != self.FindModuleIdDefiningFlag(name)):
# If the flag has already been defined by a module with the same name,
# but a different ID, we can stop here because it indicates that the
# module is simply being imported a subsequent time.
return
raise DuplicateFlagError(name, self)
short_name = flag.short_name
if short_name is not None:
if (short_name in fl and not flag.allow_override and
not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(short_name, self)
fl[short_name] = flag
fl[name] = flag
global _exported_flags
_exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
self._AssertValidators(fl[name].validators)
return value
def _AssertAllValidators(self):
all_validators = set()
for flag in self.FlagDict().itervalues():
for validator in flag.validators:
all_validators.add(validator)
self._AssertValidators(all_validators)
def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(gflags_validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValue: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.Verify(self)
except gflags_validators.Error, e:
message = validator.PrintFlagsWithValues(self)
raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
    Note: this is non-trivial: in addition to its normal name, a flag
    may have a short name too. In self.FlagDict(), both the normal and
    the short name are mapped to the same flag object. E.g., calling
    only "del FLAGS.short_name" does not unregister the corresponding
    Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del flag_values_object.<flag_name>
E.g.,
gflags.DEFINE_integer('foo', 1, 'Integer flag.')
del gflags.FLAGS.foo
Args:
flag_name: A string, the name of the flag to be deleted.
Raises:
AttributeError: When there is no registered flag named flag_name.
"""
fl = self.FlagDict()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
if not self._FlagIsRegistered(flag_obj):
# If the Flag object indicated by flag_name is no longer
# registered (please see the docstring of _FlagIsRegistered), then
# we delete the occurrences of the flag object in all our internal
# dictionaries.
self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.FlagsByModuleIdDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.iteritems():
# while (as opposed to if) takes care of multiple occurrences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
fl[name].SetDefault(value)
self._AssertValidators(fl[name].validators)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return iter(self.FlagDict())
def __call__(self, argv):
"""Parses flags from argv; stores parsed flags into this FlagValues object.
All unparsed arguments are returned. Flags are parsed using the GNU
Program Argument Syntax Conventions, using getopt:
http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
Args:
argv: argument list. Can be of any type that may be converted to a list.
Returns:
The list of arguments not parsed as options, including argv[0]
Raises:
FlagsError: on any parsing error
"""
# Support any sequence type that can be converted to a list
argv = list(argv)
shortopts = ""
longopts = []
fl = self.FlagDict()
    # This pre-parses the argv list for --flagfile=<> options.
argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
# Correct the argv to support the google style of passing boolean
# parameters. Boolean parameters may be passed by using --mybool,
# --nomybool, --mybool=(true|false|1|0). getopt does not support
# having options that may or may not have a parameter. We replace
# instances of the short form --mybool and --nomybool with their
# full forms: --mybool=(true|false).
original_argv = list(argv) # list() makes a copy
shortest_matches = None
for name, flag in fl.items():
if not flag.boolean:
continue
if shortest_matches is None:
# Determine the smallest allowable prefix for all flag names
shortest_matches = self.ShortestUniquePrefixes(fl)
no_name = 'no' + name
prefix = shortest_matches[name]
no_prefix = shortest_matches[no_name]
# Replace all occurrences of this boolean with extended forms
for arg_idx in range(1, len(argv)):
arg = argv[arg_idx]
if arg.find('=') >= 0: continue
if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
argv[arg_idx] = ('--%s=true' % name)
elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
argv[arg_idx] = ('--%s=false' % name)
# Loop over all of the flags, building up the lists of short options
# and long options that will be passed to getopt. Short options are
# specified as a string of letters, each letter followed by a colon
# if it takes an argument. Long options are stored in an array of
# strings. Each string ends with an '=' if it takes an argument.
for name, flag in fl.items():
longopts.append(name + "=")
if len(name) == 1: # one-letter option: allow short flag type also
shortopts += name
if not flag.boolean:
shortopts += ":"
longopts.append('undefok=')
undefok_flags = []
# In case --undefok is specified, loop to pick up unrecognized
# options one by one.
unrecognized_opts = []
args = argv[1:]
while True:
try:
if self.__dict__['__use_gnu_getopt']:
optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
else:
optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
break
except getopt.GetoptError, e:
if not e.opt or e.opt in fl:
# Not an unrecognized option, re-raise the exception as a FlagsError
raise FlagsError(e)
# Remove offender from args and try again
for arg_index in range(len(args)):
if ((args[arg_index] == '--' + e.opt) or
(args[arg_index] == '-' + e.opt) or
(args[arg_index].startswith('--' + e.opt + '='))):
unrecognized_opts.append((e.opt, args[arg_index]))
args = args[0:arg_index] + args[arg_index+1:]
break
else:
# We should have found the option, so we don't expect to get
# here. We could assert, but raising the original exception
# might work better.
raise FlagsError(e)
for name, arg in optlist:
if name == '--undefok':
flag_names = arg.split(',')
undefok_flags.extend(flag_names)
# For boolean flags, if --undefok=boolflag is specified, then we should
# also accept --noboolflag, in addition to --boolflag.
# Since we don't know the type of the undefok'd flag, this will affect
# non-boolean flags as well.
# NOTE: You shouldn't use --undefok=noboolflag, because then we will
# accept --nonoboolflag here. We are choosing not to do the conversion
# from noboolflag -> boolflag because of the ambiguity that flag names
# can start with 'no'.
undefok_flags.extend('no' + name for name in flag_names)
continue
if name.startswith('--'):
# long option
name = name[2:]
short_option = 0
else:
# short option
name = name[1:]
short_option = 1
if name in fl:
flag = fl[name]
if flag.boolean and short_option: arg = 1
flag.Parse(arg)
# If there were unrecognized options, raise an exception unless
# the options were named via --undefok.
for opt, value in unrecognized_opts:
if opt not in undefok_flags:
raise UnrecognizedFlagError(opt, value)
if unparsed_args:
if self.__dict__['__use_gnu_getopt']:
# if using gnu_getopt just return the program name + remainder of argv.
ret_val = argv[:1] + unparsed_args
else:
# unparsed_args becomes the first non-flag detected by getopt to
# the end of argv. Because argv may have been modified above,
# return original_argv for this region.
ret_val = argv[:1] + original_argv[-len(unparsed_args):]
else:
ret_val = argv[:1]
self._AssertAllValidators()
return ret_val
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return list(self.FlagDict())
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
def GetHelp(self, prefix=''):
"""Generates a help string for all known flags."""
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = sorted(flags_by_module)
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
self.__RenderModuleFlags('gflags',
_SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(
self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
helplist, prefix)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
def MainModuleHelp(self):
"""Describe the key flags of the main module.
Returns:
string describing the key flags of a module.
"""
return self.ModuleHelp(_GetMainModule())
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
fl = self.FlagDict()
special_fl = _SPECIAL_FLAGS.FlagDict()
flaglist = [(flag.name, flag) for flag in flaglist]
flaglist.sort()
flagset = {}
for (name, flag) in flaglist:
# It's possible this flag got deleted or overridden since being
# registered in the per-module flaglist. Check now against the
# canonical source of current flag information, the FlagDict.
if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
# a different flag is using this name now
continue
# only print help once
if flag in flagset: continue
flagset[flag] = 1
flaghelp = ""
if flag.short_name: flaghelp += "-%s," % flag.short_name
if flag.boolean:
flaghelp += "--[no]%s" % flag.name + ":"
else:
flaghelp += "--%s" % flag.name + ":"
flaghelp += " "
if flag.help:
flaghelp += flag.help
flaghelp = TextWrap(flaghelp, indent=prefix+" ",
firstline_indent=prefix)
if flag.default_as_str:
flaghelp += "\n"
flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
indent=prefix+" ")
if flag.parser.syntactic_help:
flaghelp += "\n"
flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
indent=prefix+" ")
output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
"""Returns: dictionary; maps flag names to their shortest unique prefix."""
# Sort the list of flag names
sorted_flags = []
for name, flag in fl.items():
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique
# prefix by comparing itself to the next name and to the previous
# name (the latter check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
def __IsFlagFileDirective(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Returns the useful (!=comments, etc) lines from a file with flags.
Args:
filename: A string, the name of the flag file.
parsed_file_list: A list of the names of the files we have
already read. MUTATED BY THIS FUNCTION.
Returns:
List of strings. See the note below.
NOTE(springer): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
    line_list = []  # All lines from the flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError, e_msg:
raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif line.startswith('#') or line.startswith('//'):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
        # A little safety check: avoid reparsing a file we've already parsed.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename,
parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' %
(sub_filename,))
else:
# Any line that's not a comment or a nested flagfile should get
# copied into 2nd position. This leaves earlier arguments
# further back in the list, thus giving them higher priority.
flag_line_list.append(line.strip())
return flag_line_list
def ReadFlagsFromFiles(self, argv, force_gnu=True):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: A list of strings, usually sys.argv[1:], which may contain one or
more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: If False, --flagfile parsing obeys normal flag semantics.
If True, --flagfile parsing instead follows gnu_getopt semantics.
*** WARNING *** force_gnu=False may become the future default!
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
References: Global gflags.FLAG class instance.
This function should be called before the normal FLAGS(argv) call.
This function scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list between the
first item of the list and any subsequent items in the list.
Note that your application's flags are still defined the usual way
using gflags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> Flags from the command line argv _should_ always take precedence!
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be processed after the parent flag file is done.
--> For duplicate flags, first one we hit should "win".
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
parsed_file_list = []
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self.__IsFlagFileDirective(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise IllegalFlagValue('--flagfile with no argument')
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self.ExtractFilename(current_arg)
new_argv.extend(
self.__GetFlagFileLines(flag_filename, parsed_file_list))
else:
new_argv.append(current_arg)
# Stop parsing after '--', like getopt and gnu_getopt.
if current_arg == '--':
break
# Stop parsing after a non-flag, like getopt.
if not current_arg.startswith('-'):
if not force_gnu and not self.__dict__['__use_gnu_getopt']:
break
if rest_of_args:
new_argv.extend(rest_of_args)
return new_argv
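  # A minimal sketch of a flagfile (hypothetical file name and contents):
  #
  #   # saved_flags.cfg  (comments start with '#' or '//')
  #   --port=8080
  #   --nouse_cache
  #   # nested flagfiles are expanded in place:
  #   --flagfile=more_flags.cfg
  #
  #   argv = FLAGS(['prog', '--flagfile=saved_flags.cfg'])
  #
  # The file's lines are spliced into argv in place of the directive;
  # circular includes are skipped with a warning on stderr.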
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
def AppendFlagsIntoFile(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
from http://code.google.com/p/google-gflags
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
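  # A minimal sketch of round-tripping flag state through a flagfile
  # (hypothetical file name):
  #
  #   FLAGS.AppendFlagsIntoFile('current_flags.cfg')
  #   # ... later, or in another process:
  #   argv = FLAGS(['prog', '--flagfile=current_flags.cfg'])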
def WriteHelpInXMLFormat(self, outfile=None):
"""Outputs flag documentation in XML format.
NOTE: We use element names that are consistent with those used by
the C++ command-line flag library, from
http://code.google.com/p/google-gflags
We also use a few new elements (e.g., <key>), but we do not
interfere / overlap with existing XML elements used by the C++
library. Please maintain this consistency.
Args:
outfile: File object we write to. Default None means sys.stdout.
"""
outfile = outfile or sys.stdout
outfile.write('<?xml version=\"1.0\"?>\n')
outfile.write('<AllFlags>\n')
indent = ' '
_WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
indent)
usage_doc = sys.modules['__main__'].__doc__
if not usage_doc:
usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
else:
usage_doc = usage_doc.replace('%s', sys.argv[0])
_WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
# Get list of key flags for the main module.
key_flags = self._GetKeyFlagsForModule(_GetMainModule())
# Sort flags by declaring module name and next by flag name.
flags_by_module = self.FlagsByModuleDict()
all_module_names = list(flags_by_module.keys())
all_module_names.sort()
for module_name in all_module_names:
flag_list = [(f.name, f) for f in flags_by_module[module_name]]
flag_list.sort()
for unused_flag_name, flag in flag_list:
is_key = flag in key_flags
flag.WriteInfoInXMLFormat(outfile, module_name,
is_key=is_key, indent=indent)
outfile.write('</AllFlags>\n')
outfile.flush()
def AddValidator(self, validator):
"""Register new flags validator to be checked.
Args:
validator: gflags_validators.Validator
Raises:
AttributeError: if validators work with a non-existing flag.
"""
for flag_name in validator.GetFlagsNames():
flag = self.FlagDict()[flag_name]
flag.validators.append(validator)
# end of FlagValues definition
# The global FlagValues instance
FLAGS = FlagValues()
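# A minimal sketch of the typical life cycle of the global FLAGS object
# (the flag name below is hypothetical):
#
#   import sys
#   import gflags
#
#   gflags.DEFINE_string('name', 'world', 'Who to greet.')
#   FLAGS = gflags.FLAGS
#
#   def main(argv):
#     try:
#       argv = FLAGS(argv)  # parse flags; returns the remaining arguments
#     except gflags.FlagsError, e:
#       sys.exit('%s\nUsage: %s ARGS\n' % (e, sys.argv[0]))
#     print 'Hello, %s' % FLAGS.name
#
#   if __name__ == '__main__':
#     main(sys.argv)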
def _StrOrUnicode(value):
"""Converts value to a python string or, if necessary, unicode-string."""
try:
return str(value)
except UnicodeEncodeError:
return unicode(value)
def _MakeXMLSafe(s):
"""Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
s = cgi.escape(s) # Escape <, >, and &
# Remove characters that cannot appear in an XML 1.0 document
# (http://www.w3.org/TR/REC-xml/#charsets).
#
# NOTE: if there are problems with current solution, one may move to
# XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
# Convert non-ascii characters to entities. Note: requires python >=2.3
  s = s.encode('ascii', 'xmlcharrefreplace')  # e.g. u'\xce\x88' -> '&#206;&#136;'
return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
"""Writes a simple XML element.
Args:
outfile: File object we write the XML element to.
name: A string, the name of XML element.
value: A Python object, whose string representation will be used
as the value of the XML element.
indent: A string, prepended to each line of generated output.
"""
value_str = _StrOrUnicode(value)
if isinstance(value, bool):
# Display boolean values as the C++ flag library does: no caps.
value_str = value_str.lower()
safe_value_str = _MakeXMLSafe(value_str)
outfile.write('%s<%s>%s</%s>\n' % (indent, name, safe_value_str, name))
class Flag:
"""Information about a command-line flag.
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present attribute is updated. If
this flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value attribute. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present attribute is cleared after
__init__ parsing. If the default value is set to None, then the
__init__ parsing step is skipped and the .value attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
self.validators = []
self.SetDefault(default)
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return self is other
def __lt__(self, other):
if isinstance(other, Flag):
return id(self) < id(other)
return NotImplemented
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(_StrOrUnicode(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""Changes the default value (and current value too) for this Flag."""
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
# TODO(olexiy): Users can directly call this method, bypassing all flags
# validators (we don't have FlagValues here, so we can not check
# validators).
# The simplest solution I see is to make this method private.
# Another approach would be to store reference to the corresponding
# FlagValues with each flag, but this seems to be an overkill.
if value is None and self.allow_override:
raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
self.default = value
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def Type(self):
"""Returns: a string that describes the type of this Flag."""
# NOTE: we use strings, and not the types.*Type constants because
# our flags can have more exotic types, e.g., 'comma separated list
# of strings', 'whitespace separated list of strings', etc.
return self.parser.Type()
def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
"""Writes common info about this flag, in XML format.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _WriteCustomInfoInXMLFormat.
Please do NOT override this method.
Args:
outfile: File object we write to.
module_name: A string, the name of the module that defines this flag.
is_key: A boolean, True iff this flag is key for main module.
indent: A string that is prepended to each generated line.
"""
outfile.write(indent + '<flag>\n')
inner_indent = indent + ' '
if is_key:
_WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
_WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
# Print flag features that are relevant for all flags.
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
if self.short_name:
_WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
inner_indent)
if self.help:
_WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
# The default flag value can either be represented as a string like on the
# command line, or as a Python object. We serialize this value in the
# latter case in order to remain consistent.
if self.serializer and not isinstance(self.default, str):
default_serialized = self.serializer.Serialize(self.default)
else:
default_serialized = self.default
_WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
_WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
_WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
# Print extra flag features this flag may have.
self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
outfile.write(indent + '</flag>\n')
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
"""Writes extra info about this flag, in XML format.
"Extra" means "not already printed by WriteInfoInXMLFormat above."
Args:
outfile: File object we write to.
indent: A string that is prepended to each generated line.
"""
# Usually, the parser knows the extra details about the flag, so
# we just forward the call to it.
self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(mcs, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for mcs with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
args: Positional initializer arguments.
kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(mcs, *args, **kwargs)
else:
instances = mcs._instances
key = (mcs,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(mcs, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(mcs, *args)
class ArgumentParser(object):
"""Base class used to parse and convert arguments.
The Parse() method checks to make sure that the string argument is a
  legal value and converts it to a native type. If the value cannot be
converted, it should throw a 'ValueError' exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
Argument parser classes must be stateless, since instances are cached
and shared between flags. Initializer arguments are allowed, but all
member variables must be derived from initializer arguments only.
"""
__metaclass__ = _ArgumentParserCache
syntactic_help = ""
def Parse(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
def Type(self):
return 'string'
def WriteCustomInfoInXMLFormat(self, outfile, indent):
pass
class ArgumentSerializer:
"""Base class for generating string representations of a flag value."""
def Serialize(self, value):
return _StrOrUnicode(value)
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def Serialize(self, value):
return self.list_sep.join([_StrOrUnicode(x) for x in value])
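# A minimal sketch of a custom parser/serializer pair built on the classes
# above (a hypothetical "date" flag type using the standard datetime module):
#
#   import datetime
#
#   class DateParser(ArgumentParser):
#     syntactic_help = 'a date in YYYY-MM-DD form'
#     def Parse(self, argument):
#       # A ValueError raised here is recast as IllegalFlagValue by Flag.Parse.
#       return datetime.datetime.strptime(argument, '%Y-%m-%d').date()
#     def Type(self):
#       return 'date'
#
#   def DEFINE_date(name, default, help, flag_values=FLAGS, **args):
#     # The default is parsed too, so pass a 'YYYY-MM-DD' string or None.
#     DEFINE(DateParser(), name, default, help, flag_values,
#            ArgumentSerializer(), **args)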
# Flags validators
def RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS):
"""Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: string, name of the flag to be checked.
checker: method to validate the flag.
input - value of the corresponding flag (string, boolean, etc.
This value will be passed to checker by the library). See file's
docstring for examples.
output - Boolean.
Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise gflags_validators.Error(desired_error_message).
message: error text to be shown to the user if checker returns False.
If checker raises gflags_validators.Error, message from the raised
Error will be shown.
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name.
"""
flag_values.AddValidator(gflags_validators.SimpleValidator(flag_name,
checker,
message))
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
"""Ensure that flag is not None during program execution.
Registers a flag validator, which will follow usual validator
rules.
Args:
flag_name: string, name of the flag
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name.
"""
RegisterValidator(flag_name,
lambda value: value is not None,
message='Flag --%s must be specified.' % flag_name,
flag_values=flag_values)
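# A minimal sketch of validator registration (hypothetical flag name):
#
#   gflags.DEFINE_string('output_dir', None, 'Where to write results.')
#   gflags.MarkFlagAsRequired('output_dir')
#   gflags.RegisterValidator('output_dir',
#                            lambda value: value.startswith('/'),
#                            message='--output_dir must be an absolute path.')
#
# Validators run after FLAGS(argv) completes and again whenever the flag is
# reassigned, e.g. FLAGS.output_dir = value.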
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
"""Enforce lower and upper bounds for numeric flags.
Args:
parser: NumericParser (either FloatParser or IntegerParser). Provides lower
and upper bounds, and help text to display.
name: string, name of the flag
flag_values: FlagValues
"""
if parser.lower_bound is not None or parser.upper_bound is not None:
def Checker(value):
if value is not None and parser.IsOutsideBounds(value):
message = '%s is not %s' % (value, parser.syntactic_help)
raise gflags_validators.Error(message)
return True
RegisterValidator(name,
Checker,
flag_values=flag_values)
# The DEFINE functions are explained in more detail in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
**args):
"""Registers a generic Flag object.
NOTE: in the docstrings of all DEFINE* functions, "registers" is short
for "creates a new flag and registers it".
Auxiliary function: clients should use the specialized DEFINE_<type>
function instead.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object the flag will be registered with.
serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
"""Registers a 'Flag' object with a 'FlagValues' object.
By default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function
to register their flags.
"""
# copying the reference to flag_values prevents pychecker warnings
fv = flag_values
fv[flag.name] = flag
# Tell flag_values who's defining the flag.
if isinstance(flag_values, FlagValues):
# Regarding the above isinstance test: some users pass funny
# values of flag_values (e.g., {}) in order to avoid the flag
# registration (in the past, there used to be a flag_values ==
# FLAGS test here) and redefine flags with the same name (e.g.,
# debug). To avoid breaking their code, we perform the
# registration only if flag_values is a real FlagValues object.
module, module_name = _GetCallingModuleObjectAndName()
flag_values._RegisterFlagByModule(module_name, flag)
flag_values._RegisterFlagByModuleId(id(module), flag)
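# A minimal sketch of registering a hand-built Flag, which is what the
# DEFINE_<type> helpers below do internally (hypothetical flag name):
#
#   flag = Flag(ArgumentParser(), ArgumentSerializer(),
#               'greeting', 'hello', 'Greeting to use.')
#   DEFINE_flag(flag)       # registers with the global FLAGS by default
#   print FLAGS.greeting    # -> 'hello' until overridden on the command line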
def _InternalDeclareKeyFlags(flag_names,
flag_values=FLAGS, key_flag_values=None):
"""Declares a flag as key for the calling module.
Internal function. User code should call DECLARE_key_flag or
ADOPT_module_key_flags instead.
Args:
flag_names: A list of strings that are names of already-registered
Flag objects.
flag_values: A FlagValues object that the flags listed in
flag_names have registered with (the value of the flag_values
argument from the DEFINE_* calls that defined those flags).
This should almost never need to be overridden.
key_flag_values: A FlagValues object that (among possibly many
other things) keeps track of the key flags for each module.
Default None means "same as flag_values". This should almost
never need to be overridden.
Raises:
UnrecognizedFlagError: when we refer to a flag that was not
defined yet.
"""
key_flag_values = key_flag_values or flag_values
module = _GetCallingModule()
for flag_name in flag_names:
if flag_name not in flag_values:
raise UnrecognizedFlagError(flag_name)
flag = flag_values.FlagDict()[flag_name]
key_flag_values._RegisterKeyFlagForModule(module, flag)
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
"""Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--help).
Sample usage:
    gflags.DECLARE_key_flag('flag_1')
Args:
flag_name: A string, the name of an already declared flag.
(Redeclaring flags as key, including flags implicitly key
because they were declared in this module, is a no-op.)
flag_values: A FlagValues object. This should almost never
need to be overridden.
"""
if flag_name in _SPECIAL_FLAGS:
# Take care of the special flags, e.g., --flagfile, --undefok.
# These flags are defined in _SPECIAL_FLAGS, and are treated
# specially during flag parsing, taking precedence over the
# user-defined flags.
_InternalDeclareKeyFlags([flag_name],
flag_values=_SPECIAL_FLAGS,
key_flag_values=flag_values)
return
_InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: A module object.
flag_values: A FlagValues object. This should almost never need
to be overridden.
Raises:
FlagsError: When given an argument that is a module name (a
string), instead of a module object.
"""
# NOTE(salcianu): an even better test would be if not
# isinstance(module, types.ModuleType) but I didn't want to import
# types for such a tiny use.
if isinstance(module, str):
raise FlagsError('Received module name %s; expected a module object.'
% module)
_InternalDeclareKeyFlags(
[f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
flag_values=flag_values)
# If module is this flag module, take _SPECIAL_FLAGS into account.
if module == _GetThisModuleObjectAndName()[0]:
_InternalDeclareKeyFlags(
# As we associate flags with _GetCallingModuleObjectAndName(), the
# special flags defined in this module are incorrectly registered with
# a different module. So, we can't use _GetKeyFlagsForModule.
# Instead, we take all flags from _SPECIAL_FLAGS (a private
# FlagValues, where no other module should register flags).
[f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
flag_values=_SPECIAL_FLAGS,
key_flag_values=flag_values)
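# A minimal sketch of key-flag declarations (hypothetical module layout):
#
#   # In library_module.py:
#   gflags.DEFINE_integer('retries', 3, 'Number of retries.')
#
#   # In the main script:
#   import library_module
#   gflags.ADOPT_module_key_flags(library_module)  # adopt all of its key flags
#   gflags.DECLARE_key_flag('retries')             # or declare just one flag
#
# Key flags are what --helpshort lists for the main module.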
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be any string."""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
#
# BOOLEAN FLAGS
#
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def Convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if type(argument) == str:
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument)
def Parse(self, argument):
val = self.Convert(argument)
return val
def Type(self):
return 'bool'
class BooleanFlag(Flag):
"""Basic boolean flag.
Boolean flags do not take any arguments, and their value is either
True (1) or False (0). The false value is specified on the command
line by prepending the word 'no' to either the long or the short flag
name.
For example, if a Boolean flag was created whose long name was
'update' and whose short name was 'x', then this flag could be
explicitly unset through either --noupdate or --nox.
"""
def __init__(self, name, default, help, short_name=None, **args):
p = BooleanParser()
Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
"""Registers a boolean flag.
Such a boolean flag does not take an argument. If a user wants to
specify a false value explicitly, the long option beginning with 'no'
must be used: i.e. --noflag
This flag will have a value of None, True or False. None is possible
if default=None and the user does not specify the flag on the command
line.
"""
DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
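# A minimal usage sketch for boolean flags (hypothetical flag name):
#
#   gflags.DEFINE_boolean('use_cache', True, 'Whether to read from the cache.')
#
#   # On the command line, any of these forms work:
#   #   --use_cache        --nouse_cache        --use_cache=false
#   if FLAGS.use_cache:
#     ...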
class HelpFlag(BooleanFlag):
"""
HelpFlag is a special boolean flag that prints usage information and
raises a SystemExit exception if it is ever found in the command
line arguments. Note this is called with allow_override=1, so other
apps can define their own --help flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "help", 0, "show this help",
short_name="?", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = str(FLAGS)
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
class HelpXMLFlag(BooleanFlag):
"""Similar to HelpFlag, but generates output in XML format."""
def __init__(self):
BooleanFlag.__init__(self, 'helpxml', False,
'like --help, but generates XML output',
allow_override=1)
def Parse(self, arg):
if arg:
FLAGS.WriteHelpInXMLFormat(sys.stdout)
sys.exit(1)
class HelpshortFlag(BooleanFlag):
"""
HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
if it is ever found in the command line arguments. Note this is
called with allow_override=1, so other apps can define their own
--helpshort flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "helpshort", 0,
"show usage only for this module", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = FLAGS.MainModuleHelp()
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
"""Parser of numeric values.
Parsed value may be bounded to a given upper and lower bound.
"""
def IsOutsideBounds(self, val):
return ((self.lower_bound is not None and val < self.lower_bound) or
(self.upper_bound is not None and val > self.upper_bound))
def Parse(self, argument):
val = self.Convert(argument)
if self.IsOutsideBounds(val):
raise ValueError("%s is not %s" % (val, self.syntactic_help))
return val
def WriteCustomInfoInXMLFormat(self, outfile, indent):
if self.lower_bound is not None:
_WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent)
if self.upper_bound is not None:
_WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent)
def Convert(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
# End of Numeric Parser
#
# FLOAT FLAGS
#
class FloatParser(NumericParser):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "a"
number_name = "number"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound == 0:
sh = "a non-positive %s" % self.number_name
elif upper_bound is not None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def Type(self):
return 'float'
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be a float.
If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
_RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "an"
number_name = "integer"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(IntegerParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = "a positive %s" % self.number_name
elif upper_bound == -1:
sh = "a negative %s" % self.number_name
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound == 0:
sh = "a non-positive %s" % self.number_name
elif upper_bound is not None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
__pychecker__ = 'no-returnvalues'
if type(argument) == str:
base = 10
if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
base = 16
return int(argument, base)
else:
return int(argument)
def Type(self):
return 'int'
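# Note that Convert() also accepts hexadecimal strings, so (for a hypothetical
# flag) --some_int_flag=0x1F parses to 31 rather than raising an error.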
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be an integer.
If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
_RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set).
If enum_values (see below) is not specified, any string is allowed.
"""
def __init__(self, enum_values=None):
super(EnumParser, self).__init__()
self.enum_values = enum_values
def Parse(self, argument):
if self.enum_values and argument not in self.enum_values:
raise ValueError("value should be one of <%s>" %
"|".join(self.enum_values))
return argument
def Type(self):
return 'string enum'
class EnumFlag(Flag):
"""Basic enum flag; its value can be any string from list of enum_values."""
def __init__(self, name, default, help, enum_values=None,
short_name=None, **args):
enum_values = enum_values or []
p = EnumParser(enum_values)
g = ArgumentSerializer()
Flag.__init__(self, p, g, name, default, help, short_name, **args)
if not self.help: self.help = "an enum string"
self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
for enum_value in self.parser.enum_values:
_WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
**args):
"""Registers a flag whose value can be any string from enum_values."""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values)
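# A minimal sketch, assuming a made-up flag name and value set:
#   DEFINE_enum('compression', 'gzip', ['gzip', 'bzip2', 'none'], 'compression codec')
# A value outside the list (e.g. --compression=zip) is rejected by EnumParser.Parse.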
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
super(BaseListParser, self).__init__()
self._token = token
self._name = name
self.syntactic_help = "a %s separated list" % self._name
def Parse(self, argument):
if isinstance(argument, list):
return argument
elif argument == '':
return []
else:
return [s.strip() for s in argument.split(self._token)]
def Type(self):
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
_WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, None, 'whitespace')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
separators = list(string.whitespace)
separators.sort()
for ws_char in string.whitespace:
_WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a comma-separated list of strings."""
parser = ListParser()
serializer = ListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a whitespace-separated list of strings.
Any whitespace can be used as a separator.
"""
parser = WhitespaceSeparatedListParser()
serializer = ListSerializer(' ')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
"""A flag that can appear multiple time on the command-line.
The value of such a flag is a list that contains the individual values
from all the appearances of that flag on the command-line.
See the __doc__ for Flag for most behavior of this class. Only
differences in behavior are described here:
* The default value may be either a single value or a list of values.
A single value is interpreted as the [value] singleton list.
* The value of the flag is always a list, even if the option was
only supplied once, and even if the default value is a single
value.
"""
def __init__(self, *args, **kwargs):
Flag.__init__(self, *args, **kwargs)
self.help += ';\n repeat this option to specify a list of values'
def Parse(self, arguments):
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in the 'value' attribute
self.value = values
def Serialize(self):
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
if self.value is None:
return ''
s = ''
multi_value = self.value
for self.value in multi_value:
if s: s += ' '
s += Flag.Serialize(self)
self.value = multi_value
return s
def Type(self):
return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
**args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of any strings.
Use the flag on the command line multiple times to place multiple
string values into the list. The 'default' may be a single string
(which will be converted into a single-element list) or a list of
strings.
"""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
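# Sketch with a hypothetical flag: after
#   DEFINE_multistring('language', 'en', 'languages to load')
# passing --language=en --language=fr on the command line should leave
# FLAGS.language as ['en', 'fr'].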
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary integers.
Use the flag on the command line multiple times to place multiple
integer values into the list. The 'default' may be a single integer
(which will be converted into a single-element list) or a list of
integers.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary floats.
Use the flag on the command line multiple times to place multiple
float values into the list. The 'default' may be a single float
(which will be converted into a single-element list) or a list of
floats.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())
# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
_SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', "",
"Insert flag definitions from the given file into the command line.",
_SPECIAL_FLAGS)
DEFINE_string(
'undefok', "",
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
"with that name. IMPORTANT: flags in this list that have "
"arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| bsd-3-clause |
albertomurillo/ansible | lib/ansible/modules/network/onyx/onyx_mlag_ipl.py | 118 | 6779 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_mlag_ipl
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Manage IPL (inter-peer link) on Mellanox ONYX network devices
description:
- This module provides declarative management of IPL (inter-peer link)
management on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.4000
options:
name:
description:
- Name of the interface (port-channel) IPL should be configured on.
required: true
vlan_interface:
description:
- Name of the IPL vlan interface.
state:
description:
- IPL state.
default: present
choices: ['present', 'absent']
peer_address:
description:
- IPL peer IP address.
"""
EXAMPLES = """
- name: run configure ipl
onyx_mlag_ipl:
name: Po1
vlan_interface: Vlan 322
state: present
peer_address: 192.168.7.1
- name: run remove ipl
onyx_mlag_ipl:
name: Po1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface port-channel 1 ipl 1
- interface vlan 1024 ipl 1 peer-address 10.10.10.10
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import show_cmd
class OnyxMlagIplModule(BaseOnyxModule):
VLAN_IF_REGEX = re.compile(r'^Vlan \d+')
@classmethod
def _get_element_spec(cls):
return dict(
name=dict(required=True),
state=dict(default='present',
choices=['present', 'absent']),
peer_address=dict(),
vlan_interface=dict(),
)
def init_module(self):
""" module initialization
"""
element_spec = self._get_element_spec()
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(
name=module_params['name'],
state=module_params['state'],
peer_address=module_params['peer_address'],
vlan_interface=module_params['vlan_interface'])
self.validate_param_values(self._required_config)
def _update_mlag_data(self, mlag_data):
if not mlag_data:
return
mlag_summary = mlag_data.get("MLAG IPLs Summary", {})
ipl_id = "1"
ipl_list = mlag_summary.get(ipl_id)
if ipl_list:
ipl_data = ipl_list[0]
vlan_id = ipl_data.get("Vlan Interface")
vlan_interface = ""
if vlan_id != "N/A":
vlan_interface = "Vlan %s" % vlan_id
peer_address = ipl_data.get("Peer IP address")
name = ipl_data.get("Group Port-Channel")
self._current_config = dict(
name=name,
peer_address=peer_address,
vlan_interface=vlan_interface)
def _show_mlag_data(self):
cmd = "show mlag"
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
# called in base class in run function
self._current_config = dict()
mlag_data = self._show_mlag_data()
self._update_mlag_data(mlag_data)
def _get_interface_cmd_name(self, if_name):
if if_name.startswith('Po'):
return if_name.replace("Po", "port-channel ")
self._module.fail_json(
msg='invalid interface name: %s' % if_name)
def _generate_port_channel_command(self, if_name, enable):
if_cmd_name = self._get_interface_cmd_name(if_name)
if enable:
ipl_cmd = 'ipl 1'
else:
ipl_cmd = "no ipl 1"
cmd = "interface %s %s" % (if_cmd_name, ipl_cmd)
return cmd
def _generate_vlan_if_command(self, if_name, enable, peer_address):
if_cmd_name = if_name.lower()
if enable:
ipl_cmd = 'ipl 1 peer-address %s' % peer_address
else:
ipl_cmd = "no ipl 1"
cmd = "interface %s %s" % (if_cmd_name, ipl_cmd)
return cmd
def _generate_no_ipl_commands(self):
curr_interface = self._current_config.get('name')
req_interface = self._required_config.get('name')
if curr_interface == req_interface:
cmd = self._generate_port_channel_command(
req_interface, enable=False)
self._commands.append(cmd)
def _generate_ipl_commands(self):
curr_interface = self._current_config.get('name')
req_interface = self._required_config.get('name')
if curr_interface != req_interface:
if curr_interface and curr_interface != 'N/A':
cmd = self._generate_port_channel_command(
curr_interface, enable=False)
self._commands.append(cmd)
cmd = self._generate_port_channel_command(
req_interface, enable=True)
self._commands.append(cmd)
curr_vlan = self._current_config.get('vlan_interface')
req_vlan = self._required_config.get('vlan_interface')
add_peer = False
if curr_vlan != req_vlan:
add_peer = True
if curr_vlan:
cmd = self._generate_vlan_if_command(curr_vlan, enable=False,
peer_address=None)
self._commands.append(cmd)
curr_peer = self._current_config.get('peer_address')
req_peer = self._required_config.get('peer_address')
if req_peer != curr_peer:
add_peer = True
if add_peer and req_peer:
cmd = self._generate_vlan_if_command(req_vlan, enable=True,
peer_address=req_peer)
self._commands.append(cmd)
def generate_commands(self):
state = self._required_config['state']
if state == 'absent':
self._generate_no_ipl_commands()
else:
self._generate_ipl_commands()
def main():
""" main entry point for module execution
"""
OnyxMlagIplModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
shanemcd/ansible | lib/ansible/modules/network/nxos/nxos_rollback.py | 4 | 3936 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_rollback
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Set a checkpoint or rollback to a checkpoint.
description:
- This module offers the ability to set a configuration checkpoint
file or rollback to a configuration checkpoint file on Cisco NXOS
switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Sometimes C(transport=nxapi) may cause a timeout error.
options:
checkpoint_file:
description:
- Name of checkpoint file to create. Mutually exclusive
with rollback_to.
required: false
default: null
rollback_to:
description:
- Name of checkpoint file to rollback to. Mutually exclusive
with checkpoint_file.
required: false
default: null
'''
EXAMPLES = '''
- nxos_rollback:
checkpoint_file: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
- nxos_rollback:
rollback_to: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
filename:
description: The filename of the checkpoint/rollback file.
returned: success
type: string
sample: 'backup.cfg'
status:
description: Which operation took place and whether it was successful.
returned: success
type: string
sample: 'rollback executed'
'''
from ansible.module_utils.nxos import nxos_argument_spec, run_commands
from ansible.module_utils.basic import AnsibleModule
def checkpoint(filename, module):
commands = [{
'command': 'terminal dont-ask',
'output': 'text', }, {
'command': 'checkpoint file %s' % filename,
'output': 'text',
}]
run_commands(module, commands)
def rollback(filename, module):
commands = [{
'command': 'rollback running-config file %s' % filename,
'output': 'text',
}]
run_commands(module, commands)
def main():
argument_spec = dict(
checkpoint_file=dict(required=False),
rollback_to=dict(required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['checkpoint_file',
'rollback_to']],
supports_check_mode=False)
checkpoint_file = module.params['checkpoint_file']
rollback_to = module.params['rollback_to']
status = None
filename = None
changed = False
if checkpoint_file:
checkpoint(checkpoint_file, module)
status = 'checkpoint file created'
elif rollback_to:
rollback(rollback_to, module)
status = 'rollback executed'
changed = True
filename = rollback_to or checkpoint_file
module.exit_json(changed=changed, status=status, filename=filename)
if __name__ == '__main__':
main()
| gpl-3.0 |
bellwethers-in-se/defects | src/old/axe/sk.py | 1 | 16773 | """
## Hypothesis Testing Stuff
### Standard Stuff
#### Standard Headers
"""
from __future__ import division
import sys
import random
import math
from functools import reduce
sys.dont_write_bytecode = True
"""
#### Standard Utils
"""
class o():
"Anonymous container"
def __init__(i, **fields):
i.override(fields)
def override(i, d):
i.__dict__.update(d)
return i
def __repr__(i):
d = i.__dict__
name = i.__class__.__name__
return name + '{' + ' '.join([':%s %s' % (k, pretty(d[k]))
for k in i.show()]) + '}'
def show(i):
return [k for k in sorted(i.__dict__.keys())
if not "_" in k]
"""
Misc functions:
"""
rand = random.random
any = random.choice
seed = random.seed
exp = lambda n: math.e ** n
ln = lambda n: math.log(n, math.e)
g = lambda n: round(n, 2)
def median(lst, ordered=False):
if not ordered:
lst = sorted(lst)
n = len(lst)
p = n // 2
if n % 2:
return lst[p]
q = p - 1
q = max(0, min(q, n))
return (lst[p] + lst[q]) / 2
def msecs(f):
import time
t1 = time.time()
f()
return (time.time() - t1) * 1000
def pairs(lst):
"Return all pairs of items i,i+1 from a list."
last = lst[0]
for i in lst[1:]:
yield last, i
last = i
def xtile(lst, lo=0, hi=1, width=50,
chops=[0.25, 0.5, 0.75],
marks=["-", " ", "-"],
bar="|", star="*", show=" %0.2f"):
"""The function _xtile_ takes a list of (possibly)
unsorted numbers and presents them as a horizontal
xtile chart (in ascii format). The default is a
contracted _quintile_ that shows the
10,30,50,70,90 breaks in the data (but this can be
changed- see the optional flags of the function).
"""
def pos(p):
return ordered[int(len(lst) * p)]
def place(x):
return int(width * float((x - lo)) / (hi - lo + 0.00001))
def pretty(lst):
return ', '.join([str(show % x) for x in lst])
ordered = sorted(lst)
lo = min(lo, ordered[0])
hi = max(hi, ordered[-1])
what = [pos(p) for p in chops]
where = [place(n) for n in what]
out = [" "] * width
for one, two in pairs(where):
for i in range(one, two):
out[i] = marks[0]
marks = marks[1:]
out[int(width / 2)] = bar
out[place(pos(0.5))] = star
return '(' + ''.join(out) + ")," + pretty(what)
def _tileX():
import random
random.seed(1)
nums = [random.random() ** 2 for _ in range(100)]
print xtile(nums, lo=0, hi=1.0, width=25, show=" %0.3E")
"""
### Standard Accumulator for Numbers
Note the _lt_ method: this accumulator can be sorted by median values.
Warning: this accumulator keeps _all_ numbers. Might be better to use
a bounded cache.
"""
class Num:
"An Accumulator for numbers"
def __init__(i, name, inits=[]):
i.n = i.m2 = i.mu = 0.0
i.all = []
i._median = None
i.name = name
i.rank = 0
for x in inits:
i.add(x)
def s(i):
return (i.m2 / (i.n - 1)) ** 0.5
def add(i, x):
i._median = None
i.n += 1
i.all += [x]
delta = x - i.mu
i.mu += delta * 1.0 / i.n
i.m2 += delta * (x - i.mu)
def __add__(i, j):
return Num(i.name + j.name, i.all + j.all)
def quartiles(i):
def p(x):
return int(100 * g(xs[x]))
i.median()
xs = i.all
n = int(len(xs) * 0.25)
return p(n), p(2 * n), p(3 * n)
def median(i):
if not i._median:
i.all = sorted(i.all)
i._median = median(i.all)
return i._median
def __lt__(i, j):
return i.median() < j.median()
def spread(i):
i.all = sorted(i.all)
n1 = i.n * 0.25
n2 = i.n * 0.75
if len(i.all) <= 1:
return 0
if len(i.all) == 2:
return i.all[1] - i.all[0]
else:
return i.all[int(n2)] - i.all[int(n1)]
"""
### The A12 Effect Size Test
"""
def a12slow(lst1, lst2):
"how often is x in lst1 more than y in lst2?"
more = same = 0.0
for x in lst1:
for y in lst2:
if x == y:
same += 1
elif x > y:
more += 1
x = (more + 0.5 * same) / (len(lst1) * len(lst2))
return x
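# For example, a12slow([1, 2, 3], [0, 0, 0]) == 1.0 (every x beats every y),
# while a12slow([1, 2, 3], [1, 2, 3]) == 0.5 (neither sample tends to dominate).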
def a12(lst1, lst2):
"how often is x in lst1 more than y in lst2?"
def loop(t, t1, t2):
while t1.j < t1.n and t2.j < t2.n:
h1 = t1.l[t1.j]
h2 = t2.l[t2.j]
h3 = t2.l[t2.j + 1] if t2.j + 1 < t2.n else None
if h1 > h2:
t1.j += 1
t1.gt += t2.n - t2.j
elif h1 == h2:
if h3 and h1 > h3:
t1.gt += t2.n - t2.j - 1
t1.j += 1
t1.eq += 1
t2.eq += 1
else:
t2, t1 = t1, t2
return t.gt * 1.0, t.eq * 1.0
#--------------------------
lst1 = sorted(lst1, reverse=True)
lst2 = sorted(lst2, reverse=True)
n1 = len(lst1)
n2 = len(lst2)
t1 = o(l=lst1, j=0, eq=0, gt=0, n=n1)
t2 = o(l=lst2, j=0, eq=0, gt=0, n=n2)
gt, eq = loop(t1, t1, t2)
return gt / (n1 * n2) + eq / 2 / (n1 * n2)
def _a12():
def f1():
return a12slow(l1, l2)
def f2():
return a12(l1, l2)
for n in [100, 200, 400, 800, 1600, 3200, 6400]:
l1 = [rand() for _ in xrange(n)]
l2 = [rand() for _ in xrange(n)]
t1 = msecs(f1)
t2 = msecs(f2)
print n, g(f1()), g(f2()), int((t1 / t2))
"""Output:
````
n a12(fast) a12(slow) tfast / tslow
--- --------------- -------------- --------------
100 0.53 0.53 4
200 0.48 0.48 6
400 0.49 0.49 28
800 0.5 0.5 26
1600 0.51 0.51 72
3200 0.49 0.49 109
6400 0.5 0.5 244
````
## Non-Parametric Hypothesis Testing
The following _bootstrap_ method was introduced in
1979 by Bradley Efron at Stanford University. It
was inspired by earlier work on the
jackknife.
Improved estimates of the variance were [developed later][efron01].
[efron01]: http://goo.gl/14n8Wf "Bradley Efron and R.J. Tibshirani. An Introduction to the Bootstrap (Chapman & Hall/CRC Monographs on Statistics & Applied Probability), 1993"
To check if two populations _(y0,z0)_
are different, many times sample with replacement
from both to generate _(y1,z1), (y2,z2), (y3,z3)_.. etc.
"""
def sampleWithReplacement(lst):
"returns a list same size as list"
def any(n):
return random.uniform(0, n)
def one(lst):
return lst[int(any(len(lst)))]
return [one(lst) for _ in lst]
"""
Then, for all those samples,
check if some *testStatistic* in the original pair
holds for all the other pairs. If it does more than (say) 99%
of the time, then we are 99% confident that the
populations are the same.
In such a _bootstrap_ hypothesis test, the *some property*
is the difference between the two populations, tempered by the
joint standard deviation of the populations.
"""
def testStatistic(y, z):
"""Checks if two means are different, tempered
by the sample size of 'y' and 'z'"""
tmp1 = tmp2 = 0
for y1 in y.all:
tmp1 += (y1 - y.mu) ** 2
for z1 in z.all:
tmp2 += (z1 - z.mu) ** 2
s1 = (float(tmp1) / (y.n - 1)) ** 0.5
s2 = (float(tmp2) / (z.n - 1)) ** 0.5
delta = z.mu - y.mu
if s1 + s2:
delta = delta / ((s1 / y.n + s2 / z.n) ** 0.5)
return delta
"""
The rest is just details:
+ Efron advises
making the mean of the populations the same (see
the _yhat,zhat_ stuff shown below).
+ The class _total_ is a just a quick and dirty accumulation class.
+ For more details see [the Efron text][efron01].
"""
def bootstrap(y0, z0, conf=0.01, b=1000):
"""The bootstrap hypothesis test from
p220 to 223 of Efron's book 'An
Introduction to the Bootstrap'."""
class total():
"quick and dirty data collector"
def __init__(i, some=[]):
i.sum = i.n = i.mu = 0
i.all = []
for one in some:
i.put(one)
def put(i, x):
i.all.append(x)
i.sum += x
i.n += 1
i.mu = float(i.sum) / i.n
def __add__(i1, i2):
return total(i1.all + i2.all)
y, z = total(y0), total(z0)
x = y + z
tobs = testStatistic(y, z)
yhat = [y1 - y.mu + x.mu for y1 in y.all]
zhat = [z1 - z.mu + x.mu for z1 in z.all]
bigger = 0.0
for i in range(b):
if testStatistic(total(sampleWithReplacement(yhat)),
total(sampleWithReplacement(zhat))) > tobs:
bigger += 1
return bigger / b < conf
"""
#### Examples
"""
def _bootstraped():
def worker(n=1000,
mu1=10, sigma1=1,
mu2=10.2, sigma2=1):
def g(mu, sigma):
return random.gauss(mu, sigma)
x = [g(mu1, sigma1) for i in range(n)]
y = [g(mu2, sigma2) for i in range(n)]
return n, mu1, sigma1, mu2, sigma2, \
'different' if bootstrap(x, y) else 'same'
# very different means, same std
print worker(mu1=10, sigma1=10,
mu2=100, sigma2=10)
# similar means and std
print worker(mu1=10.1, sigma1=1,
mu2=10.2, sigma2=1)
# slightly different means, same std
print worker(mu1=10.1, sigma1=1,
mu2=10.8, sigma2=1)
# difference in mu eaten by large std
print worker(mu1=10.1, sigma1=10,
mu2=10.8, sigma2=1)
"""
Output:
````
_bootstraped()
(1000, 10, 10, 100, 10, 'different')
(1000, 10.1, 1, 10.2, 1, 'same')
(1000, 10.1, 1, 10.8, 1, 'different')
(1000, 10.1, 10, 10.8, 1, 'same')
````
Warning: the above took 8 seconds to generate since we used 1000 bootstraps.
As to how many bootstraps are enough, that depends on the data. There are
results saying 200 to 400 are enough but, since I am a suspicious man, I run it for 1000.
This means the runtime associated with bootstrapping is a significant issue.
To reduce that runtime, I avoid things like an all-pairs comparison of all treatments
(see below: Scott-Knott). Also, BEFORE I do the bootstrap, I first run
the effect size test (and only go to bootstrapping if the effect size test passes):
"""
def different(l1, l2):
# return bootstrap(l1,l2) and a12(l2,l1)
return a12(l2, l1) and bootstrap(l1, l2)
"""
## Saner Hypothesis Testing
The following code, which you should use verbatim does the following:
+ All treatments are clustered into _ranks_. In practice, dozens
of treatments end up generating just a handful of ranks.
+ The number of calls to the hypothesis tests is minimized:
+ Treatments are sorted by their median value.
+ Treatments are divided into two groups such that the
expected value of the mean values _after_ the split is minimized;
+ Hypothesis tests are called to test if the two groups are truly different.
+ All hypothesis tests are non-parametric and include (1) effect size tests
and (2) tests for statistically significant numbers;
+ Slow bootstraps are executed if the faster _A12_ tests are passed;
In practice, this means that the hypothesis tests (with confidence of say, 95%)
are called on only a logarithmic number of times. So...
+ With this method, 16 treatments can be studied using less than _∑<sub>1,2,4,8,16</sub>log<sub>2</sub>i =15_ hypothesis tests and confidence _0.99<sup>15</sup>=0.86_.
+ But if we did this with the 120 all-pairs comparisons of the 16 treatments, we would have total confidence _0.99<sup>120</sup>=0.30_.
For examples on using this code, see _rdivDemo_ (below).
"""
def scottknott(data, cohen=0.3, small=3, useA12=False, epsilon=0.01):
"""Recursively split data, maximizing delta of
the expected value of the mean before and
after the splits.
Reject splits with under 3 items"""
all = reduce(lambda x, y: x + y, data)
same = lambda l, r: abs(l.median() - r.median()) <= all.s() * cohen
if useA12:
same = lambda l, r: not different(l.all, r.all)
big = lambda n: n > small
return rdiv(data, all, minMu, big, same, epsilon)
def rdiv(data, # a list of class Nums
all, # all the data combined into one num
div, # function: find the best split
big, # function: rejects small splits
same, # function: rejects similar splits
epsilon): # small enough to split two parts
"""Looks for ways to split sorted data,
Recurses into each split. Assigns a 'rank' number
to all the leaf splits found in this way.
"""
def recurse(parts, all, rank=0):
"Split, then recurse on each part."
cut, left, right = maybeIgnore(div(parts, all, big, epsilon),
same, parts)
if cut:
# if cut, rank "right" higher than "left"
rank = recurse(parts[:cut], left, rank) + 1
rank = recurse(parts[cut:], right, rank)
else:
# if no cut, then all get same rank
for part in parts:
part.rank = rank
return rank
recurse(sorted(data), all)
return data
def maybeIgnore(xxx_todo_changeme, same, parts):
(cut, left, right) = xxx_todo_changeme
if cut:
if same(sum(parts[:cut], Num('upto')),
sum(parts[cut:], Num('above'))):
cut = left = right = None
return cut, left, right
def minMu(parts, all, big, epsilon):
"""Find a cut in the parts that maximizes
the expected value of the difference in
the mean before and after the cut.
Reject splits that are insignificantly
different or that generate very small subsets.
"""
cut, left, right = None, None, None
before, mu = 0, all.mu
for i, l, r in leftRight(parts, epsilon):
if big(l.n) and big(r.n):
n = all.n * 1.0
now = l.n / n * (mu - l.mu) ** 2 + r.n / n * (mu - r.mu) ** 2
if now > before:
before, cut, left, right = now, i, l, r
return cut, left, right
def leftRight(parts, epsilon=0.01):
"""Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides
"""
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n:
rights[j] += rights[j + 1]
j -= 1
left = parts[0]
for i, one in enumerate(parts):
if i > 0:
if parts[i]._median - parts[i - 1]._median > epsilon:
yield i, left, rights[i]
left += one
"""
## Putting it All Together
Driver for the demos:
"""
def rdivDemo(data, isLatex=False, globalMinMax=True, high=100, low=0):
if isLatex:
# print(r"""\documentclass{article}
# \usepackage{colortbl} % not sure if needed
# \usepackage[table]{xcolor} % not sure if needed
# %%%% needed %%%
# \usepackage{picture}
# \newcommand{\quart}[4]{\begin{picture}(100,6)%1
# {\color{black}\put(#3,3){\circle*{4}}\put(#1,3){\line(1,0){#2}}}\end{picture}}
# \begin{document}
# """)
def z(x):
return int(80 * (x - lo) / (hi - lo + 0.00001))
data = map(lambda lst: Num(lst[0], lst[1:]),
data)
print ""
ranks = []
for x in scottknott(data, useA12=True):
ranks += [(x.rank, x.median(), x)]
all = []
for _, __, x in sorted(ranks):
all += x.quartiles()
all = sorted(all)
if globalMinMax:
lo, hi = min(low, all[0]), max(all[-1], high)
else:
lo, hi = all[0], all[-1]
print r'{\scriptsize \begin{tabular}{l@{~~~}l@{~~~}r@{~~~}r@{~~~}c}'
print r'\arrayrulecolor{lightgray}'
# min= %s, max= %s\\\\' % (int(lo),int(hi))
print r'\textbf{Rank} & \textbf{Treatment} & \textbf{Median} & \textbf{IQR} & \\\hline'
last = None
for _, __, x in sorted(ranks):
q1, q2, q3 = x.quartiles()
pre = ""
if not last is None and not last == x.rank:
pre = "\\hline"
print pre, r'%2s & %12s & %s & %s & \quart{%s}{%s}{%s}{%s} \\' % \
(x.rank + 1,
x.name,
float(q2 / 100),
float((q3 - q1) / 100),
z(q1),
z(q3) - z(q1),
z(q2),
z(100))
last = x.rank
print r"\hline \end{tabular}}"
return ranks
# print('''
# \end{document}
# ''')
else:
def z(x):
return int(100 * (x - lo) / (hi - lo + 0.00001))
data = map(lambda lst: Num(lst[0], lst[1:]),
data)
print ""
ranks = []
for x in scottknott(data, useA12=True):
ranks += [(x.rank, x.median(), x)]
all = []
for _, __, x in sorted(ranks):
all += x.all
all = sorted(all)
if globalMinMax:
lo, hi = min(low, all[0]), max(all[-1], high)
else:
lo, hi = all[0], all[-1]
line = "----------------------------------------------------"
last = None
print ('%4s , %12s , %s , %4s ' %
('rank', 'name', 'med', 'iqr')) + "\n" + line
for _, __, x in sorted(ranks):
q1, q2, q3 = x.quartiles()
print ('%4s , %12s , %0.2f , %0.2f ' %
(x.rank + 1, x.name, x.median(), x.spread())) + \
xtile(x.all, lo=lo, hi=hi, width=30)
last = x.rank
return ranks
| mit |
wuhengzhi/chromium-crosswalk | tools/perf/page_sets/page_reload_cases.py | 22 | 1393 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import shared_page_state
from telemetry import story
from page_sets import top_pages
def _Reload(action_runner):
# Numbers below are chosen arbitrarily. For the V8DetachedContextAgeInGC
# the number of reloads should be high enough so that V8 could do few
# incremental GCs.
NUMBER_OF_RELOADS = 7
WAIT_TIME = 2
for _ in xrange(NUMBER_OF_RELOADS):
action_runner.ReloadPage()
action_runner.Wait(WAIT_TIME)
def _CreatePageClassWithReload(page_cls):
class DerivedSmoothPage(page_cls): # pylint: disable=no-init
def RunPageInteractions(self, action_runner):
_Reload(action_runner)
return DerivedSmoothPage
class PageReloadCasesPageSet(story.StorySet):
""" Pages for testing GC efficiency on page reload. """
def __init__(self):
super(PageReloadCasesPageSet, self).__init__(
archive_data_file='data/top_25.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
shared_desktop_state = shared_page_state.SharedDesktopPageState
self.AddStory(_CreatePageClassWithReload(
top_pages.GoogleWebSearchPage)(self, shared_desktop_state))
self.AddStory(_CreatePageClassWithReload(
top_pages.GoogleDocPage)(self, shared_desktop_state))
| bsd-3-clause |
donniexyz/calligra | plugins/formulashape/scripts/DictionaryGenerator.py | 10 | 4762 | #! /usr/bin/env python
"""This file is part of the KDE project
Copyright (C) 2007 Martin Pfeiffer <[email protected]>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public License
along with this library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
"""
"""
This script generates the Dictionary.cpp file, which serves as an operator dictionary.
The source of information for generation is the operator.list file. This is copied
from the appendix of the MathML spec with whitespace stripped. The url is:
http://www.w3.org/TR/2003/REC-MathML2-20031021/appendixf.html .
This script also generates the entity mapping, which maps MathML entities to unicode
characters. The raw data in entities.list is taken from the MathML specification
http://www.w3.org/TR/2003/REC-MathML2-20031021/byalpha.html .
"""
import codecs
import time
'''
Write the standard KDE file header with copyright and time signature. Also write the
constructor of the Dictionary class.
'''
def write_file_header( file ):
print >> file,'''// Created: ''' + time.ctime( time.time() ) + '''
// WARNING! All changes made in this file will be lost!
/* This file is part of the KDE project
Copyright (C) 2007 <[email protected]>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public License
along with this library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "Dictionary.h"
Dictionary::Dictionary()
{
m_lspace = "thickmathspace";
m_rspace = "thickmathspace";
m_maxsize = "infinity";
m_minsize = "1";
m_fence = false;
m_separator = false;
m_stretchy = false;
m_symmetric = true;
m_largeop = false;
m_movablelimits = false;
m_accent = false;
}
'''
def write_entity_mapping( file ):
print >> file, 'QChar Dictionary::mapEntity( const QString& entity )'
print >> file, '{\n if( entity.isEmpty() ) return QChar();'
entity_list = open( 'entities.list' )
for line in entity_list:
tokens = line.split( ',' )
if tokens[ 1 ].find( '-' ) > -1 :
continue
file.write( ' else if( entity == "' + tokens[ 0 ] + '" ) return QChar( 0x' )
file.write( tokens[ 1 ].strip()[1:] + ' );\n' )
print >> file, ' else return QChar();\n}\n'
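# The operator.list rows are assumed to follow the attribute syntax of MathML
# appendix F, e.g. (an illustrative line, not copied from the file itself):
#   "+" form="infix" lspace="mediummathspace" rspace="mediummathspace"
# write_operator_dictionary() below turns each such row into an 'else if' branch.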
def write_operator_dictionary( file ):
print >> file, 'bool Dictionary::queryOperator( const QString& queriedOperator, Form form )'
print >> file, '{\n if( queriedOperator.isEmpty() || queriedOperator.isNull() )\n return false;'
operator_list = open( 'operator.list' )
for line in operator_list:
for token in line.split():
if token.startswith( '"' ) and token.endswith( '"' ):
file.write( ' else if( queriedOperator == ' + token + ' && ' )
elif token.find( 'form=' ) > -1:
print >> file, 'form == ' + token.strip( '"' )[6:].capitalize() + ' ) {'
else:
print >> file, parse_token( token )
print >> file, ' return true;'
print >> file, ' }'
print >> file, '\n return false;'
print >> file, '}'
operator_list.close()
def parse_token( token ):
subtokens = token.split( '=' )
if token.find( 'true' ) > -1 or token.find( 'false' ) > -1:
return ' m_' + subtokens[0] + ' = ' + subtokens[1].strip( '"' ) + ';'
else:
return ' m_' + subtokens[0] + ' = ' + subtokens[1] + ';'
if __name__ == '__main__':
source_file = codecs.open( '../Dictionary.cpp', 'w', 'utf-8' )
write_file_header( source_file )
write_entity_mapping( source_file )
write_operator_dictionary( source_file )
source_file.close()
| gpl-2.0 |
CyanogenMod/android_external_chromium_org | mojo/public/tools/bindings/pylib/mojom/parse/parser.py | 8 | 8912 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a syntax tree from a Mojo IDL file."""
import imp
import os.path
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("ply")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("mojo"), "third_party"))
from ply import lex
from ply import yacc
from ..error import Error
import ast
from lexer import Lexer
_MAX_ORDINAL_VALUE = 0xffffffff
def _ListFromConcat(*items):
"""Generate list by concatenating inputs (note: only concatenates lists, not
tuples or other iterables)."""
itemsout = []
for item in items:
if item is None:
continue
if type(item) is not type([]):
itemsout.append(item)
else:
itemsout.extend(item)
return itemsout
# Disable lint check for exceptions deriving from Exception:
# pylint: disable=W0710
class ParseError(Error):
"""Class for errors from the parser."""
def __init__(self, filename, message, lineno=None, snippet=None):
Error.__init__(self, filename, message, lineno=lineno,
addenda=([snippet] if snippet else None))
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Parser(object):
def __init__(self, lexer, source, filename):
self.tokens = lexer.tokens
self.source = source
self.filename = filename
def p_root(self, p):
"""root : import root
| module
| definitions"""
if len(p) > 2:
p[0] = _ListFromConcat(p[1], p[2])
else:
# Generator expects a module. If one wasn't specified insert one with an
# empty name.
if p[1][0] != 'MODULE':
p[0] = [('MODULE', '', None, p[1])]
else:
p[0] = [p[1]]
def p_import(self, p):
"""import : IMPORT STRING_LITERAL"""
# 'eval' the literal to strip the quotes.
p[0] = ('IMPORT', eval(p[2]))
def p_module(self, p):
"""module : attribute_section MODULE identifier LBRACE definitions RBRACE"""
p[0] = ('MODULE', p[3], p[1], p[5])
def p_definitions(self, p):
"""definitions : definition definitions
| """
if len(p) > 1:
p[0] = _ListFromConcat(p[1], p[2])
def p_definition(self, p):
"""definition : struct
| interface
| enum
| const"""
p[0] = p[1]
def p_attribute_section(self, p):
"""attribute_section : LBRACKET attributes RBRACKET
| """
if len(p) > 3:
p[0] = p[2]
def p_attributes(self, p):
"""attributes : attribute
| attribute COMMA attributes
| """
if len(p) == 2:
p[0] = _ListFromConcat(p[1])
elif len(p) > 3:
p[0] = _ListFromConcat(p[1], p[3])
def p_attribute(self, p):
"""attribute : NAME EQUALS evaled_literal
| NAME EQUALS NAME"""
p[0] = ('ATTRIBUTE', p[1], p[3])
def p_evaled_literal(self, p):
"""evaled_literal : literal"""
# 'eval' the literal to strip the quotes.
p[0] = eval(p[1])
def p_struct(self, p):
"""struct : attribute_section STRUCT NAME LBRACE struct_body RBRACE SEMI"""
p[0] = ('STRUCT', p[3], p[1], p[5])
def p_struct_body(self, p):
"""struct_body : field struct_body
| enum struct_body
| const struct_body
| """
if len(p) > 1:
p[0] = _ListFromConcat(p[1], p[2])
def p_field(self, p):
"""field : typename NAME ordinal default SEMI"""
p[0] = ('FIELD', p[1], p[2], p[3], p[4])
def p_default(self, p):
"""default : EQUALS constant
| """
if len(p) > 2:
p[0] = p[2]
def p_interface(self, p):
"""interface : attribute_section INTERFACE NAME LBRACE interface_body \
RBRACE SEMI"""
p[0] = ('INTERFACE', p[3], p[1], p[5])
def p_interface_body(self, p):
"""interface_body : method interface_body
| enum interface_body
| const interface_body
| """
if len(p) > 1:
p[0] = _ListFromConcat(p[1], p[2])
def p_response(self, p):
"""response : RESPONSE LPAREN parameters RPAREN
| """
if len(p) > 3:
p[0] = p[3]
def p_method(self, p):
"""method : NAME ordinal LPAREN parameters RPAREN response SEMI"""
p[0] = ('METHOD', p[1], p[4], p[2], p[6])
def p_parameters(self, p):
"""parameters : parameter
| parameter COMMA parameters
| """
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = _ListFromConcat(p[1])
elif len(p) > 3:
p[0] = _ListFromConcat(p[1], p[3])
def p_parameter(self, p):
"""parameter : typename NAME ordinal"""
p[0] = ('PARAM', p[1], p[2], p[3])
def p_typename(self, p):
"""typename : basictypename
| array
| interfacerequest"""
p[0] = p[1]
def p_basictypename(self, p):
"""basictypename : identifier
| handletype"""
p[0] = p[1]
def p_handletype(self, p):
"""handletype : HANDLE
| HANDLE LANGLE NAME RANGLE"""
if len(p) == 2:
p[0] = p[1]
else:
if p[3] not in ('data_pipe_consumer',
'data_pipe_producer',
'message_pipe',
'shared_buffer'):
# Note: We don't enable tracking of line numbers for everything, so we
# can't use |p.lineno(3)|.
raise ParseError(self.filename, "Invalid handle type %r:" % p[3],
lineno=p.lineno(1),
snippet=self._GetSnippet(p.lineno(1)))
p[0] = "handle<" + p[3] + ">"
def p_array(self, p):
"""array : typename LBRACKET RBRACKET"""
p[0] = p[1] + "[]"
def p_interfacerequest(self, p):
"""interfacerequest : identifier AMP"""
p[0] = p[1] + "&"
def p_ordinal(self, p):
"""ordinal : ORDINAL
| """
if len(p) > 1:
value = int(p[1][1:])
if value > _MAX_ORDINAL_VALUE:
raise ParseError(self.filename, "Ordinal value %d too large:" % value,
lineno=p.lineno(1),
snippet=self._GetSnippet(p.lineno(1)))
p[0] = ast.Ordinal(value, filename=self.filename, lineno=p.lineno(1))
else:
p[0] = ast.Ordinal(None)
def p_enum(self, p):
"""enum : ENUM NAME LBRACE enum_fields RBRACE SEMI"""
p[0] = ('ENUM', p[2], p[4])
def p_enum_fields(self, p):
"""enum_fields : enum_field
| enum_field COMMA enum_fields
| """
if len(p) == 2:
p[0] = _ListFromConcat(p[1])
elif len(p) > 3:
p[0] = _ListFromConcat(p[1], p[3])
def p_enum_field(self, p):
"""enum_field : NAME
| NAME EQUALS constant"""
if len(p) == 2:
p[0] = ('ENUM_FIELD', p[1], None)
else:
p[0] = ('ENUM_FIELD', p[1], p[3])
def p_const(self, p):
"""const : CONST typename NAME EQUALS constant SEMI"""
p[0] = ('CONST', p[2], p[3], p[5])
def p_constant(self, p):
"""constant : literal
| identifier_wrapped"""
p[0] = p[1]
def p_identifier_wrapped(self, p):
"""identifier_wrapped : identifier"""
p[0] = ('IDENTIFIER', p[1])
def p_identifier(self, p):
"""identifier : NAME
| NAME DOT identifier"""
p[0] = ''.join(p[1:])
def p_literal(self, p):
"""literal : number
| CHAR_CONST
| TRUE
| FALSE
| DEFAULT
| STRING_LITERAL"""
p[0] = p[1]
def p_number(self, p):
"""number : digits
| PLUS digits
| MINUS digits"""
p[0] = ''.join(p[1:])
def p_digits(self, p):
"""digits : INT_CONST_DEC
| INT_CONST_HEX
| FLOAT_CONST"""
p[0] = p[1]
def p_error(self, e):
if e is None:
# Unexpected EOF.
# TODO(vtl): Can we figure out what's missing?
raise ParseError(self.filename, "Unexpected end of file")
raise ParseError(self.filename, "Unexpected %r:" % e.value, lineno=e.lineno,
snippet=self._GetSnippet(e.lineno))
def _GetSnippet(self, lineno):
return self.source.split('\n')[lineno - 1]
def Parse(source, filename):
lexer = Lexer(filename)
parser = Parser(lexer, source, filename)
lex.lex(object=lexer)
yacc.yacc(module=parser, debug=0, write_tables=0)
tree = yacc.parse(source)
return tree
| bsd-3-clause |
ftomassetti/intellij-community | python/helpers/coverage/__init__.py | 208 | 4505 | """Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
from coverage.version import __version__, __url__
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
# explicitly-created coverage objects. But for backward compatibility, here we
# define the top-level functions to create the singleton when they are first
# called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed via locals().
# pylint: disable=W0612
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
import inspect
meth = getattr(coverage, name)
args, varargs, kw, defaults = inspect.getargspec(meth)
argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
docstring = meth.__doc__
wrapper.__doc__ = ("""\
A first-use-singleton wrapper around coverage.%(name)s.
This wrapper is provided for backward compatibility with legacy code.
New code should use coverage.%(name)s directly.
%(name)s%(argspec)s:
%(docstring)s
""" % locals()
)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8
# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
import sys
try:
del sys.modules['coverage.coverage']
except KeyError:
pass
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
| apache-2.0 |
Andygmb/python-social-auth | social/backends/github.py | 53 | 3946 | """
Github OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/github.html
"""
from requests import HTTPError
from six.moves.urllib.parse import urljoin
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthFailed
class GithubOAuth2(BaseOAuth2):
"""Github OAuth authentication backend"""
name = 'github'
API_URL = 'https://api.github.com/'
AUTHORIZATION_URL = 'https://github.com/login/oauth/authorize'
ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires'),
('login', 'login')
]
def api_url(self):
return self.API_URL
def get_user_details(self, response):
"""Return user details from Github account"""
fullname, first_name, last_name = self.get_user_names(
response.get('name')
)
return {'username': response.get('login'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
data = self._user_data(access_token)
if not data.get('email'):
try:
emails = self._user_data(access_token, '/emails')
except (HTTPError, ValueError, TypeError):
emails = []
if emails:
email = emails[0]
primary_emails = [
e for e in emails
if not isinstance(e, dict) or e.get('primary')
]
if primary_emails:
email = primary_emails[0]
if isinstance(email, dict):
email = email.get('email', '')
data['email'] = email
return data
def _user_data(self, access_token, path=None):
url = urljoin(self.api_url(), 'user{0}'.format(path or ''))
return self.get_json(url, params={'access_token': access_token})
class GithubMemberOAuth2(GithubOAuth2):
no_member_string = ''
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
user_data = super(GithubMemberOAuth2, self).user_data(
access_token, *args, **kwargs
)
try:
self.request(self.member_url(user_data), params={
'access_token': access_token
})
except HTTPError as err:
# if the user is a member of the organization, response code
# will be 204, see http://bit.ly/ZS6vFl
if err.response.status_code != 204:
raise AuthFailed(self,
'User doesn\'t belong to the organization')
return user_data
def member_url(self, user_data):
raise NotImplementedError('Implement in subclass')
class GithubOrganizationOAuth2(GithubMemberOAuth2):
"""Github OAuth2 authentication backend for organizations"""
name = 'github-org'
no_member_string = 'User doesn\'t belong to the organization'
def member_url(self, user_data):
return urljoin(
self.api_url(),
'orgs/{org}/members/{username}'.format(
org=self.setting('NAME'),
username=user_data.get('login')
)
)
class GithubTeamOAuth2(GithubMemberOAuth2):
"""Github OAuth2 authentication backend for teams"""
name = 'github-team'
no_member_string = 'User doesn\'t belong to the team'
def member_url(self, user_data):
return urljoin(
self.api_url(),
'teams/{team_id}/members/{username}'.format(
team_id=self.setting('ID'),
username=user_data.get('login')
)
)
| bsd-3-clause |
tomer8007/kik-bot-api-unofficial | kik_unofficial/protobuf/messagepath/v1/feature_payment_pb2.py | 1 | 3906 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messagepath/v1/feature_payment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messagepath/v1/feature_payment.proto',
package='common.messagepath.v1',
syntax='proto3',
serialized_pb=_b('\n$messagepath/v1/feature_payment.proto\x12\x15\x63ommon.messagepath.v1\x1a\x19protobuf_validation.proto\"\x9c\x01\n\x1cTransactionDetailsAttachment\x12J\n\x06target\x18\x01 \x01(\x0e\x32:.common.messagepath.v1.TransactionDetailsAttachment.Target\"0\n\x06Target\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SENDER\x10\x01\x12\r\n\tRECIPIENT\x10\x02\x42y\n\x19\x63om.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\xa2\x02\x03KPBb\x06proto3')
,
dependencies=[protobuf__validation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TRANSACTIONDETAILSATTACHMENT_TARGET = _descriptor.EnumDescriptor(
name='Target',
full_name='common.messagepath.v1.TransactionDetailsAttachment.Target',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SENDER', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECIPIENT', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=199,
serialized_end=247,
)
_sym_db.RegisterEnumDescriptor(_TRANSACTIONDETAILSATTACHMENT_TARGET)
_TRANSACTIONDETAILSATTACHMENT = _descriptor.Descriptor(
name='TransactionDetailsAttachment',
full_name='common.messagepath.v1.TransactionDetailsAttachment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target', full_name='common.messagepath.v1.TransactionDetailsAttachment.target', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TRANSACTIONDETAILSATTACHMENT_TARGET,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=91,
serialized_end=247,
)
_TRANSACTIONDETAILSATTACHMENT.fields_by_name['target'].enum_type = _TRANSACTIONDETAILSATTACHMENT_TARGET
_TRANSACTIONDETAILSATTACHMENT_TARGET.containing_type = _TRANSACTIONDETAILSATTACHMENT
DESCRIPTOR.message_types_by_name['TransactionDetailsAttachment'] = _TRANSACTIONDETAILSATTACHMENT
TransactionDetailsAttachment = _reflection.GeneratedProtocolMessageType('TransactionDetailsAttachment', (_message.Message,), dict(
DESCRIPTOR = _TRANSACTIONDETAILSATTACHMENT,
__module__ = 'messagepath.v1.feature_payment_pb2'
# @@protoc_insertion_point(class_scope:common.messagepath.v1.TransactionDetailsAttachment)
))
_sym_db.RegisterMessage(TransactionDetailsAttachment)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031com.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\242\002\003KPB'))
# @@protoc_insertion_point(module_scope)
| mit |
pacoqueen/ginn | ginn/formularios/listado_productos.py | 1 | 9474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# ([email protected], [email protected]) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## listado_productos.py - Listado simple de productos para imprimir
###################################################################
## TODO: Barra de progreso. Aunque habría varios puntos donde
## meterla, no solo en el rellenar_tabla. También en las
## sugerencias, en el buscar...
###################################################################
from ventana import Ventana
from formularios import utils
import pygtk
pygtk.require('2.0')
import gtk
from framework import pclases
import mx.DateTime
class ListadoProductos(Ventana):
inicio = None
fin = None
cliente = None
resultado = []
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
self.usuario = usuario
global fin
Ventana.__init__(self, 'listado_productos.glade', objeto, usuario = usuario)
connections = {'b_salir/clicked': self.salir,
'b_limpiar/clicked': self.limpiar_tv,
'b_buscar/clicked': self.buscar,
'b_imprimir/clicked': self.imprimir,
'b_exportar/clicked': self.exportar}
self.add_connections(connections)
cols = (('Código', 'gobject.TYPE_STRING', False, True, False, None),
('Descripción', 'gobject.TYPE_STRING', False,True,False,None),
('PVP', 'gobject.TYPE_STRING', False,True,False,None),
('ID', 'gobject.TYPE_STRING', False, False, False, None))
utils.preparar_listview(self.wids['tv_datos'], cols)
col = self.wids['tv_datos'].get_column(2)
for cell in col.get_cell_renderers():
cell.set_property("xalign", 1)
self.wids['tv_datos'].connect("row-activated", self.abrir_producto)
self.wids['e_buscar'].grab_focus()
gtk.main()
def exportar(self, boton):
"""
Exporta el contenido del TreeView a un fichero csv.
"""
from informes.treeview2csv import treeview2csv
from formularios.reports import abrir_csv
tv = self.wids['tv_datos']
abrir_csv(treeview2csv(tv))
def abrir_producto(self, tv, path, column):
"""
Abre el producto al que se le ha hecho doble clic en una ventana nueva.
"""
model = tv.get_model()
idproducto = model[path][-1]
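        # The last column holds the product puid, which by the dispatch below
        # looks like "PV:<id>" for sale products and "PC:<id>" for purchase
        # products; the prefix decides which editor window is opened.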
try:
if "PV" in idproducto:
producto = pclases.ProductoVenta.get(idproducto.split(":")[1])
if producto.es_rollo():
from formularios import productos_de_venta_rollos
V = productos_de_venta_rollos.ProductosDeVentaRollos
ventana_producto = V(producto, usuario = self.usuario) # @UnusedVariable
elif producto.es_bala() or producto.es_bigbag():
from formularios import productos_de_venta_balas
V = productos_de_venta_balas.ProductosDeVentaBalas
ventana_producto = V(producto, usuario = self.usuario) # @UnusedVariable
elif "PC" in idproducto:
producto = pclases.ProductoCompra.get(idproducto.split(":")[1])
from formularios import productos_compra
V = productos_compra.ProductosCompra
ventana_producto = V(producto, usuario = self.usuario) # @UnusedVariable
except Exception, e:
utils.dialogo_info(titulo = "ERROR RECUPERANDO PRODUCTO",
texto = "El producto ID %d no se ha encontrado."
"\n\n"
"Compruebe que no haya sido eliminado recargand"
"o la consulta y vuelva a intentarlo."
"\n\n\n"
"Información de depuración:"
"\n%s" % (idproducto, e),
padre = self.wids['ventana'])
def chequear_cambios(self):
pass
def rellenar_tabla(self, items):
"""
Rellena el model con los items de la consulta
"""
model = self.wids['tv_datos'].get_model()
#model.clear()
try:
dde = pclases.DatosDeLaEmpresa.select()[0]
iva = dde.iva
except IndexError:
iva = 0.21
tarifa = pclases.Tarifa.get_tarifa_defecto()
for i in items:
if tarifa:
pvp = tarifa.obtener_precio(i) * (1 + iva)
else:
pvp = i.precioDefecto * (1 + iva)
model.append((i.codigo,
i.descripcion,
utils.float2str(pvp),
i.get_puid()))
def buscar(self, boton):
a_buscar = self.wids['e_buscar'].get_text()
productos = []
for p in utils.buscar_productos_compra(a_buscar):
productos.append(p)
for p in utils.buscar_productos_venta(a_buscar):
productos.append(p)
if not len(productos) and len(a_buscar):
# Busca algo de texto pero no se encontró
try:
productos = self.sugerir_productos(a_buscar)
except (ImportError, ValueError):
utils.dialogo_info(titulo = "SIN RESULTADOS",
texto = "No se encontraron productos con el"
"texto «%s»." % a_buscar,
padre = self.wids['ventana'])
self.rellenar_tabla(productos)
def sugerir_productos(self, txt):
"""
Intenta sugerir productos según el corrector Norving.
"""
from lib import spelling
palabras = []
for pc in pclases.ProductoCompra.select():
palabras.append(pc.codigo.lower())
palabras.append(pc.descripcion.lower())
for pc in pclases.ProductoVenta.select():
palabras.append(pc.codigo.lower())
palabras.append(pc.descripcion.lower())
palabras = " ".join(palabras)
corrector = spelling.SpellCorrector(palabras)
sugerencia = corrector.correct(txt.lower())
if sugerencia != txt:
res = utils.dialogo(titulo = "SUGERENCIA DE BÚSQUEDA",
texto="No se encontró «%s», ¿tal vez quiso decir «%s»?" % (
txt, sugerencia),
padre = self.wids['ventana'])
if res:
res = ([p for p in utils.buscar_productos_compra(sugerencia)]+
[p for p in utils.buscar_productos_venta(sugerencia)])
else:
res = []
else:
raise ValueError, "Sin alternativas que sugerir."
return res
def limpiar_tv(self, boton):
"""
Limpia el TreeView.
"""
model = self.wids['tv_datos'].get_model()
model.clear()
def imprimir(self, boton):
"""
Prepara la vista preliminar para la impresión del informe
"""
from informes.treeview2pdf import treeview2pdf
from formularios.reports import abrir_pdf
strfecha = utils.str_fecha(mx.DateTime.localtime())
informe = treeview2pdf(self.wids['tv_datos'],
titulo="Listado de productos con PVP (IVA incluido)",
fecha = strfecha)
if informe:
abrir_pdf(informe)
if __name__ == '__main__':
t = ListadoProductos()
| gpl-2.0 |
AMOboxTV/AMOBox.LegoBuild | script.module.youtube.dl/lib/youtube_dl/extractor/udn.py | 24 | 2302 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
js_to_json,
ExtractorError,
)
from ..compat import compat_urlparse
class UDNEmbedIE(InfoExtractor):
IE_DESC = '聯合影音'
_PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
_VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL
_TESTS = [{
'url': 'http://video.udn.com/embed/news/300040',
'md5': 'de06b4c90b042c128395a88f0384817e',
'info_dict': {
'id': '300040',
'ext': 'mp4',
'title': '生物老師男變女 全校挺"做自己"',
'thumbnail': 're:^https?://.*\.jpg$',
}
}, {
'url': 'https://video.udn.com/embed/news/300040',
'only_matching': True,
}, {
# From https://video.udn.com/news/303776
'url': 'https://video.udn.com/play/news/303776',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
page = self._download_webpage(url, video_id)
options = json.loads(js_to_json(self._html_search_regex(
r'var options\s*=\s*([^;]+);', page, 'video urls dictionary')))
video_urls = options['video']
if video_urls.get('youtube'):
return self.url_result(video_urls.get('youtube'), 'Youtube')
try:
del video_urls['youtube']
except KeyError:
pass
formats = [{
'url': self._download_webpage(
compat_urlparse.urljoin(url, api_url), video_id,
'retrieve url for %s video' % video_type),
'format_id': video_type,
'preference': 0 if video_type == 'mp4' else -1,
} for video_type, api_url in video_urls.items() if api_url]
if not formats:
raise ExtractorError('No videos found', expected=True)
self._sort_formats(formats)
thumbnail = None
if options.get('gallery') and len(options['gallery']):
thumbnail = options['gallery'][0].get('original')
return {
'id': video_id,
'formats': formats,
'title': options['title'],
'thumbnail': thumbnail
}
| gpl-2.0 |
AdamHull/namebench | nb_third_party/simplejson/ordered_dict.py | 1039 | 3370 | """Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
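            # splice the new node in just before the sentinel, i.e. at the
            # tail of the doubly linked list that records insertion order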
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
| apache-2.0 |
louietsai/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_text_file.py | 171 | 3436 | """Tests for distutils.text_file."""
import os
import unittest
from distutils.text_file import TextFile
from distutils.tests import support
from test.support import run_unittest
TEST_DATA = """# test file
line 3 \\
# intervening comment
continues on next line
"""
class TextFileTestCase(support.TempdirManager, unittest.TestCase):
def test_class(self):
# old tests moved from text_file.__main__
# so they are really called by the buildbots
# result 1: no fancy options
result1 = ['# test file\n', '\n', 'line 3 \\\n',
'# intervening comment\n',
' continues on next line\n']
# result 2: just strip comments
result2 = ["\n",
"line 3 \\\n",
" continues on next line\n"]
# result 3: just strip blank lines
result3 = ["# test file\n",
"line 3 \\\n",
"# intervening comment\n",
" continues on next line\n"]
# result 4: default, strip comments, blank lines,
# and trailing whitespace
result4 = ["line 3 \\",
" continues on next line"]
# result 5: strip comments and blanks, plus join lines (but don't
# "collapse" joined lines
result5 = ["line 3 continues on next line"]
# result 6: strip comments and blanks, plus join lines (and
# "collapse" joined lines
result6 = ["line 3 continues on next line"]
def test_input(count, description, file, expected_result):
result = file.readlines()
self.assertEqual(result, expected_result)
tmpdir = self.mkdtemp()
filename = os.path.join(tmpdir, "test.txt")
out_file = open(filename, "w")
try:
out_file.write(TEST_DATA)
finally:
out_file.close()
in_file = TextFile(filename, strip_comments=0, skip_blanks=0,
lstrip_ws=0, rstrip_ws=0)
try:
test_input(1, "no processing", in_file, result1)
finally:
in_file.close()
in_file = TextFile(filename, strip_comments=1, skip_blanks=0,
lstrip_ws=0, rstrip_ws=0)
try:
test_input(2, "strip comments", in_file, result2)
finally:
in_file.close()
in_file = TextFile(filename, strip_comments=0, skip_blanks=1,
lstrip_ws=0, rstrip_ws=0)
try:
test_input(3, "strip blanks", in_file, result3)
finally:
in_file.close()
in_file = TextFile(filename)
try:
test_input(4, "default processing", in_file, result4)
finally:
in_file.close()
in_file = TextFile(filename, strip_comments=1, skip_blanks=1,
join_lines=1, rstrip_ws=1)
try:
test_input(5, "join lines without collapsing", in_file, result5)
finally:
in_file.close()
in_file = TextFile(filename, strip_comments=1, skip_blanks=1,
join_lines=1, rstrip_ws=1, collapse_join=1)
try:
test_input(6, "join lines with collapsing", in_file, result6)
finally:
in_file.close()
def test_suite():
return unittest.makeSuite(TextFileTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
ztianai/INFO370-Project | code/cole/blockchain_index.py | 1 | 3061 | import time
import datetime
from blockchain.exceptions import APIException
from blockchain import blockexplorer
from dateutil.relativedelta import relativedelta
from urllib.error import HTTPError
curr_block_height = 277198
def get_transactions(height):
fail = True
while fail:
try:
block = blockexplorer.get_block_height(height)[0]
fail = False
except:
fail = True
curr_date = time.strftime('%Y-%m-%d', time.localtime(block.received_time))
new_date = curr_date
date_transactions = []
#while curr_date == new_date:
while True:
height = height + 1
for block in blockexplorer.get_block_height(height):
            new_date = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(block.received_time))
print(block.received_time)
#dt = "%d-%02d-%02d-%02d-%02d-%02d"%(block_datetime.year, block_datetime.month, block_datetime.day, block_datetime.hour, block_datetime.minute, block_datetime.second)
#field specification: ["in", transaction_key, referent_transaction_key, index, public_key, date]
date_transactions = date_transactions + block.transactions
#print(len(date_transactions))
return
def find_date_height(date, curr_block_height):
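    # Home in on the first block at or after `date`: advance in coarse jumps
    # of 500 blocks past the target time, then refine by stepping back 100,
    # forward 50, back 10 and finally forward 1 block at a time.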
unix_time = int(time.mktime(date.timetuple()))
block_time = get_time(curr_block_height)
while unix_time > block_time:
print(curr_block_height)
curr_block_height = curr_block_height + 500
block_time = get_time(curr_block_height)
while unix_time < block_time:
curr_block_height = curr_block_height - 100
block_time = get_time(curr_block_height)
while unix_time > block_time:
curr_block_height = curr_block_height + 50
block_time = get_time(curr_block_height)
while unix_time < block_time:
curr_block_height = curr_block_height - 10
block_time = get_time(curr_block_height)
while unix_time > block_time:
curr_block_height = curr_block_height + 1
block_time = get_time(curr_block_height)
return curr_block_height
def get_time(height):
try:
return blockexplorer.get_block_height(height)[0].received_time
except KeyError:
return get_time(height + 1)
except APIException:
time.sleep(20)
return get_time(height)
def print_block(block):
try:
print(block.value)
print(block.tx_index)
print(block.address)
print(block.n)
except AttributeError:
pass
def find_start_blocks():
dates = []
date = datetime.datetime.strptime('2013-12-27', '%Y-%m-%d')
end_date = datetime.datetime.strptime('2014-11-6', '%Y-%m-%d')
while date < end_date:
curr = find_date_height(date, curr_block_height)
row = str(date.strftime('%Y-%m-%d')) + ',' + str(curr)
print(row)
date = date + datetime.timedelta(days=7)
dates.append(row)
with open('dates.txt', 'w') as date_out:
date_out.write('\n'.join(dates))
find_start_blocks() | mit |
mancoast/CPythonPyc_test | fail/325_test_zipimport.py | 55 | 17221 | import sys
import os
import marshal
import imp
import struct
import time
import unittest
from test import support
from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co
# some tests can be run even without zlib
try:
import zlib
except ImportError:
zlib = None
from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED
import zipimport
import linecache
import doctest
import inspect
import io
from traceback import extract_tb, extract_stack, print_tb
raise_src = 'def do_raise(): raise TypeError\n'
def make_pyc(co, mtime):
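    # Build a .pyc image by hand: the import magic number, a 4-byte
    # little-endian source mtime, then the marshalled code object.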
data = marshal.dumps(co)
if type(mtime) is type(0.0):
# Mac mtimes need a bit of special casing
if mtime < 0x7fffffff:
mtime = int(mtime)
else:
mtime = int(-0x100000000 + int(mtime))
pyc = imp.get_magic() + struct.pack("<i", int(mtime)) + data
return pyc
def module_path_to_dotted_name(path):
return path.replace(os.sep, '.')
NOW = time.time()
test_pyc = make_pyc(test_co, NOW)
TESTMOD = "ziptestmodule"
TESTPACK = "ziptestpackage"
TESTPACK2 = "ziptestpackage2"
TEMP_ZIP = os.path.abspath("junk95142.zip")
pyc_file = imp.cache_from_source(TESTMOD + '.py')
pyc_ext = ('.pyc' if __debug__ else '.pyo')
class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
compression = ZIP_STORED
def setUp(self):
# We're reusing the zip archive path, so we must clear the
# cached directory info and linecache
linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
def doTest(self, expected_ext, files, *modules, **kw):
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
stuff = kw.get("stuff", None)
if stuff is not None:
# Prepend 'stuff' to the start of the zipfile
with open(TEMP_ZIP, "rb") as f:
data = f.read()
with open(TEMP_ZIP, "wb") as f:
f.write(stuff)
f.write(data)
sys.path.insert(0, TEMP_ZIP)
mod = __import__(".".join(modules), globals(), locals(),
["__dummy__"])
call = kw.get('call')
if call is not None:
call(mod)
if expected_ext:
file = mod.get_file()
self.assertEqual(file, os.path.join(TEMP_ZIP,
*modules) + expected_ext)
finally:
z.close()
os.remove(TEMP_ZIP)
def testAFakeZlib(self):
#
# This could cause a stack overflow before: importing zlib.py
# from a compressed archive would cause zlib to be imported
# which would find zlib.py in the archive, which would... etc.
#
# This test *must* be executed first: it must be the first one
# to trigger zipimport to import zlib (zipimport caches the
# zlib.decompress function object, after which the problem being
# tested here wouldn't be a problem anymore...
# (Hence the 'A' in the test method name: to make it the first
# item in a list sorted by name, like unittest.makeSuite() does.)
#
# This test fails on platforms on which the zlib module is
# statically linked, but the problem it tests for can't
# occur in that case (builtin modules are always found first),
# so we'll simply skip it then. Bug #765456.
#
if "zlib" in sys.builtin_module_names:
return
if "zlib" in sys.modules:
del sys.modules["zlib"]
files = {"zlib.py": (NOW, test_src)}
try:
self.doTest(".py", files, "zlib")
except ImportError:
if self.compression != ZIP_DEFLATED:
self.fail("expected test to not raise ImportError")
else:
if self.compression != ZIP_STORED:
self.fail("expected test to raise ImportError")
def testPy(self):
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD)
def testPyc(self):
files = {TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTMOD)
def testBoth(self):
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTMOD)
def testEmptyPy(self):
files = {TESTMOD + ".py": (NOW, "")}
self.doTest(None, files, TESTMOD)
def testBadMagic(self):
# make pyc magic word invalid, forcing loading from .py
badmagic_pyc = bytearray(test_pyc)
badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
self.doTest(".py", files, TESTMOD)
def testBadMagic2(self):
# make pyc magic word invalid, causing an ImportError
badmagic_pyc = bytearray(test_pyc)
badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit
files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
try:
self.doTest(".py", files, TESTMOD)
except ImportError:
pass
else:
self.fail("expected ImportError; import from bad pyc")
def testBadMTime(self):
badtime_pyc = bytearray(test_pyc)
# flip the second bit -- not the first as that one isn't stored in the
# .py's mtime in the zip archive.
badtime_pyc[7] ^= 0x02
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, badtime_pyc)}
self.doTest(".py", files, TESTMOD)
def testPackage(self):
packdir = TESTPACK + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir + TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTPACK, TESTMOD)
def testDeepPackage(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTPACK, TESTPACK2, TESTMOD)
def testZipImporterMethods(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP)
self.assertEqual(zi.archive, TEMP_ZIP)
self.assertEqual(zi.is_package(TESTPACK), True)
mod = zi.load_module(TESTPACK)
self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)
self.assertEqual(zi.is_package(packdir + '__init__'), False)
self.assertEqual(zi.is_package(packdir + TESTPACK2), True)
self.assertEqual(zi.is_package(packdir2 + TESTMOD), False)
mod_path = packdir2 + TESTMOD
mod_name = module_path_to_dotted_name(mod_path)
__import__(mod_name)
mod = sys.modules[mod_name]
self.assertEqual(zi.get_source(TESTPACK), None)
self.assertEqual(zi.get_source(mod_path), None)
self.assertEqual(zi.get_filename(mod_path), mod.__file__)
# To pass in the module name instead of the path, we must use the
# right importer
loader = mod.__loader__
self.assertEqual(loader.get_source(mod_name), None)
self.assertEqual(loader.get_filename(mod_name), mod.__file__)
# test prefix and archivepath members
zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
self.assertEqual(zi2.archive, TEMP_ZIP)
self.assertEqual(zi2.prefix, TESTPACK + os.sep)
finally:
z.close()
os.remove(TEMP_ZIP)
def testZipImporterMethodsInSubDirectory(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
self.assertEqual(zi.archive, TEMP_ZIP)
self.assertEqual(zi.prefix, packdir)
self.assertEqual(zi.is_package(TESTPACK2), True)
mod = zi.load_module(TESTPACK2)
self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__)
self.assertEqual(
zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
self.assertEqual(
zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)
mod_path = TESTPACK2 + os.sep + TESTMOD
mod_name = module_path_to_dotted_name(mod_path)
__import__(mod_name)
mod = sys.modules[mod_name]
self.assertEqual(zi.get_source(TESTPACK2), None)
self.assertEqual(zi.get_source(mod_path), None)
self.assertEqual(zi.get_filename(mod_path), mod.__file__)
# To pass in the module name instead of the path, we must use the
# right importer
loader = mod.__loader__
self.assertEqual(loader.get_source(mod_name), None)
self.assertEqual(loader.get_filename(mod_name), mod.__file__)
finally:
z.close()
os.remove(TEMP_ZIP)
def testGetData(self):
z = ZipFile(TEMP_ZIP, "w")
z.compression = self.compression
try:
name = "testdata.dat"
data = bytes(x for x in range(256))
z.writestr(name, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP)
self.assertEqual(data, zi.get_data(name))
self.assertIn('zipimporter object', repr(zi))
finally:
z.close()
os.remove(TEMP_ZIP)
def testImporterAttr(self):
src = """if 1: # indent hack
def get_file():
return __file__
if __loader__.get_data("some.data") != b"some data":
raise AssertionError("bad data")\n"""
pyc = make_pyc(compile(src, "<???>", "exec"), NOW)
files = {TESTMOD + pyc_ext: (NOW, pyc),
"some.data": (NOW, "some data")}
self.doTest(pyc_ext, files, TESTMOD)
def testImport_WithStuff(self):
# try importing from a zipfile which contains additional
# stuff at the beginning of the file
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD,
stuff=b"Some Stuff"*31)
def assertModuleSource(self, module):
self.assertEqual(inspect.getsource(module), test_src)
def testGetSource(self):
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD, call=self.assertModuleSource)
def testGetCompiledSource(self):
pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW)
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, pyc)}
self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource)
def runDoctest(self, callback):
files = {TESTMOD + ".py": (NOW, test_src),
"xyz.txt": (NOW, ">>> log.append(True)\n")}
self.doTest(".py", files, TESTMOD, call=callback)
def doDoctestFile(self, module):
log = []
old_master, doctest.master = doctest.master, None
try:
doctest.testfile(
'xyz.txt', package=module, module_relative=True,
globs=locals()
)
finally:
doctest.master = old_master
self.assertEqual(log,[True])
def testDoctestFile(self):
self.runDoctest(self.doDoctestFile)
def doDoctestSuite(self, module):
log = []
doctest.DocFileTest(
'xyz.txt', package=module, module_relative=True,
globs=locals()
).run()
self.assertEqual(log,[True])
def testDoctestSuite(self):
self.runDoctest(self.doDoctestSuite)
def doTraceback(self, module):
try:
module.do_raise()
except:
tb = sys.exc_info()[2].tb_next
f,lno,n,line = extract_tb(tb, 1)[0]
self.assertEqual(line, raise_src.strip())
f,lno,n,line = extract_stack(tb.tb_frame, 1)[0]
self.assertEqual(line, raise_src.strip())
s = io.StringIO()
print_tb(tb, 1, s)
self.assertTrue(s.getvalue().endswith(raise_src))
else:
raise AssertionError("This ought to be impossible")
def testTraceback(self):
files = {TESTMOD + ".py": (NOW, raise_src)}
self.doTest(None, files, TESTMOD, call=self.doTraceback)
@unittest.skipIf(support.TESTFN_UNENCODABLE is None,
"need an unencodable filename")
def testUnencodable(self):
filename = support.TESTFN_UNENCODABLE + ".zip"
z = ZipFile(filename, "w")
zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW))
zinfo.compress_type = self.compression
z.writestr(zinfo, test_src)
z.close()
try:
zipimport.zipimporter(filename)
finally:
os.remove(filename)
@unittest.skipUnless(zlib, "requires zlib")
class CompressedZipImportTestCase(UncompressedZipImportTestCase):
compression = ZIP_DEFLATED
class BadFileZipImportTestCase(unittest.TestCase):
def assertZipFailure(self, filename):
self.assertRaises(zipimport.ZipImportError,
zipimport.zipimporter, filename)
def testNoFile(self):
self.assertZipFailure('AdfjdkFJKDFJjdklfjs')
def testEmptyFilename(self):
self.assertZipFailure('')
def testBadArgs(self):
self.assertRaises(TypeError, zipimport.zipimporter, None)
self.assertRaises(TypeError, zipimport.zipimporter, TESTMOD, kwd=None)
def testFilenameTooLong(self):
self.assertZipFailure('A' * 33000)
def testEmptyFile(self):
support.unlink(TESTMOD)
open(TESTMOD, 'w+').close()
self.assertZipFailure(TESTMOD)
def testFileUnreadable(self):
support.unlink(TESTMOD)
fd = os.open(TESTMOD, os.O_CREAT, 000)
try:
os.close(fd)
self.assertZipFailure(TESTMOD)
finally:
# If we leave "the read-only bit" set on Windows, nothing can
# delete TESTMOD, and later tests suffer bogus failures.
os.chmod(TESTMOD, 0o666)
support.unlink(TESTMOD)
def testNotZipFile(self):
support.unlink(TESTMOD)
fp = open(TESTMOD, 'w+')
fp.write('a' * 22)
fp.close()
self.assertZipFailure(TESTMOD)
# XXX: disabled until this works on Big-endian machines
def _testBogusZipFile(self):
support.unlink(TESTMOD)
fp = open(TESTMOD, 'w+')
fp.write(struct.pack('=I', 0x06054B50))
fp.write('a' * 18)
fp.close()
z = zipimport.zipimporter(TESTMOD)
try:
self.assertRaises(TypeError, z.find_module, None)
self.assertRaises(TypeError, z.load_module, None)
self.assertRaises(TypeError, z.is_package, None)
self.assertRaises(TypeError, z.get_code, None)
self.assertRaises(TypeError, z.get_data, None)
self.assertRaises(TypeError, z.get_source, None)
error = zipimport.ZipImportError
self.assertEqual(z.find_module('abc'), None)
self.assertRaises(error, z.load_module, 'abc')
self.assertRaises(error, z.get_code, 'abc')
self.assertRaises(IOError, z.get_data, 'abc')
self.assertRaises(error, z.get_source, 'abc')
self.assertRaises(error, z.is_package, 'abc')
finally:
zipimport._zip_directory_cache.clear()
def test_main():
try:
support.run_unittest(
UncompressedZipImportTestCase,
CompressedZipImportTestCase,
BadFileZipImportTestCase,
)
finally:
support.unlink(TESTMOD)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
eHealthAfrica/onadata | onadata/apps/sms_support/views.py | 9 | 2704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import json
from django.http import HttpResponse
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from tools import SMS_API_ERROR
from parser import process_incoming_smses
def get_response(data):
response = {'status': data.get('code'),
'message': data.get('text')}
return HttpResponse(json.dumps(response), mimetype='application/json')
@require_GET
def import_submission(request, username):
""" Process an SMS text as a form submission
:param string identity: phone number of the sender
:param string text: SMS content
:returns: a JSON dict with:
'status': one of 'ACCEPTED', 'REJECTED', 'PARSING_FAILED'
'message': Error message if not ACCEPTED.
            'id': Unique submission ID if ACCEPTED. """
return import_submission_for_form(request, username, None)
@require_POST
@csrf_exempt
def import_multiple_submissions(request, username):
''' Process several POSTED SMS texts as XForm submissions
:param json messages: JSON list of {"identity": "x", "text": "x"}
:returns json list of
{"status": "x", "message": "x", "id": "x"} '''
return import_multiple_submissions_for_form(request, username, None)
@require_GET
def import_submission_for_form(request, username, id_string):
""" idem import_submission with a defined id_string """
sms_identity = request.GET.get('identity', '').strip()
sms_text = request.GET.get('text', '').strip()
if not sms_identity or not sms_text:
return get_response({'code': SMS_API_ERROR,
'text': _(u"`identity` and `message` are "
u"both required and must not be "
u"empty.")})
incomings = [(sms_identity, sms_text)]
response = process_incoming_smses(username, incomings, id_string)[-1]
return get_response(response)
@require_POST
@csrf_exempt
def import_multiple_submissions_for_form(request, username, id_string):
""" idem import_multiple_submissions with a defined id_string """
messages = json.loads(request.POST.get('messages', '[]'))
incomings = [(m.get('identity', ''), m.get('text', '')) for m in messages]
responses = [{'status': d.get('code'),
'message': d.get('text'),
'instanceID': d.get('id')} for d
in process_incoming_smses(username, incomings, id_string)]
return HttpResponse(json.dumps(responses), mimetype='application/json')
| bsd-2-clause |
hassanabidpk/djangoproject.com | docs/management/commands/update_index.py | 8 | 1112 | from optparse import make_option as Option
from django.core.management.base import BaseCommand
from ...search import DocumentDocType
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
Option("--using", default=None,
help='The name of the connection to use'),
Option("-d", "--delete",
default=False,
dest='delete',
action='store_true',
help='Whether to delete the index or not'),
)
def log(self, msg, level='2'):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
def handle(self, *args, **options):
self.verbosity = options['verbosity']
for ok, item in DocumentDocType.index_all(using=options['using'],
delete=options['delete']):
id_ = item.get('index', {}).get('_id', item)
if ok:
self.log('Successfully indexed item %s' % id_)
else:
self.log('Failed indexing item %s' % id_)
| bsd-3-clause |
RedHatQE/cfme_tests | cfme/tests/configure/test_db_backup_schedule.py | 1 | 8973 | # -*- coding: utf-8 -*-
from datetime import datetime
import fauxfactory
import pytest
from dateutil.relativedelta import relativedelta
from six.moves.urllib.parse import urlparse
from cfme.utils import conf
from cfme.utils import testgen
from cfme.utils.pretty import Pretty
from cfme.utils.ssh import SSHClient
from cfme.utils.virtual_machines import deploy_template
from cfme.utils.wait import wait_for
PROTOCOL_TYPES = ('smb', 'nfs')
class DbBackupData(Pretty):
""" Container for test data
Contains data from cfme_data and credentials conf files used in tests
+ protocol type, schedule name and schedule description
Args:
machine_id: cfme_data yaml key
``log_db_depot > *machine_id*``
machine_data: cfme_data yaml key
``log_db_depot > machine_id > *machine_data*``
protocol_type: One of :py:var:`PROTOCOL_TYPES`
"""
required_keys = {
'smb': ('sub_folder', 'path_on_host'),
'nfs': ('sub_folder',)
}
pretty_attrs = ['machine_data', 'protocol_type', 'protocol_data']
def __init__(self, machine_data, protocol_type, protocol_data):
self._param_name = protocol_type
self.protocol_type = protocol_type
self.protocol_data = protocol_data
self.schedule_name = self._get_random_schedule_name()
self.schedule_description = self._get_random_schedule_description()
self.credentials = self._get_credentials()
# data from cfme_data are accessed directly as attributes
self.__dict__.update(self._get_data(protocol_data, protocol_type))
def _get_random_schedule_name(self):
return '{}_name'.format(fauxfactory.gen_alphanumeric())
def _get_random_schedule_description(self):
return '{}_desc'.format(fauxfactory.gen_alphanumeric())
def _get_credentials(self):
""" Loads credentials that correspond to 'credentials' key from machine_data dict
"""
creds_key = conf.cfme_data.get('log_db_operations', {}).get('credentials', False)
assert creds_key, \
"No 'credentials' key found for machine {machine_id}".format(**self.__dict__)
assert creds_key in conf.credentials and conf.credentials[creds_key],\
"No credentials for key '{}' found in credentials yaml".format(creds_key)
credentials = conf.credentials[creds_key]
return credentials
def _get_data(self, protocol_data, protocol_type):
""" Loads data from machine_data dict
"""
data = {}
for key in self.required_keys[protocol_type]:
assert key in protocol_data and protocol_data[key],\
"'{}' key must be set for scheduled {} backup to work".format(key, protocol_type)
data[key] = protocol_data[key]
return data
@property
def id(self):
""" Used for pretty test identification string in report
"""
return '{protocol_type}-{sub_folder}'.format(**self.__dict__)
def pytest_generate_tests(metafunc):
""" Generates DbBackupData fixture called 'db_backup_data' with all the necessary data
"""
data = conf.cfme_data.get('log_db_operations', {})
if 'db_backup_data' in metafunc.fixturenames:
argnames = 'db_backup_data'
argvalues = []
ids = []
machine_data = data.get("log_db_depot_template")
if not machine_data:
pytest.skip('No log_db_depot information available!')
for protocol in data["protocols"]:
if protocol in PROTOCOL_TYPES and data["protocols"][protocol].get('use_for_db_backups',
False):
db_backup_data = DbBackupData(machine_data, protocol, data["protocols"][protocol])
argvalues.append(db_backup_data)
ids.append(db_backup_data.id)
testgen.parametrize(metafunc, argnames, argvalues, ids=ids)
@pytest.fixture(scope="module")
def db_depot_machine_ip(request, appliance):
""" Deploy vm for depot test
This fixture uses for deploy vm on provider from yaml and then receive it's ip
After test run vm deletes from provider
"""
depot_machine_name = "test_db_backup_depot_{}".format(fauxfactory.gen_alphanumeric())
data = conf.cfme_data.get("log_db_operations", {})
depot_provider_key = data["log_db_depot_template"]["provider"]
depot_template_name = data["log_db_depot_template"]["template_name"]
vm = deploy_template(depot_provider_key,
depot_machine_name,
template_name=depot_template_name)
if vm.ip is None:
pytest.skip('Depot VM does not have IP address')
yield vm.ip
vm.cleanup()
def get_schedulable_datetime():
""" Returns datetime for closest schedulable time (every 5 minutes)
"""
dt = datetime.utcnow()
delta_min = 5 - (dt.minute % 5)
    if (delta_min < 3):  # If the schedule would be set to run in under 3 minutes
delta_min += 5 # Pad with 5 minutes
dt += relativedelta(minutes=delta_min)
return dt
def get_ssh_client(hostname, credentials):
""" Returns fresh ssh client connected to given server using given credentials
"""
hostname = urlparse('scheme://{}'.format(hostname)).netloc
connect_kwargs = {
'username': credentials['username'],
'password': credentials['password'],
'hostname': hostname,
}
return SSHClient(**connect_kwargs)
def get_full_path_to_file(path_on_host, schedule_name):
""" Returns full path to db backup file on host
"""
if not path_on_host.endswith('/'):
path_on_host += '/'
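    # Backups land under db_backup/region_<n>/<schedule name>; the region id
    # is not known in advance, so it is matched with a shell glob.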
full_path = '{}db_backup/region_*/{}'.format(path_on_host, schedule_name)
return full_path
@pytest.mark.tier(3)
def test_db_backup_schedule(request, db_backup_data, db_depot_machine_ip, appliance):
""" Test scheduled one-type backup on given machines using smb/nfs
Polarion:
assignee: sbulage
casecomponent: Appliance
caseimportance: high
initialEstimate: 1/4h
"""
# ---- Create new db backup schedule set to run in the next 6 min
dt = get_schedulable_datetime()
# the dash is there to make strftime not use a leading zero
hour = dt.strftime('%-H')
minute = dt.strftime('%-M')
db_depot_uri = '{}{}'.format(db_depot_machine_ip, db_backup_data.sub_folder)
sched_args = {
'name': db_backup_data.schedule_name,
'description': db_backup_data.schedule_description,
'active': True,
'action_type': 'Database Backup',
'run_type': "Once",
'run_every': None,
'time_zone': "(GMT+00:00) UTC",
'start_date': dt,
'start_hour': hour,
'start_minute': minute,
'depot_name': fauxfactory.gen_alphanumeric()
}
if db_backup_data.protocol_type == 'smb':
sched_args.update({
'backup_type': 'Samba',
'uri': db_depot_uri,
'samba_username': db_backup_data.credentials['username'],
'samba_password': db_backup_data.credentials['password'],
})
else:
sched_args.update({
'backup_type': 'Network File System',
'uri': db_depot_uri
})
if db_backup_data.protocol_type == 'nfs':
path_on_host = urlparse('nfs://{}'.format(db_depot_uri)).path
else:
path_on_host = db_backup_data.path_on_host
full_path = get_full_path_to_file(path_on_host, db_backup_data.schedule_name)
sched = appliance.collections.system_schedules.create(**sched_args)
# ----
# ---- Add cleanup finalizer
def delete_sched_and_files():
with get_ssh_client(db_depot_uri, db_backup_data.credentials) as ssh_client:
ssh_client.run_command('rm -rf {}'.format(full_path), ensure_user=True)
sched.delete()
request.addfinalizer(delete_sched_and_files)
# ----
# ---- Wait for schedule to run
# check last date at schedule's table
wait_for(
lambda: sched.last_run_date != '',
num_sec=600,
delay=30,
fail_func=sched.browser.refresh,
message='Schedule failed to run in 10mins from being set up'
)
# ----
# ---- Check if the db backup file exists
with get_ssh_client(db_depot_uri, db_backup_data.credentials) as ssh_client:
assert ssh_client.run_command('cd "{}"'.format(path_on_host), ensure_user=True).success, (
"Could not cd into '{}' over ssh".format(path_on_host))
# Find files no more than 5 minutes old, count them and remove newline
file_check_cmd = "find {}/* -cmin -5 | wc -l | tr -d '\n' ".format(full_path)
wait_for(
lambda: ssh_client.run_command(file_check_cmd, ensure_user=True).output == '1',
delay=5,
num_sec=60,
message="File '{}' not found on share".format(full_path)
)
# ----
| gpl-2.0 |
trishnaguha/ansible | lib/ansible/modules/cloud/amazon/lightsail.py | 40 | 15620 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
  - Creates or deletes instances in AWS Lightsail and optionally waits for them to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
user_data:
description:
- Launch script that can configure the instance with additional data
key_pair_name:
description:
- Name of the key pair to use with the instance
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
type: bool
default: 'yes'
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.public_ip_address }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
    description: if an instance has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
HAS_BOTO3, camel_dict_to_snake_dict)
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
key_pair_name = module.params.get('key_pair_name')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
changed = True
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
"""
Starts or stops an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to start/stop
state: Target state ("running" or "stopped")
Returns a dictionary of instance information
about the instance started/stopped
If the instance was not able to state change,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Try state change
if inst is not None and inst['state']['name'] != state:
try:
if state == 'running':
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
changed = True
# Grab current instance info
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def core(module):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
client = None
try:
client = boto3_conn(module, conn_type='client', resource='lightsail',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
changed = False
state = module.params['state']
name = module.params['name']
if state == 'absent':
changed, instance_dict = delete_instance(module, client, name)
elif state in ('running', 'stopped'):
changed, instance_dict = startstop_instance(module, client, name, state)
elif state == 'restarted':
changed, instance_dict = restart_instance(module, client, name)
elif state == 'present':
changed, instance_dict = create_instance(module, client, name)
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' handle exceptions where this function is called '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
raise
return inst['instance']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
zone=dict(type='str'),
blueprint_id=dict(type='str'),
bundle_id=dict(type='str'),
key_pair_name=dict(type='str'),
user_data=dict(type='str'),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not HAS_BOTOCORE:
module.fail_json(msg='Python module "botocore" is missing, please install it')
try:
core(module)
except (botocore.exceptions.ClientError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
juhalindfors/bazel-patches | third_party/py/mock/tests/testmagicmethods.py | 109 | 14863 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from tests.support import unittest2, inPy3k
try:
unicode
except NameError:
# Python 3
unicode = str
long = int
import inspect
import sys
from mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest2.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def test_magicmock_del(self):
mock = MagicMock()
# before using getitem
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
mock = MagicMock()
# this time use it first
mock['foo']
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
def test_magic_method_wrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertFalse(mock.__getitem__ is f)
self.assertEqual(mock['foo'], (mock, 'fish'))
self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
mock.__getitem__ = mock
self.assertTrue(mock.__getitem__ is mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
@unittest2.skipIf(inPy3k, "no unicode in Python 3")
def test_unicode(self):
mock = Mock()
self.assertEqual(unicode(mock), unicode(str(mock)))
mock.__unicode__ = lambda s: unicode('foo')
self.assertEqual(unicode(mock), unicode('foo'))
def test_dict_methods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def test_numeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
@unittest2.skipIf(inPy3k, 'no truediv in Python 3')
def test_truediv(self):
mock = MagicMock()
mock.__truediv__.return_value = 6
context = {'mock': mock}
code = 'from __future__ import division\nresult = mock / 7\n'
exec(code, context)
self.assertEqual(context['result'], 6)
mock.__rtruediv__.return_value = 3
code = 'from __future__ import division\nresult = 2 / mock\n'
exec(code, context)
self.assertEqual(context['result'], 3)
@unittest2.skipIf(not inPy3k, 'truediv is available in Python 2')
def test_no_truediv(self):
self.assertRaises(
AttributeError, getattr, MagicMock(), '__truediv__'
)
self.assertRaises(
AttributeError, getattr, MagicMock(), '__rtruediv__'
)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
m = Mock()
self.assertTrue(bool(m))
nonzero = lambda s: False
if not inPy3k:
m.__nonzero__ = nonzero
else:
m.__bool__ = nonzero
self.assertFalse(bool(m))
def test_comparison(self):
# note: this test fails with Jython 2.5.1 due to a Jython bug
# it is fixed in jython 2.5.2
if not inPy3k:
# incomparable in Python 3
            self.assertEqual(Mock() < 3, object() < 3)
            self.assertEqual(Mock() > 3, object() > 3)
            self.assertEqual(Mock() <= 3, object() <= 3)
            self.assertEqual(Mock() >= 3, object() >= 3)
else:
self.assertRaises(TypeError, lambda: MagicMock() < object())
self.assertRaises(TypeError, lambda: object() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > object())
self.assertRaises(TypeError, lambda: object() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= object())
self.assertRaises(TypeError, lambda: object() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= object())
self.assertRaises(TypeError, lambda: object() >= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
        self.assertTrue(mock < 3)
        self.assertTrue(mock > 3)
        self.assertTrue(mock <= 3)
        self.assertTrue(mock >= 3)
def test_equality(self):
for mock in Mock(), MagicMock():
self.assertEqual(mock == mock, True)
self.assertIsInstance(mock == mock, bool)
self.assertEqual(mock != mock, False)
self.assertIsInstance(mock != mock, bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
mock = MagicMock()
mock.__eq__.return_value = True
self.assertIsInstance(mock == 3, bool)
self.assertEqual(mock == 3, True)
mock.__ne__.return_value = False
self.assertIsInstance(mock != 3, bool)
self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def test_magicmock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
name = '__nonzero__'
other = '__bool__'
if inPy3k:
name, other = other, name
getattr(mock, name).return_value = False
self.assertFalse(hasattr(mock, other))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginery__'))
def test_magic_mock_equality(self):
mock = MagicMock()
self.assertIsInstance(mock == object(), bool)
self.assertIsInstance(mock != object(), bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
def test_magicmock_defaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertEqual(long(mock), long(1))
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertEqual(unicode(mock), object.__str__(mock))
self.assertIsInstance(unicode(mock), unicode)
self.assertTrue(bool(mock))
if not inPy3k:
self.assertEqual(oct(mock), '1')
else:
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
@unittest2.skipIf(inPy3k, "no __cmp__ in Python 3")
def test_non_default_magic_methods(self):
mock = MagicMock()
self.assertRaises(AttributeError, lambda: mock.__cmp__)
mock = Mock()
mock.__cmp__ = lambda s, o: 0
self.assertEqual(mock, object())
def test_magic_methods_and_spec(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_magic_methods_and_spec_set(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegexp(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def test_attributes_and_return_value(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
def test_magic_methods_are_magic_mocks(self):
mock = MagicMock()
self.assertIsInstance(mock.__getitem__, MagicMock)
mock[1][2].__getitem__.return_value = 3
self.assertEqual(mock[1][2][3], 3)
def test_magic_method_reset_mock(self):
mock = MagicMock()
str(mock)
self.assertTrue(mock.__str__.called)
mock.reset_mock()
self.assertFalse(mock.__str__.called)
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_dir(self):
# overriding the default implementation
for mock in Mock(), MagicMock():
def _dir(self):
return ['foo']
mock.__dir__ = _dir
self.assertEqual(dir(mock), ['foo'])
@unittest2.skipIf('PyPy' in sys.version, "This fails differently on pypy")
def test_bound_methods(self):
m = Mock()
# XXXX should this be an expected failure instead?
# this seems like it should work, but is hard to do without introducing
# other api inconsistencies. Failure message could be better though.
m.__iter__ = [3].__iter__
self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
def test_iterable_as_iter_return_value(self):
m = MagicMock()
m.__iter__.return_value = [1, 2, 3]
self.assertEqual(list(m), [1, 2, 3])
self.assertEqual(list(m), [1, 2, 3])
m.__iter__.return_value = iter([4, 5, 6])
self.assertEqual(list(m), [4, 5, 6])
self.assertEqual(list(m), [])
if __name__ == '__main__':
unittest2.main()
| apache-2.0 |
coffeemakr/torweb | torweb/api/json/encoder.py | 1 | 1539 | import datetime
import json
import txtorcon.util
from torweb.api.json import (base, circuit, minimalstream,
router)
from txtorcon.util import ipaddr as ipaddress
from txtorcon import Router, Circuit, Stream
IPADDRESSES = (ipaddress.IPv4Address,
ipaddress.IPv6Address,
ipaddress.IPAddress)
__all__ = ['ExtendedJSONEncoder']
class ExtendedJSONEncoder(json.JSONEncoder):
'''
    Customized JSON encoder which handles the following objects:
* Implementations of :class:`IJSONSerializable`
* :class:`txtorcon.util.NetLocation`
* :class:`txtorcon.util.ipaddr.IPv4Address`
* :class:`txtorcon.util.ipaddr.IPv6Address`
* :class:`txtorcon.util.ipaddr.IPAddress`
'''
def default(self, o):
'''
Implementation of encoding
'''
if base.IJSONSerializable.providedBy(o):
return base.IJSONSerializable(o).as_dict()
elif isinstance(o, Router):
return router.JsonRouterMinimal(o).as_dict()
elif isinstance(o, Circuit):
return circuit.JsonCircuitMinimal(o).as_dict()
elif isinstance(o, Stream):
return minimalstream.JsonStreamMinimal(o).as_dict()
elif isinstance(o, datetime.datetime):
return o.isoformat()
elif isinstance(o, txtorcon.util.NetLocation):
return {'country': o.countrycode}
elif isinstance(o, IPADDRESSES):
return o.exploded
return json.JSONEncoder.default(self, o)
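# --- Illustrative addition (not part of the upstream module) ---
# A minimal sketch of how this encoder is typically used: pass it to json.dumps via the
# ``cls`` argument so datetimes (and, inside the application, routers/circuits/streams)
# are serialized by default(). The helper name and payload below are hypothetical.
def _example_encode():
    """Return a JSON string for a payload containing a datetime (illustrative only)."""
    payload = {'generated': datetime.datetime(2016, 1, 1, 12, 0, 0)}
    return json.dumps(payload, cls=ExtendedJSONEncoder)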
| gpl-2.0 |
ciraxwe/cherrypy-app-engine | cherrypy/_cpserver.py | 58 | 8275 | """Manage HTTP servers with CherryPy."""
import warnings
import cherrypy
from cherrypy.lib import attributes
from cherrypy._cpcompat import basestring, py3k
# We import * because we want to export check_port
# et al as attributes of this module.
from cherrypy.process.servers import *
class Server(ServerAdapter):
"""An adapter for an HTTP server.
You can set attributes (like socket_host and socket_port)
on *this* object (which is probably cherrypy.server), and call
quickstart. For example::
cherrypy.server.socket_port = 80
cherrypy.quickstart()
"""
socket_port = 8080
"""The TCP port on which to listen for connections."""
_socket_host = '127.0.0.1'
def _get_socket_host(self):
return self._socket_host
def _set_socket_host(self, value):
if value == '':
raise ValueError("The empty string ('') is not an allowed value. "
"Use '0.0.0.0' instead to listen on all active "
"interfaces (INADDR_ANY).")
self._socket_host = value
socket_host = property(
_get_socket_host,
_set_socket_host,
doc="""The hostname or IP address on which to listen for connections.
Host values may be any IPv4 or IPv6 address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if
your hosts file prefers IPv6). The string '0.0.0.0' is a special
IPv4 entry meaning "any active interface" (INADDR_ANY), and '::'
is the similar IN6ADDR_ANY for IPv6. The empty string or None are
not allowed.""")
socket_file = None
"""If given, the name of the UNIX socket to use instead of TCP/IP.
When this option is not None, the `socket_host` and `socket_port` options
are ignored."""
socket_queue_size = 5
"""The 'backlog' argument to socket.listen(); specifies the maximum number
of queued connections (default 5)."""
socket_timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
accepted_queue_size = -1
"""The maximum number of requests which will be queued up before
the server refuses to accept it (default -1, meaning no limit)."""
accepted_queue_timeout = 10
"""The timeout in seconds for attempting to add a request to the
queue when the queue is full (default 10)."""
shutdown_timeout = 5
"""The time to wait for HTTP worker threads to clean up."""
protocol_version = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses,
for example, "HTTP/1.1" (the default). Depending on the HTTP server used,
this should also limit the supported features used in the response."""
thread_pool = 10
"""The number of worker threads to start up in the pool."""
thread_pool_max = -1
"""The maximum size of the worker-thread pool. Use -1 to indicate no limit.
"""
max_request_header_size = 500 * 1024
"""The maximum number of bytes allowable in the request headers.
If exceeded, the HTTP server should return "413 Request Entity Too Large".
"""
max_request_body_size = 100 * 1024 * 1024
"""The maximum number of bytes allowable in the request body. If exceeded,
the HTTP server should return "413 Request Entity Too Large"."""
instance = None
"""If not None, this should be an HTTP server instance (such as
CPWSGIServer) which cherrypy.server will control. Use this when you need
more control over object instantiation than is available in the various
configuration options."""
ssl_context = None
"""When using PyOpenSSL, an instance of SSL.Context."""
ssl_certificate = None
"""The filename of the SSL certificate to use."""
ssl_certificate_chain = None
"""When using PyOpenSSL, the certificate chain to pass to
Context.load_verify_locations."""
ssl_private_key = None
"""The filename of the private key to use with SSL."""
if py3k:
ssl_module = 'builtin'
"""The name of a registered SSL adaptation module to use with
the builtin WSGI server. Builtin options are: 'builtin' (to
use the SSL library built into recent versions of Python).
You may also register your own classes in the
wsgiserver.ssl_adapters dict."""
else:
ssl_module = 'pyopenssl'
"""The name of a registered SSL adaptation module to use with the
builtin WSGI server. Builtin options are 'builtin' (to use the SSL
library built into recent versions of Python) and 'pyopenssl' (to
use the PyOpenSSL project, which you must install separately). You
may also register your own classes in the wsgiserver.ssl_adapters
dict."""
statistics = False
"""Turns statistics-gathering on or off for aware HTTP servers."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
wsgi_version = (1, 0)
"""The WSGI version tuple to use with the builtin WSGI server.
The provided options are (1, 0) [which includes support for PEP 3333,
which declares it covers WSGI version 1.0.1 but still mandates the
wsgi.version (1, 0)] and ('u', 0), an experimental unicode version.
You may create and register your own experimental versions of the WSGI
protocol by adding custom classes to the wsgiserver.wsgi_gateways dict."""
def __init__(self):
self.bus = cherrypy.engine
self.httpserver = None
self.interrupt = None
self.running = False
def httpserver_from_self(self, httpserver=None):
"""Return a (httpserver, bind_addr) pair based on self attributes."""
if httpserver is None:
httpserver = self.instance
if httpserver is None:
from cherrypy import _cpwsgi_server
httpserver = _cpwsgi_server.CPWSGIServer(self)
if isinstance(httpserver, basestring):
# Is anyone using this? Can I add an arg?
httpserver = attributes(httpserver)(self)
return httpserver, self.bind_addr
def start(self):
"""Start the HTTP server."""
if not self.httpserver:
self.httpserver, self.bind_addr = self.httpserver_from_self()
ServerAdapter.start(self)
start.priority = 75
def _get_bind_addr(self):
if self.socket_file:
return self.socket_file
if self.socket_host is None and self.socket_port is None:
return None
return (self.socket_host, self.socket_port)
def _set_bind_addr(self, value):
if value is None:
self.socket_file = None
self.socket_host = None
self.socket_port = None
elif isinstance(value, basestring):
self.socket_file = value
self.socket_host = None
self.socket_port = None
else:
try:
self.socket_host, self.socket_port = value
self.socket_file = None
except ValueError:
raise ValueError("bind_addr must be a (host, port) tuple "
"(for TCP sockets) or a string (for Unix "
"domain sockets), not %r" % value)
bind_addr = property(
_get_bind_addr,
_set_bind_addr,
doc='A (host, port) tuple for TCP sockets or '
'a str for Unix domain sockets.')
def base(self):
"""Return the base (scheme://host[:port] or sock file) for this server.
"""
if self.socket_file:
return self.socket_file
host = self.socket_host
if host in ('0.0.0.0', '::'):
# 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY.
# Look up the host name, which should be the
# safest thing to spit out in a URL.
import socket
host = socket.gethostname()
port = self.socket_port
if self.ssl_certificate:
scheme = "https"
if port != 443:
host += ":%s" % port
else:
scheme = "http"
if port != 80:
host += ":%s" % port
return "%s://%s" % (scheme, host)
| apache-2.0 |
fschulze/pytest-warnings | pytest_warnings/__init__.py | 1 | 3578 | import inspect
import os
import pytest
import warnings
_DISABLED = False
def _setoption(wmod, arg):
"""
Copy of the warning._setoption function but does not escape arguments.
"""
parts = arg.split(':')
if len(parts) > 5:
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = wmod._getaction(action)
category = wmod._getcategory(category)
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise wmod._OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
wmod.filterwarnings(action, message, category, module, lineno)
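# --- Illustrative addition (not part of the upstream plugin) ---
# _setoption accepts the same "action:message:category:module:lineno" layout as Python's
# own -W flag, but without escaping the fields. A hedged sketch of calling it directly
# against the stdlib warnings module (the filter string is just an example):
def _example_filter():
    """Install an example ignore filter via _setoption (illustrative only)."""
    _setoption(warnings, "ignore::DeprecationWarning")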
def pytest_addoption(parser):
global _DISABLED
version = []
for part in pytest.__version__.split('.'):
try:
version.append(int(part))
except ValueError:
version.append(part)
if tuple(version)[:2] >= (3, 1):
_DISABLED = True
        warnings.warn('the pytest-warnings plugin was merged into core pytest in 3.1, please '
                      'uninstall pytest-warnings')
return
group = parser.getgroup("pytest-warnings")
try:
group.addoption(
'-W', '--pythonwarnings', action='append',
help="set which warnings to report, see -W option of python itself.")
parser.addini("filterwarnings", type="linelist",
help="Each line specifies warning filter pattern which would be passed"
"to warnings.filterwarnings. Process after -W and --pythonwarnings.")
except ValueError:
pass
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
if _DISABLED:
yield
return
from _pytest.recwarn import RecordedWarning, WarningsRecorder
wrec = WarningsRecorder()
def showwarning(message, category, filename, lineno, file=None, line=None):
frame = inspect.currentframe()
if '/_pytest/recwarn' in frame.f_back.f_code.co_filename:
# we are in test recorder, so this warning is already handled
return
wrec._list.append(RecordedWarning(
message, category, filename, lineno, file, line))
# still perform old showwarning functionality
wrec._showwarning(
message, category, filename, lineno, file=file, line=line)
args = item.config.getoption('pythonwarnings') or []
inifilters = item.config.getini("filterwarnings")
with wrec:
_showwarning = wrec._showwarning
warnings.showwarning = showwarning
wrec._module.simplefilter('once')
for arg in args:
wrec._module._setoption(arg)
for arg in inifilters:
_setoption(wrec._module, arg)
yield
wrec._showwarning = _showwarning
for warning in wrec.list:
msg = warnings.formatwarning(
warning.message, warning.category,
os.path.relpath(warning.filename), warning.lineno, warning.line)
fslocation = getattr(item, "location", None)
if fslocation is None:
fslocation = getattr(item, "fspath", None)
else:
fslocation = "%s:%s" % fslocation[:2]
fslocation = "in %s the following warning was recorded:\n" % fslocation
item.config.warn("W0", msg, fslocation=fslocation)
| mit |
PalisadoesFoundation/switchmap-ng | switchmap/test/test_mib_if.py | 2 | 15887 | #!/usr/bin/env python3
"""Test the mib_if module."""
import os
import sys
import binascii
import unittest
from mock import Mock
# Try to create a working PYTHONPATH
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test'):
sys.path.append(ROOT_DIRECTORY)
else:
print(
'This script is not installed in the "switchmap-ng/bin" directory. '
'Please fix.')
sys.exit(2)
from switchmap.snmp import mib_if as testimport
class Query(object):
"""Class for snmp_manager.Query mock.
A detailed tutorial about Python mocks can be found here:
http://www.drdobbs.com/testing/using-mocks-in-python/240168251
"""
def query(self):
"""Do an SNMP query."""
pass
def oid_exists(self):
"""Determine existence of OID on device."""
pass
def swalk(self):
"""Do a failsafe SNMPwalk."""
pass
def walk(self):
"""Do a failable SNMPwalk."""
pass
class KnownValues(unittest.TestCase):
"""Checks all functions and methods."""
#########################################################################
# General object setup
#########################################################################
# SNMPwalk results used by Mocks.
# Normalized walk returning integers
nwalk_results_integer = {
100: 1234,
200: 5678
}
# Set the stage for SNMPwalk for integer results
snmpobj_integer = Mock(spec=Query)
mock_spec_integer = {
'swalk.return_value': nwalk_results_integer,
'walk.return_value': nwalk_results_integer,
}
snmpobj_integer.configure_mock(**mock_spec_integer)
# Normalized walk returning integers for the ifIndex
nwalk_results_ifindex = {
100: 100,
200: 200
}
# Set the stage for SNMPwalk for integer results for the ifIndex
snmpobj_ifindex = Mock(spec=Query)
mock_spec_ifindex = {
'swalk.return_value': nwalk_results_ifindex,
'walk.return_value': nwalk_results_ifindex,
}
snmpobj_ifindex.configure_mock(**mock_spec_ifindex)
# Normalized walk returning strings
nwalk_results_bytes = {
100: b'1234',
200: b'5678'
}
# Set the stage for SNMPwalk for string results
snmpobj_bytes = Mock(spec=Query)
mock_spec_bytes = {
'swalk.return_value': nwalk_results_bytes,
'walk.return_value': nwalk_results_bytes,
}
snmpobj_bytes.configure_mock(**mock_spec_bytes)
# Normalized walk returning binary data
nwalk_results_binary = {
100: binascii.unhexlify('1234'),
200: binascii.unhexlify('5678')
}
# Set the stage for SNMPwalk for binary results
snmpobj_binary = Mock(spec=Query)
mock_spec_binary = {
'swalk.return_value': nwalk_results_binary,
'walk.return_value': nwalk_results_binary,
}
snmpobj_binary.configure_mock(**mock_spec_binary)
# Initializing key variables
expected_dict = {
100: {
'ifAlias': '1234',
'ifSpeed': 1234,
'ifOperStatus': 1234,
'ifAdminStatus': 1234,
'ifType': 1234,
'ifName': '1234',
'ifIndex': 100,
'ifPhysAddress': '1234',
'ifInOctets': 1234,
'ifOutOctets': 1234,
'ifInBroadcastPkts': 1234,
'ifOutBroadcastPkts': 1234,
'ifInMulticastPkts': 1234,
'ifOutMulticastPkts': 1234,
'ifLastChange': 1234,
'ifDescr': '1234'
},
200: {
'ifAlias': '5678',
'ifSpeed': 5678,
'ifOperStatus': 5678,
'ifAdminStatus': 5678,
'ifType': 5678,
'ifName': '5678',
'ifIndex': 200,
'ifPhysAddress': '5678',
'ifInOctets': 5678,
'ifOutOctets': 5678,
'ifInBroadcastPkts': 5678,
'ifOutBroadcastPkts': 5678,
'ifInMulticastPkts': 5678,
'ifOutMulticastPkts': 5678,
'ifLastChange': 5678,
'ifDescr': '5678'
}
}
def test_get_query(self):
"""Testing function get_query."""
pass
def test_init_query(self):
"""Testing function init_query."""
pass
def test___init__(self):
"""Testing function __init__."""
pass
def test_system(self):
"""Testing function system."""
pass
def test_layer1(self):
"""Testing function layer1."""
# Layer 1 testing only seems to work when all the methods return
# the same type of results (eg. int, string, hex)
pass
def test_iflastchange(self):
"""Testing function iflastchange."""
# Initialize key variables
oid_key = 'ifLastChange'
oid = '.1.3.6.1.2.1.2.2.1.9'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.iflastchange()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.iflastchange(oidonly=True)
self.assertEqual(results, oid)
def test_ifinoctets(self):
"""Testing function ifinoctets."""
# Initialize key variables
oid_key = 'ifInOctets'
oid = '.1.3.6.1.2.1.2.2.1.10'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifinoctets()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifinoctets(oidonly=True)
self.assertEqual(results, oid)
def test_ifoutoctets(self):
"""Testing function ifoutoctets."""
# Initialize key variables
oid_key = 'ifOutOctets'
oid = '.1.3.6.1.2.1.2.2.1.16'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifoutoctets()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifoutoctets(oidonly=True)
self.assertEqual(results, oid)
def test_ifdescr(self):
"""Testing function ifdescr."""
# Initialize key variables
oid_key = 'ifDescr'
oid = '.1.3.6.1.2.1.2.2.1.2'
# Get results
testobj = testimport.init_query(self.snmpobj_bytes)
results = testobj.ifdescr()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifdescr(oidonly=True)
self.assertEqual(results, oid)
def test_iftype(self):
"""Testing function iftype."""
# Initialize key variables
oid_key = 'ifType'
oid = '.1.3.6.1.2.1.2.2.1.3'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.iftype()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.iftype(oidonly=True)
self.assertEqual(results, oid)
def test_ifspeed(self):
"""Testing function ifspeed."""
# Initialize key variables
oid_key = 'ifSpeed'
oid = '.1.3.6.1.2.1.2.2.1.5'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifspeed()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifspeed(oidonly=True)
self.assertEqual(results, oid)
def test_ifadminstatus(self):
"""Testing function ifadminstatus."""
# Initialize key variables
oid_key = 'ifAdminStatus'
oid = '.1.3.6.1.2.1.2.2.1.7'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifadminstatus()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifadminstatus(oidonly=True)
self.assertEqual(results, oid)
def test_ifoperstatus(self):
"""Testing function ifoperstatus."""
# Initialize key variables
oid_key = 'ifOperStatus'
oid = '.1.3.6.1.2.1.2.2.1.8'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifoperstatus()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifoperstatus(oidonly=True)
self.assertEqual(results, oid)
def test_ifalias(self):
"""Testing function ifalias."""
# Initialize key variables
oid_key = 'ifAlias'
oid = '.1.3.6.1.2.1.31.1.1.1.18'
# Get results
testobj = testimport.init_query(self.snmpobj_bytes)
results = testobj.ifalias()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifalias(oidonly=True)
self.assertEqual(results, oid)
def test_ifname(self):
"""Testing function ifname."""
# Initialize key variables
oid_key = 'ifName'
oid = '.1.3.6.1.2.1.31.1.1.1.1'
# Get results
testobj = testimport.init_query(self.snmpobj_bytes)
results = testobj.ifname()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifname(oidonly=True)
self.assertEqual(results, oid)
def test_ifindex(self):
"""Testing function ifindex."""
# Initialize key variables
oid_key = 'ifIndex'
oid = '.1.3.6.1.2.1.2.2.1.1'
# Get results
testobj = testimport.init_query(self.snmpobj_ifindex)
results = testobj.ifindex()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# The ifIndex value must match that of the key.
# We are keying off of the ifIndex so this must be true.
self.assertEqual(key, value)
# Test that we are getting the correct OID
results = testobj.ifindex(oidonly=True)
self.assertEqual(results, oid)
def test_ifphysaddress(self):
"""Testing function ifphysaddress."""
# Initialize key variables
oid_key = 'ifPhysAddress'
oid = '.1.3.6.1.2.1.2.2.1.6'
# Get results
testobj = testimport.init_query(self.snmpobj_binary)
results = testobj.ifphysaddress()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifphysaddress(oidonly=True)
self.assertEqual(results, oid)
def test_ifinmulticastpkts(self):
"""Testing function ifinmulticastpkts."""
# Initialize key variables
oid_key = 'ifInMulticastPkts'
oid = '.1.3.6.1.2.1.31.1.1.1.2'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifinmulticastpkts()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifinmulticastpkts(oidonly=True)
self.assertEqual(results, oid)
def test_ifoutmulticastpkts(self):
"""Testing function ifoutmulticastpkts."""
# Initialize key variables
oid_key = 'ifOutMulticastPkts'
oid = '.1.3.6.1.2.1.31.1.1.1.4'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifoutmulticastpkts()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifoutmulticastpkts(oidonly=True)
self.assertEqual(results, oid)
def test_ifinbroadcastpkts(self):
"""Testing function ifinbroadcastpkts."""
# Initialize key variables
oid_key = 'ifInBroadcastPkts'
oid = '.1.3.6.1.2.1.31.1.1.1.3'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifinbroadcastpkts()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifinbroadcastpkts(oidonly=True)
self.assertEqual(results, oid)
def test_ifoutbroadcastpkts(self):
"""Testing function ifoutbroadcastpkts."""
# Initialize key variables
oid_key = 'ifOutBroadcastPkts'
oid = '.1.3.6.1.2.1.31.1.1.1.5'
# Get results
testobj = testimport.init_query(self.snmpobj_integer)
results = testobj.ifoutbroadcastpkts()
# Basic testing of results
for key, value in results.items():
self.assertEqual(isinstance(key, int), True)
self.assertEqual(value, self.expected_dict[key][oid_key])
# Test that we are getting the correct OID
results = testobj.ifoutbroadcastpkts(oidonly=True)
self.assertEqual(results, oid)
def test_ifstackstatus(self):
"""Testing function ifstackstatus."""
pass
def test__get_data(self):
"""Testing function _get_data."""
# Tested by all other methods
pass
if __name__ == '__main__':
# Do the unit test
unittest.main()
| apache-2.0 |
SPACEDAC7/TrabajoFinalGrado | DynamicAnalyzer/tools/adb/mac/systrace/systrace.py | 148 | 10843 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
import errno, optparse, os, re, select, subprocess, sys, time, zlib
flattened_css_file = 'style.css'
flattened_js_file = 'script.js'
class OptionParserIgnoreErrors(optparse.OptionParser):
def error(self, msg):
pass
def exit(self):
pass
def print_usage(self):
pass
def print_help(self):
pass
def print_version(self):
pass
def get_device_sdk_version():
getprop_args = ['adb', 'shell', 'getprop', 'ro.build.version.sdk']
parser = OptionParserIgnoreErrors()
parser.add_option('-e', '--serial', dest='device_serial', type='string')
options, args = parser.parse_args()
if options.device_serial is not None:
getprop_args[1:1] = ['-s', options.device_serial]
adb = subprocess.Popen(getprop_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = adb.communicate()
if adb.returncode != 0:
print >> sys.stderr, 'Error querying device SDK-version:'
print >> sys.stderr, err
sys.exit(1)
version = int(out)
return version
def add_adb_serial(command, serial):
if serial is not None:
command.insert(1, serial)
command.insert(1, '-s')
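# --- Illustrative addition (not part of the upstream script) ---
# add_adb_serial splices "-s <serial>" in directly after the adb executable so the
# command targets one specific device. The serial below is a made-up example.
def _example_target_device():
    """Show how an adb command list is rewritten for a given device (illustrative)."""
    cmd = ['adb', 'shell', 'atrace', '-z']
    add_adb_serial(cmd, 'emulator-5554')
    return cmd  # ['adb', '-s', 'emulator-5554', 'shell', 'atrace', '-z']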
def main():
device_sdk_version = get_device_sdk_version()
if device_sdk_version < 18:
legacy_script = os.path.join(os.path.dirname(sys.argv[0]), 'systrace-legacy.py')
os.execv(legacy_script, sys.argv)
usage = "Usage: %prog [options] [category1 [category2 ...]]"
desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write HTML to FILE',
default='trace.html', metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
help='use a trace buffer size of N KB', metavar='N')
parser.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions to trace')
parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
action='store_true', help='list the available categories and exit')
parser.add_option('-a', '--app', dest='app_name', default=None, type='string',
action='store', help='enable application-level tracing for comma-separated ' +
'list of app cmdlines')
parser.add_option('--no-fix-threads', dest='fix_threads', default=True,
action='store_false', help='don\'t fix missing or truncated thread names')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true', help='link to original CSS or JS resources '
'instead of embedding them')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='')
parser.add_option('-e', '--serial', dest='device_serial', type='string',
help='adb device serial number')
options, args = parser.parse_args()
if options.list_categories:
atrace_args = ['adb', 'shell', 'atrace', '--list_categories']
expect_trace = False
elif options.from_file is not None:
atrace_args = ['cat', options.from_file]
expect_trace = True
else:
atrace_args = ['adb', 'shell', 'atrace', '-z']
expect_trace = True
if options.trace_time is not None:
if options.trace_time > 0:
atrace_args.extend(['-t', str(options.trace_time)])
else:
parser.error('the trace time must be a positive number')
if options.trace_buf_size is not None:
if options.trace_buf_size > 0:
atrace_args.extend(['-b', str(options.trace_buf_size)])
else:
parser.error('the trace buffer size must be a positive number')
if options.app_name is not None:
atrace_args.extend(['-a', options.app_name])
if options.kfuncs is not None:
atrace_args.extend(['-k', options.kfuncs])
atrace_args.extend(args)
if options.fix_threads:
atrace_args.extend([';', 'ps', '-t'])
if atrace_args[0] == 'adb':
add_adb_serial(atrace_args, options.device_serial)
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
if options.link_assets:
src_dir = os.path.join(script_dir, options.asset_dir, 'src')
build_dir = os.path.join(script_dir, options.asset_dir, 'build')
js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)
css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)
else:
css_filename = os.path.join(script_dir, flattened_css_file)
js_filename = os.path.join(script_dir, flattened_js_file)
css = compiled_css_tag % (open(css_filename).read())
js = compiled_js_tag % (open(js_filename).read())
templates = ''
html_filename = options.output_file
adb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = None
data = []
# Read the text portion of the output and watch for the 'TRACE:' marker that
# indicates the start of the trace data.
while result is None:
ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
if adb.stderr in ready[0]:
err = os.read(adb.stderr.fileno(), 4096)
sys.stderr.write(err)
sys.stderr.flush()
if adb.stdout in ready[0]:
out = os.read(adb.stdout.fileno(), 4096)
parts = out.split('\nTRACE:', 1)
txt = parts[0].replace('\r', '')
if len(parts) == 2:
# The '\nTRACE:' match stole the last newline from the text, so add it
# back here.
txt += '\n'
sys.stdout.write(txt)
sys.stdout.flush()
if len(parts) == 2:
data.append(parts[1])
sys.stdout.write("downloading trace...")
sys.stdout.flush()
break
result = adb.poll()
# Read and buffer the data portion of the output.
while True:
ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
keepReading = False
if adb.stderr in ready[0]:
err = os.read(adb.stderr.fileno(), 4096)
if len(err) > 0:
keepReading = True
sys.stderr.write(err)
sys.stderr.flush()
if adb.stdout in ready[0]:
out = os.read(adb.stdout.fileno(), 4096)
if len(out) > 0:
keepReading = True
data.append(out)
if result is not None and not keepReading:
break
result = adb.poll()
if result == 0:
if expect_trace:
data = ''.join(data)
# Collapse CRLFs that are added by adb shell.
if data.startswith('\r\n'):
data = data.replace('\r\n', '\n')
# Skip the initial newline.
data = data[1:]
if not data:
print >> sys.stderr, ('No data was captured. Output file was not ' +
'written.')
sys.exit(1)
else:
# Indicate to the user that the data download is complete.
print " done\n"
# Extract the thread list dumped by ps.
threads = {}
if options.fix_threads:
parts = data.split('USER PID PPID VSIZE RSS WCHAN PC NAME', 1)
if len(parts) == 2:
data = parts[0]
for line in parts[1].splitlines():
cols = line.split(None, 8)
if len(cols) == 9:
tid = int(cols[1])
name = cols[8]
threads[tid] = name
# Decompress and preprocess the data.
out = zlib.decompress(data)
if options.fix_threads:
def repl(m):
tid = int(m.group(2))
if tid > 0:
name = threads.get(tid)
if name is None:
name = m.group(1)
if name == '<...>':
name = '<' + str(tid) + '>'
threads[tid] = name
return name + '-' + m.group(2)
else:
return m.group(0)
out = re.sub(r'^\s*(\S+)-(\d+)', repl, out, flags=re.MULTILINE)
html_prefix = read_asset(script_dir, 'prefix.html')
html_suffix = read_asset(script_dir, 'suffix.html')
html_file = open(html_filename, 'w')
html_file.write(html_prefix % (css, js, templates))
html_out = out.replace('\n', '\\n\\\n')
html_file.write(html_out)
html_file.write(html_suffix)
html_file.close()
print "\n wrote file://%s\n" % os.path.abspath(options.output_file)
else: # i.e. result != 0
print >> sys.stderr, 'adb returned error code %d' % result
sys.exit(1)
def read_asset(src_dir, filename):
return open(os.path.join(src_dir, filename)).read()
def get_assets(src_dir, build_dir):
sys.path.append(build_dir)
gen = __import__('generate_standalone_timeline_view', {}, {})
parse_deps = __import__('parse_deps', {}, {})
gen_templates = __import__('generate_template_contents', {}, {})
filenames = gen._get_input_filenames()
load_sequence = parse_deps.calc_load_sequence(filenames, src_dir)
js_files = []
js_flattenizer = "window.FLATTENED = {};\n"
js_flattenizer += "window.FLATTENED_RAW_SCRIPTS = {};\n"
css_files = []
for module in load_sequence:
js_files.append(os.path.relpath(module.filename, src_dir))
js_flattenizer += "window.FLATTENED['%s'] = true;\n" % module.name
for dependent_raw_script_name in module.dependent_raw_script_names:
js_flattenizer += (
"window.FLATTENED_RAW_SCRIPTS['%s'] = true;\n" %
dependent_raw_script_name)
for style_sheet in module.style_sheets:
css_files.append(os.path.relpath(style_sheet.filename, src_dir))
templates = gen_templates.generate_templates()
sys.path.pop()
return (js_files, js_flattenizer, css_files, templates)
compiled_css_tag = """<style type="text/css">%s</style>"""
compiled_js_tag = """<script language="javascript">%s</script>"""
linked_css_tag = """<link rel="stylesheet" href="%s"></link>"""
linked_js_tag = """<script language="javascript" src="%s"></script>"""
if __name__ == '__main__':
main()
| gpl-3.0 |
collects/VTK | Imaging/Core/Testing/Python/TestHSVToRGB.py | 20 | 2405 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.Render()
# --- end of script --
| bsd-3-clause |
prheenan/prhUtil | igor/scripts/SqlAutoGen/sqltableread.py | 1 | 6863 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
# need to add the utilities class. Want 'home' to be platform independent
from os.path import expanduser
home = expanduser("~")
# get the utilties directory (assume it lives in ~/utilities/python)
# but simple to change
path= home +"/utilities/python"
import sys
sys.path.append(path)
# import the patrick-specific utilities
import GenUtilities as pGenUtil
import PlotUtilities as pPlotUtil
import CheckpointUtilities as pCheckUtil
import mysql.connector as sqlcon
from IgorConvert import IgorConvert,fileContent
from SqlDefine import SqlDefine
class tableInfo:
def __init__(self):
self._data = []
self._keyName = "fieldname"
self._keyType = "type"
self._keyTable= "table"
self._allFields = []
pass
def add(self,mType,mName,mTable):
self._data.append( {self._keyType: mType,
self._keyName : mName,
self._keyTable :mTable})
def generateTableDict(self):
# first, determine the set of tables
tableSet = set( [ ele[self._keyTable] for ele in self._data] )
# make a dictionary for each element belonging to the tables
toRet = dict()
for tableName in tableSet:
toRet[tableName] = []
for row in self._data:
if (row[self._keyTable] == tableName):
# this row belongs to the table
fieldName = row[self._keyName]
toRet[tableName].append({self._keyType :row[self._keyType],
self._keyName :fieldName})
# add every field at first, set-ify later
self._allFields.append(fieldName)
# POST: looked through all the rows
self._mDict = toRet
def getDBString(self,strBetweenTables="\n",
funcTableToConst=IgorConvert.getTableConst,
funcTableToStruct=IgorConvert.getStructs,
funcTableToInsert=IgorConvert.getInsertFuncs,
funcColNames=IgorConvert.getColNameWaveFunc,
funcFieldConstants=IgorConvert.getFieldConstants,
funcInitStructs=IgorConvert.InitStructs,
funcAllTables=IgorConvert.getAllTables,
funcConversions=IgorConvert.getConverts,
funcSelect=IgorConvert.getSelectFuncs,
funcHandlers=IgorConvert.getHandlers,
funcId=IgorConvert.getTableIdMethods,
funcHandleGlobal=IgorConvert.getHandleGlobal):
# must have called
tableDict = self._mDict
allFields = self._allFields
structString = ""
constString = ""
insertString = ""
colNameString = ""
fieldNameString =""
initStructString = ""
selectString = ""
convertString = ""
# add each element of the tables
mKeys = sorted(tableDict.keys())
mTableString = funcAllTables(tableDict)
fieldNameString += funcFieldConstants(allFields)
idTableString = funcId(tableDict)
handlerString = funcHandleGlobal(tableDict)
for table in mKeys:
mTableField = tableDict[table]
# look through each element, corresponding to a separate field.
namesTmp,typeTmp = SqlDefine.getNameType(mTableField)
# POST: all elements accounted for...
# XXX make more efficient, store this way?
constString += funcTableToConst(table)
colNameString += funcColNames(table,typeTmp,namesTmp) + \
strBetweenTables
structString += funcTableToStruct(table,typeTmp,namesTmp) + \
strBetweenTables
insertString += funcTableToInsert(table,typeTmp,namesTmp) + \
strBetweenTables
initStructString += funcInitStructs(table,typeTmp,namesTmp) + \
strBetweenTables
selectString += funcSelect(table,typeTmp,namesTmp) + \
strBetweenTables
convertString += funcConversions(table,typeTmp,namesTmp) + \
strBetweenTables
handlerString += funcHandlers(table,typeTmp,namesTmp) + \
strBetweenTables
globalDef = (
"// Defined table names\n{:s}\n"+\
"// Defined table field names\n{:s}\n"+\
"// All Table function\n{:s}\n"+\
"// Defined structures\n{:s}\n"+\
"// Defined id structure\n{:s}\n"
).format(constString,fieldNameString,mTableString,structString,
idTableString)
globalFunc = (
"// Defined insert functions\n{:s}\n"+\
"// Initialization for structures\n{:s}\n"
"// Conversion functions\n{:s}\n" + \
"// Select functions\n{:s}\n"
).format(insertString,initStructString,
convertString,selectString)
globalHandle = ("//Defined Handlers\n{:s}\n").format(handlerString)
utilFuncs = ("//Column names and types\n{:s}\n").format(colNameString)
toRet = fileContent(globalDef,globalFunc,globalHandle,utilFuncs)
return toRet
class connInf:
def __init__(self,user="root",pwd="",host="127.0.0.1",database="CypherAFM"):
self._user = user
self._pwd = pwd
self._host = host
self._database = database
def connect(self):
self._cnx = sqlcon.connect(user=self._user,password=self._pwd,
host = self._host,
database = self._database,
raise_on_warnings=True)
print(self._cnx)
self._cur = self._cnx.cursor()
def safeExec(self,mStr):
try:
self._cur.execute(mStr)
        except sqlcon.Error as err:
            print(mStr)
            print("Execution failed: {}".format(err))
            sys.exit(-1)
def getAllTableInfo(self):
mStr = ("SELECT DATA_TYPE,COLUMN_NAME,TABLE_NAME FROM "+\
"INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA='{:s}'").\
format(self._database)
idxType = 0
idxName = 1
idxTable = 2
self.safeExec(mStr)
# make a 'tableinfo' struct to save everything as.
toRet = tableInfo()
for r in self._cur:
toRet.add(r[idxType],r[idxName],r[idxTable])
return toRet
def close(self):
self._cur.close()
self._cnx.close()
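# --- Illustrative addition (not part of the original script) ---
# A minimal sketch of the intended flow, assuming a reachable MySQL server and the
# default credentials used by connInf: connect, read INFORMATION_SCHEMA, build the
# per-table dictionary, then emit the Igor code strings.
def _example_generate():
    """Connect to the configured database and return the generated file content (illustrative)."""
    conn = connInf(user="root", pwd="", host="127.0.0.1", database="CypherAFM")
    conn.connect()
    info = conn.getAllTableInfo()
    conn.close()
    info.generateTableDict()
    return info.getDBString()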
| gpl-2.0 |
gcd0318/django | tests/utils_tests/test_duration.py | 364 | 1677 | import datetime
import unittest
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
class TestDurationString(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(duration_string(duration), '01:03:05')
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(duration_string(duration), '1 01:03:05')
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(duration_string(duration), '01:03:05.012345')
def test_negative(self):
duration = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
self.assertEqual(duration_string(duration), '-1 01:03:05')
class TestParseDurationRoundtrip(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_string(duration)), duration)
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_string(duration)), duration)
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(parse_duration(duration_string(duration)), duration)
def test_negative(self):
duration = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_string(duration)), duration)
| bsd-3-clause |
moijes12/oh-mainline | vendor/packages/scrapy/scrapyd/app.py | 16 | 1290 | from twisted.application.service import Application
from twisted.application.internet import TimerService, TCPServer
from twisted.web import server
from twisted.python import log
from .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment
from .launcher import Launcher
from .eggstorage import FilesystemEggStorage
from .scheduler import SpiderScheduler
from .poller import QueuePoller
from .environ import Environment
from .website import Root
from .config import Config
def application(config):
app = Application("Scrapyd")
http_port = config.getint('http_port', 6800)
poller = QueuePoller(config)
eggstorage = FilesystemEggStorage(config)
scheduler = SpiderScheduler(config)
environment = Environment(config)
app.setComponent(IPoller, poller)
app.setComponent(IEggStorage, eggstorage)
app.setComponent(ISpiderScheduler, scheduler)
app.setComponent(IEnvironment, environment)
launcher = Launcher(config, app)
timer = TimerService(5, poller.poll)
webservice = TCPServer(http_port, server.Site(Root(config, app)))
log.msg("Scrapyd web console available at http://localhost:%s/" % http_port)
launcher.setServiceParent(app)
timer.setServiceParent(app)
webservice.setServiceParent(app)
return app
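# --- Illustrative addition (not part of upstream scrapyd) ---
# scrapyd's twistd entry point normally builds the Application by loading the default
# Config and handing it to application(); this sketch mirrors that wiring for reference.
def _example_application():
    """Build the Scrapyd twisted Application from a default Config (illustrative only)."""
    return application(Config())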
| agpl-3.0 |
appleseedhq/gaffer | python/GafferSceneUITest/TranslateToolTest.py | 1 | 29298 | ##########################################################################
#
# Copyright (c) 2016, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import inspect
import math
import os
import imath
import IECore
import Gaffer
import GafferTest
import GafferUITest
import GafferScene
import GafferSceneUI
class TranslateToolTest( GafferUITest.TestCase ) :
def testSelection( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["transformFilter"] = GafferScene.PathFilter()
script["transform"] = GafferScene.Transform()
script["transform"]["in"].setInput( script["group"]["out"] )
script["transform"]["filter"].setInput( script["transformFilter"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["transform"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
self.assertEqual( len( tool.selection() ), 0 )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].path, "/group/plane" )
self.assertEqual( tool.selection()[0].context, view.getContext() )
self.assertTrue( tool.selection()[0].upstreamScene.isSame( script["plane"]["out"] ) )
self.assertEqual( tool.selection()[0].upstreamPath, "/plane" )
self.assertTrue( tool.selection()[0].transformPlug.isSame( script["plane"]["transform"] ) )
self.assertEqual( tool.selection()[0].transformSpace, imath.M44f() )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group" ] ) )
self.assertEqual( tool.selection()[0].path, "/group" )
self.assertEqual( tool.selection()[0].context, view.getContext() )
self.assertTrue( tool.selection()[0].upstreamScene.isSame( script["group"]["out"] ) )
self.assertEqual( tool.selection()[0].upstreamPath, "/group" )
self.assertTrue( tool.selection()[0].transformPlug.isSame( script["group"]["transform"] ) )
self.assertEqual( tool.selection()[0].transformSpace, imath.M44f() )
script["transformFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
self.assertTrue( tool.selection()[0].transformPlug.isSame( script["transform"]["transform"] ) )
script["transformFilter"]["enabled"].setValue( False )
self.assertTrue( tool.selection()[0].transformPlug.isSame( script["group"]["transform"] ) )
script["transformFilter"]["enabled"].setValue( True )
self.assertEqual( tool.selection()[0].path, "/group" )
self.assertEqual( tool.selection()[0].context, view.getContext() )
self.assertTrue( tool.selection()[0].upstreamScene.isSame( script["transform"]["out"] ) )
self.assertEqual( tool.selection()[0].upstreamPath, "/group" )
self.assertTrue( tool.selection()[0].transformPlug.isSame( script["transform"]["transform"] ) )
self.assertEqual( tool.selection()[0].transformSpace, imath.M44f() )
script["transform"]["enabled"].setValue( False )
self.assertTrue( tool.selection()[0].transformPlug.isSame( script["group"]["transform"] ) )
def testTranslate( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
view = GafferSceneUI.SceneView()
view["in"].setInput( script["plane"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertEqual(
script["plane"]["out"].fullTransform( "/plane" ).translation(),
imath.V3f( 1, 0, 0 ),
)
def testInteractionWithRotation( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
view = GafferSceneUI.SceneView()
view["in"].setInput( script["plane"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool["orientation"].setValue( tool.Orientation.Local )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 1, 0, 0 ).equalWithAbsError(
script["plane"]["out"].fullTransform( "/plane" ).translation(),
0.0000001
)
)
script.undo()
script["plane"]["transform"]["rotate"]["y"].setValue( 90 )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 0, 0, -1 ).equalWithAbsError(
script["plane"]["out"].fullTransform( "/plane" ).translation(),
0.0000001
)
)
script.undo()
def testInteractionWithGroupRotation( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["group"]["transform"]["rotate"]["y"].setValue( 90 )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 0, 0, -1 ).equalWithAbsError(
script["group"]["out"].fullTransform( "/group/plane" ).translation(),
0.0000001
)
)
def testInteractionWithGroupTranslation( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["group"]["transform"]["translate"].setValue( imath.V3f( 1, 2, 3 ) )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool.translate( imath.V3f( -1, 0, 0 ) )
self.assertEqual(
script["group"]["out"].fullTransform( "/group/plane" ).translation(),
imath.V3f( 0, 2, 3 ),
)
def testOrientation( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["transform"]["rotate"]["y"].setValue( 90 )
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["group"]["transform"]["rotate"]["y"].setValue( 90 )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
# Local
tool["orientation"].setValue( tool.Orientation.Local )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( -1, 0, 0 ).equalWithAbsError(
script["group"]["out"].fullTransform( "/group/plane" ).translation(),
0.000001
)
)
script.undo()
# Parent
tool["orientation"].setValue( tool.Orientation.Parent )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 0, 0, -1 ).equalWithAbsError(
script["group"]["out"].fullTransform( "/group/plane" ).translation(),
0.0000001
)
)
script.undo()
# World
tool["orientation"].setValue( tool.Orientation.World )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 1, 0, 0 ).equalWithAbsError(
script["group"]["out"].fullTransform( "/group/plane" ).translation(),
0.0000001
)
)
def testScale( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["transform"]["scale"].setValue( imath.V3f( 10 ) )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["plane"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 1, 0, 0 ).equalWithAbsError(
script["plane"]["out"].fullTransform( "/plane" ).translation(),
0.0000001
)
)
script.undo()
tool["orientation"].setValue( tool.Orientation.Local )
with Gaffer.UndoScope( script ) :
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 1, 0, 0 ).equalWithAbsError(
script["plane"]["out"].fullTransform( "/plane" ).translation(),
0.0000001
)
)
def testGroup( self ) :
script = Gaffer.ScriptNode()
script["group"] = GafferScene.Group()
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertEqual(
script["group"]["out"].fullTransform( "/group" ).translation(),
imath.V3f( 1, 0, 0 ),
)
def testTransform( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["transform"]["rotate"]["y"].setValue( 90 )
script["transformFilter"] = GafferScene.PathFilter()
script["transformFilter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
script["transform"] = GafferScene.Transform()
script["transform"]["in"].setInput( script["plane"]["out"] )
script["transform"]["filter"].setInput( script["transformFilter"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["transform"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool["orientation"].setValue( tool.Orientation.Local )
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 0, 0, -1 ).equalWithAbsError(
script["transform"]["out"].fullTransform( "/plane" ).translation(),
0.0000001
)
)
def testTransformWithRotation( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["transformFilter"] = GafferScene.PathFilter()
script["transformFilter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
script["transform"] = GafferScene.Transform()
script["transform"]["in"].setInput( script["plane"]["out"] )
script["transform"]["filter"].setInput( script["transformFilter"]["out"] )
script["transform"]["transform"]["rotate"]["y"].setValue( 90 )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["transform"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool["orientation"].setValue( tool.Orientation.Local )
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 0, 0, -1 ).equalWithAbsError(
script["transform"]["out"].fullTransform( "/plane" ).translation(),
0.0000001
)
)
def testHandlesTransform( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["transform"]["rotate"]["y"].setValue( 90 )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["plane"]["out"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
tool["orientation"].setValue( tool.Orientation.Local )
self.assertTrue(
tool.handlesTransform().equalWithAbsError(
imath.M44f().rotate( imath.V3f( 0, math.pi / 2, 0 ) ),
0.000001
)
)
tool["orientation"].setValue( tool.Orientation.Parent )
self.assertEqual(
tool.handlesTransform(), imath.M44f()
)
tool["orientation"].setValue( tool.Orientation.World )
self.assertEqual(
tool.handlesTransform(), imath.M44f()
)
def testContext( self ) :
script = Gaffer.ScriptNode()
script["variables"].addChild( Gaffer.NameValuePlug( "enabled", True ) )
script["variables"].addChild( Gaffer.NameValuePlug( "x", 1.0 ) )
script["plane"] = GafferScene.Plane()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( inspect.cleandoc(
"""
parent["plane"]["transform"]["translate"]["x"] = context["x"]
parent["plane"]["enabled"] = context["enabled"]
"""
) )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["plane"]["out"] )
view.setContext( script.context() )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
self.assertEqual( tool.selection()[0].path, "/plane" )
self.assertEqual( tool.selection()[0].transformSpace, imath.M44f() )
def testPivotExpression( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( inspect.cleandoc(
"""
parent["plane"]["transform"]["pivot"]["x"] = context["x"]
"""
) )
script["variables"] = Gaffer.ContextVariables()
script["variables"].setup( GafferScene.ScenePlug() )
script["variables"]["in"].setInput( script["plane"]["out"] )
script["variables"]["variables"].addChild( Gaffer.NameValuePlug( "x", 1.0 ) )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["variables"]["out"] )
view.setContext( script.context() )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
self.assertEqual( tool.selection()[0].path, "/plane" )
self.assertEqual( tool.handlesTransform(), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
def testMultipleSelection( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["sphere"] = GafferScene.Sphere()
script["sphere"]["transform"]["rotate"]["y"].setValue( 90 )
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["group"]["in"][1].setInput( script["sphere"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane", "/group/sphere" ] ) )
selection = tool.selection()
self.assertEqual( len( selection ), 2 )
self.assertEqual( { s.transformPlug for s in selection }, { script["plane"]["transform"], script["sphere"]["transform"] } )
tool["orientation"].setValue( tool.Orientation.Local )
tool.translate( imath.V3f( 1, 0, 0 ) )
self.assertEqual( script["plane"]["transform"]["translate"].getValue(), imath.V3f( 1, 0, 0 ) )
self.assertTrue(
imath.V3f( 0, 0, -1 ).equalWithAbsError(
script["sphere"]["transform"]["translate"].getValue(), 0.000001
)
)
def testMultipleSelectionDoesntPickSamePlugTwice( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["group"]["in"][1].setInput( script["plane"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane", "/group/plane1" ] ) )
# Even though there are two selected paths, there should only be
# one thing in the tool's selection, because both paths are generated
# by the same upstream node.
selection = tool.selection()
self.assertEqual( len( selection ), 1 )
self.assertEqual( selection[0].transformPlug, script["plane"]["transform"] )
def testHandlesFollowLastSelected( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["sphere"] = GafferScene.Sphere()
script["sphere"]["transform"]["translate"].setValue( imath.V3f( 1 ) )
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["plane"]["out"] )
script["group"]["in"][1].setInput( script["sphere"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setLastSelectedPath( view.getContext(), "/group/plane" )
self.assertEqual( tool.handlesTransform(), imath.M44f() )
GafferSceneUI.ContextAlgo.setLastSelectedPath( view.getContext(), "/group/sphere" )
self.assertEqual( tool.handlesTransform(), imath.M44f().translate( script["sphere"]["transform"]["translate"].getValue() ) )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
self.assertEqual( tool.handlesTransform(), imath.M44f() )
def testPromotedPlugs( self ) :
script = Gaffer.ScriptNode()
script["box"] = Gaffer.Box()
script["box"]["sphere"] = GafferScene.Sphere()
Gaffer.PlugAlgo.promote( script["box"]["sphere"]["transform"] )
Gaffer.PlugAlgo.promote( script["box"]["sphere"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["box"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setLastSelectedPath( view.getContext(), "/sphere" )
self.assertEqual( tool.selection()[0].transformPlug, script["box"]["transform"] )
def testSelectionChangedSignal( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
view = GafferSceneUI.SceneView()
view["in"].setInput( script["plane"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
cs = GafferTest.CapturingSlot( tool.selectionChangedSignal() )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
self.assertTrue( len( cs ) )
self.assertEqual( cs[0][0], tool )
def testEditAncestorIfSelectionNotTransformable( self ) :
script = Gaffer.ScriptNode()
script["sceneReader"] = GafferScene.SceneReader()
script["sceneReader"]["fileName"].setValue( "${GAFFER_ROOT}/python/GafferSceneTest/alembicFiles/groupedPlane.abc" )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["sceneReader"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
selection = tool.selection()
self.assertEqual( len( selection ), 1 )
self.assertEqual( selection[0].transformPlug, script["sceneReader"]["transform"] )
self.assertEqual( selection[0].path, "/group" )
def testSelectionRefersToFirstPublicPlug( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
view = GafferSceneUI.SceneView()
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
self.assertEqual( tool.selection(), [] )
view["in"].setInput( script["plane"]["out"] )
self.assertEqual( tool.selection(), [] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/plane" ] ) )
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].scene, script["plane"]["out"] )
box = Gaffer.Box.create( script, Gaffer.StandardSet( [ script["plane"] ] ) )
Gaffer.PlugAlgo.promote( box["plane"]["out"] )
view["in"].setInput( box["out"] )
self.assertEqual( tool.selection()[0].scene, box["out"] )
def testSelectionRefersToCorrectPlug( self ) :
script = Gaffer.ScriptNode()
script["sphere"] = GafferScene.Sphere()
script["cube"] = GafferScene.Cube()
script["freeze"] = GafferScene.FreezeTransform()
script["freezeFilter"] = GafferScene.PathFilter()
script["freezeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
script["freeze"]["in"].setInput( script["sphere"]["out"] )
script["freeze"]["filter"].setInput( script["freezeFilter"]["out"] )
script["instancer"] = GafferScene.Instancer()
script["instancerFilter"] = GafferScene.PathFilter()
script["instancerFilter"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
script["instancer"]["in"].setInput( script["freeze"]["out"] )
script["instancer"]["prototypes"].setInput( script["cube"]["out"] )
script["instancer"]["filter"].setInput( script["instancerFilter"]["out"] )
script["subTree"] = GafferScene.SubTree()
script["subTree"]["root"].setValue( "/sphere/instances" )
script["subTree"]["in"].setInput( script["instancer"]["out"] )
script["plane"] = GafferScene.Plane()
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["subTree"]["out"] )
script["group"]["in"][1].setInput( script["plane"]["out"] )
view = GafferSceneUI.SceneView()
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
self.assertEqual( tool.selection(), [] )
view["in"].setInput( script["group"]["out"] )
self.assertEqual( tool.selection(), [] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/plane" ] ) )
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].transformPlug, script["plane"]["transform"] )
def testLastSelectedObjectWithSharedTransformPlug( self ) :
script = Gaffer.ScriptNode()
script["sphere"] = GafferScene.Sphere()
script["sphere"]["transform"]["translate"].setValue( imath.V3f( 1, 0, 0 ) )
script["group"] = GafferScene.Group()
script["group"]["in"][0].setInput( script["sphere"]["out"] )
script["group"]["in"][1].setInput( script["sphere"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["group"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/group/sphere" ] ) )
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].transformPlug, script["sphere"]["transform"] )
self.assertEqual( tool.selection()[0].path, "/group/sphere" )
GafferSceneUI.ContextAlgo.setLastSelectedPath( view.getContext(), "/group/sphere1" )
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].transformPlug, script["sphere"]["transform"] )
self.assertEqual( tool.selection()[0].path, "/group/sphere1" )
GafferSceneUI.ContextAlgo.setLastSelectedPath( view.getContext(), "/group/sphere" )
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].transformPlug, script["sphere"]["transform"] )
self.assertEqual( tool.selection()[0].path, "/group/sphere" )
self.assertEqual( tool.handlesTransform(), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
def testSelectionSorting( self ) :
# This test exposes a bug we had when sorting the selection internal
# to the TransformTool, triggering a heap-buffer-overflow report in
# ASAN builds.
# Craft a scene containing 26 spheres underneath a group.
script = Gaffer.ScriptNode()
script["sphere"] = GafferScene.Sphere()
script["group"] = GafferScene.Group()
selection = IECore.PathMatcher()
for i in range( 0, 26 ) :
script["group"]["in"][i].setInput( script["sphere"]["out"] )
selection.addPath( "/group/sphere" + ( str( i ) if i else "" ) )
# Write it out to disk and read it back in again. This gives us the
# same scene, but now the individual spheres aren't transformable on
# their own - the only editable transform is now the root.
script["writer"] = GafferScene.SceneWriter()
script["writer"]["in"].setInput( script["group"]["out"] )
script["writer"]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "test.abc" ) )
script["writer"]["task"].execute()
script["reader"] = GafferScene.SceneReader()
script["reader"]["fileName"].setInput( script["writer"]["fileName"] )
# Set up a TransformTool and tell it to transform each of the spheres.
view = GafferSceneUI.SceneView()
view["in"].setInput( script["reader"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), selection )
# The tool should instead choose to transform the root location.
self.assertEqual( len( tool.selection() ), 1 )
self.assertEqual( tool.selection()[0].transformPlug, script["reader"]["transform"] )
def testSetFilter( self ) :
script = Gaffer.ScriptNode()
script["sphere"] = GafferScene.Sphere()
script["sphere"]["sets"].setValue( "A" )
script["setFilter"] = GafferScene.SetFilter()
script["setFilter"]["set"].setValue( "A" )
script["transform"] = GafferScene.Transform()
script["transform"]["in"].setInput( script["sphere"]["out"] )
script["transform"]["filter"].setInput( script["setFilter"]["out"] )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["transform"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/sphere" ] ) )
self.assertEqual( tool.selection()[0].transformPlug, script["transform"]["transform"] )
def testSpreadsheetAndCollect( self ) :
script = Gaffer.ScriptNode()
script["sphere"] = GafferScene.Sphere()
script["spreadsheet"] = Gaffer.Spreadsheet()
script["spreadsheet"]["rows"].addColumn( script["sphere"]["transform"] )
script["sphere"]["transform"].setInput( script["spreadsheet"]["out"]["transform"] )
script["spreadsheet"]["rows"].addRow()["name"].setValue( "sphere1" )
script["spreadsheet"]["rows"].addRow()["name"].setValue( "sphere2" )
script["spreadsheet"]["selector"].setValue( "${collect:rootName}" )
script["collect"] = GafferScene.CollectScenes()
script["collect"]["in"].setInput( script["sphere"]["out"] )
script["collect"]["rootNames"].setInput( script["spreadsheet"]["activeRowNames"] )
self.assertEqual( script["collect"]["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "sphere1", "sphere2" ] ) )
view = GafferSceneUI.SceneView()
view["in"].setInput( script["collect"]["out"] )
tool = GafferSceneUI.TranslateTool( view )
tool["active"].setValue( True )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/sphere1" ] ) )
self.assertEqual( tool.selection()[0].transformPlug, script["spreadsheet"]["rows"][1]["cells"]["transform"]["value"] )
GafferSceneUI.ContextAlgo.setSelectedPaths( view.getContext(), IECore.PathMatcher( [ "/sphere2" ] ) )
self.assertEqual( tool.selection()[0].transformPlug, script["spreadsheet"]["rows"][2]["cells"]["transform"]["value"] )
# Check that we can work with promoted plugs too
box = Gaffer.Box.create( script, Gaffer.StandardSet( [ script["collect"], script["sphere"], script["spreadsheet"] ] ) )
promotedRowsPlug = Gaffer.PlugAlgo.promote( box["spreadsheet"]["rows"] )
self.assertEqual( tool.selection()[0].transformPlug, promotedRowsPlug[2]["cells"]["transform"]["value"] )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
katerina7479/kadre | view/main_window.py | 1 | 4144 | from PySide import QtGui
from PySide import QtCore
from PySide.QtGui import QMainWindow
from PySide.QtGui import QAction
from utils import myjson
from pagemanager import PageManager
from dialogs.opendialog import OpenDialog
from kglobals import _PATH
class KMainWindow(QMainWindow):
def __init__(self, title):
super(KMainWindow, self).__init__()
self.setWindowTitle(title)
self.path = _PATH
self._setPaths()
self.actiondict = {
"open": self.on_open, "new": self.on_new, "database": self.on_database,
"exit": QtGui.qApp.quit, "back": self.on_back, "next": self.on_next}
self.statusBar()
self.PageManager = PageManager()
self._config()
def _setPaths(self):
self.menuConfigPath = self.path + "\\view\\menu_actions.json"
self.toolConfigPath = self.path + "\\view\\toolbar_actions.json"
def sizeHint(self):
return QtCore.QSize(800, 600)
def _config(self):
self.menuconfigdata = myjson.GetData(self.menuConfigPath)
self._makeMenuBar()
self.setCentralWidget(self.PageManager)
self.toolbarconfigdata = myjson.GetData(self.toolConfigPath)
self._makeToolBar()
def _makeMenuBar(self):
self.menubar = self.menuBar()
self.menulist = []
for menu in self.menuconfigdata:
mymenu = self.menubar.addMenu(menu)
self.menulist.append(mymenu)
actions = self.menuconfigdata[menu]
actionlist = self._parseactions(actions)
for item in actionlist:
action = item[0]
name = item[1]
if action == "Separator":
mymenu.addSeparator()
else:
if name in self.actiondict:
mymenu.addAction(action)
method = self.actiondict[name]
action.triggered.connect(method)
else:
mymenu.addAction(action)
def _makeToolBar(self):
self.toolbar = QtGui.QToolBar()
self.addToolBar(self.toolbar)
self.toolbarlist = []
toollist = self._parseactions(self.toolbarconfigdata)
for tool in toollist:
action = tool[0]
name = tool[1]
if name == "Separator":
self.toolbar.addSeparator()
else:
if name in self.actiondict:
self.toolbar.addAction(action)
method = self.actiondict[name]
action.triggered.connect(method)
else:
self.toolbar.addAction(action)
def _sortbyposition(self, uslist):
slist = sorted(uslist)
flist = []
for item in slist:
flist.append((item[1], item[2]))
return flist
def _parseactions(self, actions):
actionlist = []
for act in actions:
atts = actions[act]
if act == "Separator":
newaction = "Separator"
else:
try:
newaction = QAction(QtGui.QIcon(atts["icon"]), atts["text"], self)
except:
newaction = QAction(atts["text"], self)
try:
newaction.setShortcut(atts["shortcut"])
except:
pass
try:
newaction.setStatusTip(atts["statustip"])
except:
pass
actionlist.append((atts["pos"], newaction, act))
actionlist = self._sortbyposition(actionlist)
return actionlist
def on_open(self):
self.PageManager.ThisPage("Blank")
self.diag = OpenDialog(self)
self.diag.show()
def on_new(self):
self.PageManager.ThisPage("FormPage")
def on_database(self):
self.PageManager.ThisPage("Blank")
def on_refresh(self):
self.PageManager.PageRefresh()
def on_back(self):
self.PageManager.LastPage()
def on_next(self):
self.PageManager.NextPage()
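# A hypothetical snippet of the menu_actions.json structure that
# _parseactions() expects; the real file is not shown here, so the entries and
# icon paths below are illustrative assumptions. Top-level keys become menus,
# nested keys that match self.actiondict ("open", "new", "exit", ...) get
# their triggered signals connected, and "pos" controls ordering:
#
#   {
#       "File": {
#           "open": {"pos": 0, "text": "&Open", "icon": "icons/open.png",
#                    "shortcut": "Ctrl+O", "statustip": "Open a project"},
#           "Separator": {"pos": 1},
#           "exit": {"pos": 2, "text": "E&xit"}
#       }
#   }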
| mit |
yhpeng-git/mxnet | example/warpctc/lstm_model.py | 15 | 2099 |
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import LSTMState, LSTMParam, lstm, lstm_inference_symbol
class LSTMInferenceModel(object):
def __init__(self,
num_lstm_layer,
seq_len,
num_hidden,
num_label,
arg_params,
data_size,
ctx=mx.cpu()):
self.sym = lstm_inference_symbol(num_lstm_layer,
seq_len,
num_hidden,
num_label)
batch_size = 1
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
data_shape = [("data", (batch_size, data_size))]
input_shapes = dict(init_c + init_h + data_shape)
self.executor = self.sym.simple_bind(ctx=ctx, **input_shapes)
for key in self.executor.arg_dict.keys():
if key in arg_params:
arg_params[key].copyto(self.executor.arg_dict[key])
state_name = []
for i in range(num_lstm_layer):
state_name.append("l%d_init_c" % i)
state_name.append("l%d_init_h" % i)
self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
self.input_arr = mx.nd.zeros(data_shape[0][1])
def forward(self, input_data, new_seq=False):
if new_seq == True:
for key in self.states_dict.keys():
self.executor.arg_dict[key][:] = 0.
input_data.copyto(self.executor.arg_dict["data"])
self.executor.forward()
for key in self.states_dict.keys():
self.states_dict[key].copyto(self.executor.arg_dict[key])
prob = self.executor.outputs[0].asnumpy()
return prob | apache-2.0 |
mrquim/repository.mrquim | repo/script.module.youtube.dl/lib/youtube_dl/extractor/scrippsnetworks.py | 33 | 2528 | # coding: utf-8
from __future__ import unicode_literals
from .adobepass import AdobePassIE
from ..utils import (
int_or_none,
smuggle_url,
update_url_query,
)
class ScrippsNetworksWatchIE(AdobePassIE):
IE_NAME = 'scrippsnetworks:watch'
_VALID_URL = r'https?://watch\.(?:hgtv|foodnetwork|travelchannel|diynetwork|cookingchanneltv)\.com/player\.[A-Z0-9]+\.html#(?P<id>\d+)'
_TEST = {
'url': 'http://watch.hgtv.com/player.HNT.html#0256538',
'md5': '26545fd676d939954c6808274bdb905a',
'info_dict': {
'id': '0256538',
'ext': 'mp4',
'title': 'Seeking a Wow House',
'description': 'Buyers retiring in Palm Springs, California, want a modern house with major wow factor. They\'re also looking for a pool and a large, open floorplan with tall windows looking out at the views.',
'uploader': 'SCNI',
'upload_date': '20170207',
'timestamp': 1486450493,
},
'skip': 'requires TV provider authentication',
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
channel = self._parse_json(self._search_regex(
r'"channels"\s*:\s*(\[.+\])',
webpage, 'channels'), video_id)[0]
video_data = next(v for v in channel['videos'] if v.get('nlvid') == video_id)
title = video_data['title']
release_url = video_data['releaseUrl']
if video_data.get('restricted'):
requestor_id = self._search_regex(
r'requestorId\s*=\s*"([^"]+)";', webpage, 'requestor id')
resource = self._get_mvpd_resource(
requestor_id, title, video_id,
video_data.get('ratings', [{}])[0].get('rating'))
auth = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
release_url = update_url_query(release_url, {'auth': auth})
return {
'_type': 'url_transparent',
'id': video_id,
'title': title,
'url': smuggle_url(release_url, {'force_smil_url': True}),
'description': video_data.get('description'),
'thumbnail': video_data.get('thumbnailUrl'),
'series': video_data.get('showTitle'),
'season_number': int_or_none(video_data.get('season')),
'episode_number': int_or_none(video_data.get('episodeNumber')),
'ie_key': 'ThePlatform',
}
| gpl-2.0 |
whn09/tensorflow | tensorflow/contrib/metrics/python/ops/histogram_ops.py | 159 | 10459 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Metrics that use histograms.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
def auc_using_histogram(boolean_labels,
scores,
score_range,
nbins=100,
collections=None,
check_shape=True,
name=None):
"""AUC computed by maintaining histograms.
Rather than computing AUC directly, this Op maintains Variables containing
histograms of the scores associated with `True` and `False` labels. By
comparing these the AUC is generated, with some discretization error.
See: "Efficient AUC Learning Curve Calculation" by Bouckaert.
This AUC Op updates in `O(batch_size + nbins)` time and works well even with
large class imbalance. The accuracy is limited by discretization error due
to finite number of bins. If scores are concentrated in a fewer bins,
accuracy is lower. If this is a concern, we recommend trying different
numbers of bins and comparing results.
Args:
boolean_labels: 1-D boolean `Tensor`. Entry is `True` if the corresponding
record is in class.
scores: 1-D numeric `Tensor`, same shape as boolean_labels.
score_range: `Tensor` of shape `[2]`, same dtype as `scores`. The min/max
values of score that we expect. Scores outside range will be clipped.
nbins: Integer number of bins to use. Accuracy strictly increases as the
number of bins increases.
collections: List of graph collections keys. Internal histogram Variables
are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
check_shape: Boolean. If `True`, do a runtime shape check on the scores
and labels.
name: A name for this Op. Defaults to "auc_using_histogram".
Returns:
auc: `float32` scalar `Tensor`. Fetching this converts internal histograms
to auc value.
update_op: `Op`, when run, updates internal histograms.
"""
if collections is None:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
with variable_scope.variable_scope(
name, 'auc_using_histogram', [boolean_labels, scores, score_range]):
scores, boolean_labels = tensor_util.remove_squeezable_dimensions(
scores, boolean_labels)
score_range = ops.convert_to_tensor(score_range, name='score_range')
boolean_labels, scores = _check_labels_and_scores(
boolean_labels, scores, check_shape)
hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
score_range, nbins)
hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(hist_true,
hist_false,
nbins,
collections)
auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
return auc, update_op
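# A minimal usage sketch, assuming the function is re-exported as
# tf.contrib.metrics.auc_using_histogram; the placeholders and feed loop are
# illustrative only. Because the histogram variables default to
# GraphKeys.LOCAL_VARIABLES, local_variables_initializer() resets them:
#
#   import tensorflow as tf
#
#   labels = tf.placeholder(tf.bool, shape=[None])
#   scores = tf.placeholder(tf.float32, shape=[None])
#   auc, update_op = tf.contrib.metrics.auc_using_histogram(
#       labels, scores, score_range=[0.0, 1.0], nbins=100)
#
#   with tf.Session() as sess:
#       sess.run(tf.local_variables_initializer())
#       for batch_labels, batch_scores in batches:  # user-supplied iterator
#           sess.run(update_op, {labels: batch_labels, scores: batch_scores})
#       auc_value = sess.run(auc)                   # final AUC estimate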
def _check_labels_and_scores(boolean_labels, scores, check_shape):
"""Check the rank of labels/scores, return tensor versions."""
with ops.name_scope('_check_labels_and_scores',
values=[boolean_labels, scores]):
boolean_labels = ops.convert_to_tensor(boolean_labels,
name='boolean_labels')
scores = ops.convert_to_tensor(scores, name='scores')
if boolean_labels.dtype != dtypes.bool:
raise ValueError(
'Argument boolean_labels should have dtype bool. Found: %s',
boolean_labels.dtype)
if check_shape:
labels_rank_1 = control_flow_ops.Assert(
math_ops.equal(1, array_ops.rank(boolean_labels)),
['Argument boolean_labels should have rank 1. Found: ',
boolean_labels.name, array_ops.shape(boolean_labels)])
scores_rank_1 = control_flow_ops.Assert(
math_ops.equal(1, array_ops.rank(scores)),
['Argument scores should have rank 1. Found: ', scores.name,
array_ops.shape(scores)])
with ops.control_dependencies([labels_rank_1, scores_rank_1]):
return boolean_labels, scores
else:
return boolean_labels, scores
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
"""Create histogram tensors from one batch of labels/scores."""
with variable_scope.variable_scope(
None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
# Histogram of scores for records in this batch with True label.
hist_true = histogram_ops.histogram_fixed_width(
array_ops.boolean_mask(scores, boolean_labels),
score_range,
nbins=nbins,
dtype=dtypes.int64,
name='hist_true')
# Histogram of scores for records in this batch with False label.
hist_false = histogram_ops.histogram_fixed_width(
array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
score_range,
nbins=nbins,
dtype=dtypes.int64,
name='hist_false')
return hist_true, hist_false
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
"""Accumulate histograms in new variables."""
with variable_scope.variable_scope(
None, 'hist_accumulate', [hist_true, hist_false]):
# Holds running total histogram of scores for records labeled True.
hist_true_acc = variable_scope.get_variable(
'hist_true_acc',
shape=[nbins],
dtype=hist_true.dtype,
initializer=init_ops.zeros_initializer(),
collections=collections,
trainable=False)
# Holds running total histogram of scores for records labeled False.
hist_false_acc = variable_scope.get_variable(
'hist_false_acc',
shape=[nbins],
dtype=hist_true.dtype,
initializer=init_ops.zeros_initializer(),
collections=collections,
trainable=False)
update_op = control_flow_ops.group(
hist_true_acc.assign_add(hist_true),
hist_false_acc.assign_add(hist_false),
name='update_op')
return hist_true_acc, hist_false_acc, update_op
def _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins):
"""Convert histograms to auc.
Args:
hist_true_acc: `Tensor` holding accumulated histogram of scores for records
that were `True`.
hist_false_acc: `Tensor` holding accumulated histogram of scores for
records that were `False`.
nbins: Integer number of bins in the histograms.
Returns:
Scalar `Tensor` estimating AUC.
"""
# Note that this follows the "Approximating AUC" section in:
# Efficient AUC learning curve calculation, R. R. Bouckaert,
# AI'06 Proceedings of the 19th Australian joint conference on Artificial
# Intelligence: advances in Artificial Intelligence
# Pages 181-191.
# Note that the above paper has an error, and we need to re-order our bins to
# go from high to low score.
# Normalize histogram so we get fraction in each bin.
normed_hist_true = math_ops.truediv(hist_true_acc,
math_ops.reduce_sum(hist_true_acc))
normed_hist_false = math_ops.truediv(hist_false_acc,
math_ops.reduce_sum(hist_false_acc))
# These become delta x, delta y from the paper.
delta_y_t = array_ops.reverse_v2(normed_hist_true, [0], name='delta_y_t')
delta_x_t = array_ops.reverse_v2(normed_hist_false, [0], name='delta_x_t')
# strict_1d_cumsum requires float32 args.
delta_y_t = math_ops.cast(delta_y_t, dtypes.float32)
delta_x_t = math_ops.cast(delta_x_t, dtypes.float32)
# Trapezoidal integration, \int_0^1 0.5 * (y_t + y_{t-1}) dx_t
y_t = _strict_1d_cumsum(delta_y_t, nbins)
first_trap = delta_x_t[0] * y_t[0] / 2.0
other_traps = delta_x_t[1:] * (y_t[1:] + y_t[:nbins - 1]) / 2.0
return math_ops.add(first_trap, math_ops.reduce_sum(other_traps), name='auc')
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# Also see if cast to float32 above can be removed with new cumsum.
# See: https://github.com/tensorflow/tensorflow/issues/813
def _strict_1d_cumsum(tensor, len_tensor):
"""Cumsum of a 1D tensor with defined shape by padding and convolving."""
# Assumes tensor shape is fully defined.
with ops.name_scope('strict_1d_cumsum', values=[tensor]):
if len_tensor == 0:
return constant_op.constant([])
len_pad = len_tensor - 1
x = array_ops.pad(tensor, [[len_pad, 0]])
h = array_ops.ones_like(x)
return _strict_conv1d(x, h)[:len_tensor]
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See: https://github.com/tensorflow/tensorflow/issues/813
def _strict_conv1d(x, h):
"""Return x * h for rank 1 tensors x and h."""
with ops.name_scope('strict_conv1d', values=[x, h]):
x = array_ops.reshape(x, (1, -1, 1, 1))
h = array_ops.reshape(h, (-1, 1, 1, 1))
result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
return array_ops.reshape(result, [-1])
| apache-2.0 |
RRCKI/panda-server | pandaserver/userinterface/Client.py | 1 | 63710 | '''
client methods
'''
import os
import re
import sys
import gzip
import urllib
import commands
import tempfile
import cPickle as pickle
try:
import json
except:
import simplejson as json
# configuration
try:
baseURL = os.environ['PANDA_URL']
except:
baseURL = 'http://pandaserver.cern.ch:25080/server/panda'
try:
baseURLSSL = os.environ['PANDA_URL_SSL']
except:
baseURLSSL = 'https://pandaserver.cern.ch:25443/server/panda'
# exit code
EC_Failed = 255
# panda server URLs
if os.environ.has_key('PANDA_URL_MAP'):
serverURLs = {'default' : {'URL' : baseURL,
'URLSSL' : baseURLSSL},
}
# decode envvar to map
try:
for tmpCompStr in os.environ['PANDA_URL_MAP'].split('|'):
tmpKey,tmpURL,tmpURLSSL = tmpCompStr.split(',')
# append
serverURLs[tmpKey] = {'URL' : tmpURL,
'URLSSL' : tmpURLSSL}
except:
pass
else:
# default
serverURLs = {'default' : {'URL' : baseURL,
'URLSSL' : baseURLSSL},
'CERN' : {'URL' : 'http://pandaserver.cern.ch:25080/server/panda',
'URLSSL' : 'https://pandaserver.cern.ch:25443/server/panda'},
}
# bamboo
baseURLBAMBOO = 'http://pandabamboo.cern.ch:25070/bamboo/bamboo'
# get URL
def _getURL(type,srvID=None):
if serverURLs.has_key(srvID):
urls = serverURLs[srvID]
else:
urls = serverURLs['default']
return urls[type]
# get Panda srvIDs
def getPandas():
srvs = serverURLs.keys()
# remove 'default'
try:
srvs.remove('default')
except:
pass
return srvs
# look for a grid proxy certificate
def _x509():
# see X509_USER_PROXY
try:
return os.environ['X509_USER_PROXY']
except:
pass
# see the default place
x509 = '/tmp/x509up_u%s' % os.getuid()
if os.access(x509,os.R_OK):
return x509
# no valid proxy certificate
# FIXME
print "No valid grid proxy certificate found"
return ''
# curl class
class _Curl:
# constructor
def __init__(self):
# path to curl
self.path = 'curl'
# verification of the host certificate
self.verifyHost = True
# request a compressed response
self.compress = True
# SSL cert/key
self.sslCert = ''
self.sslKey = ''
# verbose
self.verbose = False
# GET method
def get(self,url,data):
# make command
com = '%s --silent --get' % self.path
if not self.verifyHost:
com += ' --insecure'
elif os.environ.has_key('X509_CERT_DIR'):
com += ' --capath %s' % os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
com += ' --capath /etc/grid-security/certificates'
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# timeout
com += ' -m 600'
# data
strData = ''
for key in data.keys():
strData += 'data="%s"\n' % urllib.urlencode({key:data[key]})
# write data to temporary config file
try:
tmpName = os.environ['PANDA_TMP']
except:
tmpName = '/tmp'
tmpName += '/%s_%s' % (commands.getoutput('whoami'),commands.getoutput('uuidgen'))
tmpFile = open(tmpName,'w')
tmpFile.write(strData)
tmpFile.close()
com += ' --config %s' % tmpName
com += ' %s' % url
# execute
if self.verbose:
print com
print commands.getoutput('cat %s' % tmpName)
ret = commands.getstatusoutput(com)
# remove temporary file
os.remove(tmpName)
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
if self.verbose:
print ret
return ret
# POST method
def post(self,url,data):
# make command
com = '%s --silent' % self.path
if not self.verifyHost:
com += ' --insecure'
elif os.environ.has_key('X509_CERT_DIR'):
com += ' --capath %s' % os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
com += ' --capath /etc/grid-security/certificates'
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# timeout
com += ' -m 600'
# data
strData = ''
for key in data.keys():
strData += 'data="%s"\n' % urllib.urlencode({key:data[key]})
# write data to temporary config file
try:
tmpName = os.environ['PANDA_TMP']
except:
tmpName = '/tmp'
tmpName += '/%s_%s' % (commands.getoutput('whoami'),commands.getoutput('uuidgen'))
tmpFile = open(tmpName,'w')
tmpFile.write(strData)
tmpFile.close()
com += ' --config %s' % tmpName
com += ' %s' % url
# execute
if self.verbose:
print com
print commands.getoutput('cat %s' % tmpName)
ret = commands.getstatusoutput(com)
# remove temporary file
os.remove(tmpName)
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
if self.verbose:
print ret
return ret
# PUT method
def put(self,url,data):
# make command
com = '%s --silent' % self.path
if not self.verifyHost:
com += ' --insecure'
elif os.environ.has_key('X509_CERT_DIR'):
com += ' --capath %s' % os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
com += ' --capath /etc/grid-security/certificates'
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# emulate PUT
for key in data.keys():
com += ' -F "%s=@%s"' % (key,data[key])
com += ' %s' % url
# execute
if self.verbose:
print com
ret = commands.getstatusoutput(com)
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
if self.verbose:
print ret
return ret
'''
Client API
'''
# use web cache
def useWebCache():
"""Switch to use web cache for some read-only requests so that the number
of hits to the back-end database is reduced.
args:
returns:
"""
global baseURL
baseURL = re.sub('25080','25085',baseURL)
global serverURLs
for tmpKey,tmpVal in serverURLs.iteritems():
tmpVal['URL'] = baseURL
# submit jobs
def submitJobs(jobs,srvID=None,toPending=False):
"""Submit jobs
args:
jobs: the list of JobSpecs
srvID: obsolete
toPending: set True if jobs need to be pending state for the
two-staged submission mechanism
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# set hostname
hostname = commands.getoutput('hostname')
for job in jobs:
job.creationHost = hostname
# serialize
strJobs = pickle.dumps(jobs)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = _getURL('URLSSL',srvID) + '/submitJobs'
data = {'jobs':strJobs}
if toPending:
data['toPending'] = True
status,output = curl.post(url,data)
if status!=0:
print output
return status,output
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR submitJobs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
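# An illustrative sketch of submitting a single job; the JobSpec import path
# and attribute values are assumptions, and the full set of required
# attributes depends on the PanDA setup:
#
#   from taskbuffer.JobSpec import JobSpec
#
#   job = JobSpec()
#   job.jobName = 'my_test_job'
#   job.transformation = 'http://example.com/trf/myTrf.py'
#   job.computingSite = 'SOME_SITE'
#   job.prodSourceLabel = 'user'
#   job.currentPriority = 1000
#
#   status, output = submitJobs([job])
#   # status == 0 means the request reached the server; output holds the
#   # per-job results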
# run task assignment
def runTaskAssignment(jobs):
"""Run the task brokerage
args:
ids: list of typical JobSpecs for tasks to be assigned
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# set hostname
hostname = commands.getoutput('hostname')
for job in jobs:
job.creationHost = hostname
# serialize
strJobs = pickle.dumps(jobs)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/runTaskAssignment'
data = {'jobs':strJobs}
status,output = curl.post(url,data)
if status!=0:
print output
return status,output
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR runTaskAssignment : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get job status
def getJobStatus(ids,srvID=None):
"""Get job status
args:
ids: the list of PandaIDs
srvID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of JobSpecs (or Nones for non-existing PandaIDs)
"""
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = _getURL('URL',srvID) + '/getJobStatus'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatus : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
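# An illustrative sketch (the PandaIDs are made up): poll a few jobs and print
# their current status, skipping IDs the server does not know about:
#
#   status, jobSpecs = getJobStatus([4112341234, 4112341235])
#   if status == 0:
#       for job in jobSpecs:
#           if job is not None:
#               print job.PandaID, job.jobStatus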
# get PandaID with jobexeID
def getPandaIDwithJobExeID(ids):
"""Get the list of PandaIDs corresponding to a given jobExecutionIDs
args:
ids: list of jobExecutionIDs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PandaIDs (or Nones for non-existing IDs)
"""
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = _getURL('URL') + '/getPandaIDwithJobExeID'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDwithJobExeID : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get assigning task
def getAssigningTask():
"""Get the list of IDs of tasks which are being assigned by the
task brokerage
args:
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of taskIDs
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getAssigningTask'
status,output = curl.get(url,{})
try:
return status,pickle.loads(output)
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getAssigningTask : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get assigned cloud for tasks
def seeCloudTask(ids):
"""Check to which clouds the tasks are assigned
args:
ids: the list of taskIDs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of clouds (or Nones if tasks are not yet assigned)
raises:
EC_Failed: if communication failure to the panda server
"""
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/seeCloudTask'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR seeCloudTask : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# kill jobs
def killJobs(ids,code=None,verbose=False,srvID=None,useMailAsID=False,keepUnmerged=False):
"""Kill jobs. Normal users can kill only their own jobs.
People with production VOMS role can kill any jobs.
Running jobs are killed when next heartbeat comes from the pilot.
Set code=9 if running jobs need to be killed immediately.
args:
ids: the list of PandaIDs
code: specify why the jobs are killed
2: expire
3: aborted
4: expire in waiting
7: retry by server
8: rebrokerage
9: force kill
50: kill by JEDI
91: kill user jobs with prod role
verbose: set True to see what's going on
srvID: obsolete
useMailAsID: obsolete
keepUnmerged: set True not to cancel unmerged jobs when pmerge is killed.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of True/False indicating whether the kill request was accepted for each job
"""
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = _getURL('URLSSL',srvID) + '/killJobs'
data = {'ids':strIDs,'code':code,'useMailAsID':useMailAsID}
killOpts = ''
if keepUnmerged:
killOpts += 'keepUnmerged,'
data['killOpts'] = killOpts[:-1]
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR killJobs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
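# An illustrative sketch (the PandaIDs are made up): force-kill two running
# jobs immediately instead of waiting for the next pilot heartbeat:
#
#   status, results = killJobs([4112341234, 4112341235], code=9, verbose=True)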
# reassign jobs
def reassignJobs(ids,forPending=False,firstSubmission=None):
"""Triggers reassignment of jobs. This is not effective if jobs were preassigned to sites before being submitted.
args:
ids: the list of taskIDs
forPending: set True if pending jobs are reassigned
firstSubmission: set True if first jobs are submitted for a task, or False if not
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignJobs'
data = {'ids':strIDs}
if forPending:
data['forPending'] = True
if firstSubmission != None:
data['firstSubmission'] = firstSubmission
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR reassignJobs : %s %s" % (type,value)
print errStr
return EC_Failed,"stat=%s err=%s %s" % (status,output,errStr)
# query PandaIDs (obsolete)
def queryPandaIDs(ids):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryPandaIDs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR queryPandaIDs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# query job info per cloud (obsolete)
def queryJobInfoPerCloud(cloud,schedulerID=None):
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryJobInfoPerCloud'
data = {'cloud':cloud}
if schedulerID != None:
data['schedulerID'] = schedulerID
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR queryJobInfoPerCloud : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get job statistics
def getJobStatistics(sourcetype=None):
"""Get job statistics
args:
sourcetype: type of jobs
all: all jobs
analysis: analysis jobs
production: production jobs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number jobs per job status in each site
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
for srvID in getPandas():
url = _getURL('URL',srvID) + '/getJobStatistics'
data = {}
if sourcetype != None:
data['sourcetype'] = sourcetype
status,output = curl.get(url,data)
try:
tmpRet = status,pickle.loads(output)
if status != 0:
return tmpRet
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatistics : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# gather
for tmpCloud,tmpVal in tmpRet[1].iteritems():
if not ret.has_key(tmpCloud):
# append cloud values
ret[tmpCloud] = tmpVal
else:
# sum statistics
for tmpStatus,tmpCount in tmpVal.iteritems():
if ret[tmpCloud].has_key(tmpStatus):
ret[tmpCloud][tmpStatus] += tmpCount
else:
ret[tmpCloud][tmpStatus] = tmpCount
return 0,ret
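# An illustrative sketch of reading the aggregated map, which is keyed by site
# (the site name below is an assumption):
#
#   status, stats = getJobStatistics(sourcetype='analysis')
#   if status == 0:
#       print stats.get('ANALY_SOME_SITE', {}).get('running', 0)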
# get job statistics for Bamboo
def getJobStatisticsForBamboo(useMorePG=False):
"""Get job statistics for Bamboo
args:
useMorePG: set True if fine-grained classification is required
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the number jobs per job status in each site
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
for srvID in getPandas():
url = _getURL('URL',srvID) + '/getJobStatisticsForBamboo'
data = {}
if useMorePG != False:
data['useMorePG'] = useMorePG
status,output = curl.get(url,data)
try:
tmpRet = status,pickle.loads(output)
if status != 0:
return tmpRet
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsForBamboo : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# gather
for tmpCloud,tmpMap in tmpRet[1].iteritems():
if not ret.has_key(tmpCloud):
# append cloud values
ret[tmpCloud] = tmpMap
else:
# sum statistics
for tmpPType,tmpVal in tmpMap.iteritems():
if not ret[tmpCloud].has_key(tmpPType):
ret[tmpCloud][tmpPType] = tmpVal
else:
for tmpStatus,tmpCount in tmpVal.iteritems():
if ret[tmpCloud][tmpPType].has_key(tmpStatus):
ret[tmpCloud][tmpPType][tmpStatus] += tmpCount
else:
ret[tmpCloud][tmpPType][tmpStatus] = tmpCount
return 0,ret
# get highest prio jobs
def getHighestPrioJobStat(perPG=False,useMorePG=False):
"""Get the number of jobs with the highest priorities in each combination of cloud and processingType
args:
perPG: set True if grouped by processingGroup instead of processingType
useMorePG: set True if fine-grained classification is required
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           map of the number of jobs and priorities in each combination of cloud and processingType (or processingGroup)
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
url = baseURL + '/getHighestPrioJobStat'
data = {'perPG':perPG}
if useMorePG != False:
data['useMorePG'] = useMorePG
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getHighestPrioJobStat : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get jobs updated recently
def getJobsToBeUpdated(limit=5000,lockedby='',srvID=None):
"""Get the list of jobs which have been recently updated.
args:
limit: the maximum number of jobs
lockedby: name of the machinery which submitted jobs
srvID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           the list of PandaIDs
"""
# instantiate curl
curl = _Curl()
# execute
url = _getURL('URL',srvID) + '/getJobsToBeUpdated'
status,output = curl.get(url,{'limit':limit,'lockedby':lockedby})
try:
return status,pickle.loads(output)
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobsToBeUpdated : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# update prodDBUpdateTimes
def updateProdDBUpdateTimes(params,verbose=False,srvID=None):
"""Update timestamp of jobs when update info is propagated to another database
args:
params: map of PandaID and jobStatus and timestamp
verbose: set True to see what's going on
srvID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
True: request is processed
False: not processed
"""
# serialize
strPar = pickle.dumps(params)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = _getURL('URLSSL',srvID) + '/updateProdDBUpdateTimes'
data = {'params':strPar}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR updateProdDBUpdateTimes : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get PandaID at site
def getPandaIDsSite(site,status,limit=500):
"""Get the list of jobs in a job status at at a site
args:
site: site name
status: job status
limit: maximum number of jobs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PandaIDs
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getPandaIDsSite'
status,output = curl.get(url,{'site':site,'status':status,'limit':limit})
try:
return status,pickle.loads(output)
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDsSite : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get job statistics per site
def getJobStatisticsPerSite(predefined=False,workingGroup='',countryGroup='',jobType='',minPriority=None,
readArchived=None):
"""Get job statistics with job attributes
args:
           predefined: get jobs which are assigned to sites before being submitted
           workingGroup: comma-separated list of workingGroups
           countryGroup: comma-separated list of countryGroups
jobType: type of jobs
all: all jobs
analysis: analysis jobs
production: production jobs
minPriority: get jobs with higher priorities than this value
readArchived: get jobs with finished/failed/cancelled state in addition
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           map of the number of jobs per job status in each site
"""
# instantiate curl
curl = _Curl()
# execute
ret = {}
for srvID in getPandas():
url = _getURL('URL',srvID) + '/getJobStatisticsPerSite'
data = {'predefined':predefined}
if not workingGroup in ['',None]:
data['workingGroup'] = workingGroup
if not countryGroup in ['',None]:
data['countryGroup'] = countryGroup
if not jobType in ['',None]:
data['jobType'] = jobType
if not minPriority in ['',None]:
data['minPriority'] = minPriority
if not readArchived in ['',None]:
data['readArchived'] = readArchived
status,output = curl.get(url,data)
try:
tmpRet = status,pickle.loads(output)
if status != 0:
return tmpRet
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsPerSite : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# gather
for tmpSite,tmpVal in tmpRet[1].iteritems():
if not ret.has_key(tmpSite):
# append site values
ret[tmpSite] = tmpVal
else:
# sum statistics
for tmpStatus,tmpCount in tmpVal.iteritems():
if ret[tmpSite].has_key(tmpStatus):
ret[tmpSite][tmpStatus] += tmpCount
else:
ret[tmpSite][tmpStatus] = tmpCount
return 0,ret
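# Illustrative usage sketch (added for clarity, not part of the original API): counting
# jobs in one status per site. The 'activated' status key and priority cut are hypothetical examples.
def _example_count_activated_jobs():
    status, stats = getJobStatisticsPerSite(jobType='production', minPriority=100)
    if status != 0:
        return {}
    return dict((tmpSite, tmpVal.get('activated', 0)) for tmpSite, tmpVal in stats.iteritems())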
# get job statistics per site with label
def getJobStatisticsWithLabel(site=''):
"""Get job statistics per prodSourceLabel
args:
           site: comma-separated list of sites. An empty string for all sites.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           map of the number of jobs per job status and prodSourceLabel in each site
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getJobStatisticsWithLabel'
data = {}
if not site in ['',None]:
data['site'] = site
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsWithLabel : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get the number of waiting jobs per site and user (obsolete)
def getJobStatisticsPerUserSite():
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getJobStatisticsPerUserSite'
data = {}
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
print output
type, value, traceBack = sys.exc_info()
errStr = "ERROR getJobStatisticsPerUserSite : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# query last files in datasets
def queryLastFilesInDataset(datasets):
"""Get names of files which have the largest serial number in each dataset
args:
datasets: the list of dataset names
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of the dataset name and the file name
"""
# serialize
strDSs = pickle.dumps(datasets)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryLastFilesInDataset'
data = {'datasets':strDSs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR queryLastFilesInDataset : %s %s" % (type,value)
return EC_Failed,None
# insert sandbox file info
def insertSandboxFileInfo(userName,fileName,fileSize,checkSum,verbose=False):
"""Insert infomation of input sandbox
args:
userName: the name of the user
fileName: the file name
fileSize: the file size
           checkSum: md5sum of the file
verbose: set True to see what's going on
returns:
status code
0: communication succeeded to the panda server
else: communication failure
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/insertSandboxFileInfo'
data = {'userName':userName,'fileName':fileName,'fileSize':fileSize,'checkSum':checkSum}
return curl.post(url,data)
# upload input sandbox file
def putFile(file):
"""Upload input sandbox
args:
file: the file name
returns:
status code
0: communication succeeded to the panda server
else: communication failure
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/putFile'
data = {'file':file}
return curl.put(url,data)
# delete file (obsolete)
def deleteFile(file):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/deleteFile'
data = {'file':file}
return curl.post(url,data)
# touch file (obsolete)
def touchFile(sourceURL,filename):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = sourceURL + '/server/panda/touchFile'
data = {'filename':filename}
return curl.post(url,data)
# get site specs
def getSiteSpecs(siteType=None):
"""Get list of site specifications
args:
siteType: type of sites
None: all sites
analysis: analysis sites
production: production sites
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of site and attributes
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getSiteSpecs'
data = {}
if siteType != None:
data = {'siteType':siteType}
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getSiteSpecs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get cloud specs
def getCloudSpecs():
"""Get list of cloud specifications
args:
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of cloud and attributes
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getCloudSpecs'
status,output = curl.get(url,{})
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getCloudSpecs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get nPilots (obsolete)
def getNumPilots():
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getNumPilots'
status,output = curl.get(url,{})
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getNumPilots : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get a list of DN/myproxy pass phrase/queued job count at a site
def getNUserJobs(siteName):
"""Get a list of DN/myproxy pass phrase/queued job count at a site. production or pilot role is required
args:
siteName: the site name
returns:
status code
0: communication succeeded to the panda server
else: communication failure
a dictionary of DN, myproxy pass phrase, queued job count, hostname of myproxy server
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/getNUserJobs'
data = {'siteName':siteName}
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getNUserJobs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# run brokerage
def runBrokerage(sites,atlasRelease,cmtConfig=None):
"""Run brokerage
args:
sites: the list of candidate sites
atlasRelease: version number of SW release
cmtConfig: cmt config
returns:
status code
0: communication succeeded to the panda server
else: communication failure
the name of the selected site
"""
# serialize
strSites = pickle.dumps(sites)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/runBrokerage'
data = {'sites':strSites,
'atlasRelease':atlasRelease}
if cmtConfig != None:
data['cmtConfig'] = cmtConfig
return curl.get(url,data)
# get RW
def getRW(priority=0):
"""Get the amount of workload queued in each cloud
args:
priority: workload with higher priorities than this value
returns:
status code
0: communication succeeded to the panda server
255: communication failure
map of cloud and the amount of workload
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURLBAMBOO + '/getRW'
# get RWs for high priority tasks
data = {'priority':priority}
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getRW : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# change job priorities (obsolete)
def changeJobPriorities(newPrioMap):
# serialize
newPrioMapStr = pickle.dumps(newPrioMap)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeJobPriorities'
data = {'newPrioMap':newPrioMapStr}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeJobPriorities : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# insert task params
def insertTaskParams(taskParams):
"""Insert task parameters
args:
taskParams: a dictionary of task parameters
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and JediTaskID
True: request is processed
False: not processed
"""
# serialize
taskParamsStr = json.dumps(taskParams)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/insertTaskParams'
data = {'taskParams':taskParamsStr}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR insertTaskParams : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
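# Illustrative usage sketch (added for clarity, not part of the original API): submitting
# a task parameter dictionary. The parameter names below are hypothetical placeholders;
# the actual JEDI task parameter schema is defined on the server side.
def _example_insert_task_params():
    taskParams = {'taskName': 'user.someone.test.0001/',
                  'userName': 'someone',
                  'jobParameters': []}
    status, ret = insertTaskParams(taskParams)
    print "status=%s ret=%s" % (status, str(ret))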
# kill task
def killTask(jediTaskID):
"""Kill a task
args:
jediTaskID: jediTaskID of the task to be killed
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/killTask'
data = {'jediTaskID':jediTaskID}
data['properErrorCode'] = True
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR killTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# finish task
def finishTask(jediTaskID,soft=False):
"""Finish a task
args:
jediTaskID: jediTaskID of the task to be finished
soft: If True, new jobs are not generated and the task is
                 finished once all remaining jobs are done.
If False, all remaining jobs are killed and then the
task is finished
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/finishTask'
data = {'jediTaskID':jediTaskID}
data['properErrorCode'] = True
if soft:
data['soft'] = True
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR finishTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
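# Illustrative usage sketch (added for clarity, not part of the original API): interpreting
# the nested (status, (return code, message)) structure described in the docstrings above.
# The task ID is a hypothetical placeholder.
def _example_finish_task_softly():
    status, ret = finishTask(123456, soft=True)
    if status != 0:
        print "communication failure: %s" % str(ret)
        return
    retCode, diagMessage = ret
    print "return code=%s message=%s" % (retCode, diagMessage)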
# reassign task to a site
def reassignTaskToSite(jediTaskID,site,mode=None):
"""Reassign a task to a site. Existing jobs are killed and new jobs are generated at the site
args:
jediTaskID: jediTaskID of the task to be reassigned
site: the site name where the task is reassigned
mode: If soft, only defined/waiting/assigned/activated jobs are killed. If nokill, no jobs are killed. All jobs are killed by default.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
maxSite = 60
if site != None and len(site) > maxSite:
return EC_Failed,'site parameter is too long > {0}chars'.format(maxSite)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignTask'
data = {'jediTaskID':jediTaskID,'site':site}
if mode != None:
data['mode'] = mode
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reassignTaskToSite : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
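# Illustrative usage sketch (added for clarity, not part of the original API): moving a
# task to another site without killing running jobs. The task ID and site name are hypothetical.
def _example_reassign_task():
    return reassignTaskToSite(123456, 'SOME_SITE', mode='nokill')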
# reassign task to a cloud
def reassignTaskToCloud(jediTaskID,cloud,mode=None):
"""Reassign a task to a cloud. Existing jobs are killed and new jobs are generated in the cloud
args:
jediTaskID: jediTaskID of the task to be reassigned
cloud: the cloud name where the task is reassigned
mode: If soft, only defined/waiting/assigned/activated jobs are killed. If nokill, no jobs are killed. All jobs are killed by default.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignTask'
data = {'jediTaskID':jediTaskID,'cloud':cloud}
if mode != None:
data['mode'] = mode
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reassignTaskToCloud : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# reassign task to a nucleus
def reassignTaskToNucleus(jediTaskID,nucleus,mode=None):
"""Reassign a task to a nucleus. Existing jobs are killed and new jobs are generated in the cloud
args:
jediTaskID: jediTaskID of the task to be reassigned
nucleus: the nucleus name where the task is reassigned
mode: If soft, only defined/waiting/assigned/activated jobs are killed. If nokill, no jobs are killed. All jobs are killed by default.
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignTask'
data = {'jediTaskID':jediTaskID,'nucleus':nucleus}
if mode != None:
data['mode'] = mode
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reassignTaskToCloud : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# upload log
def uploadLog(logStr,logFileName):
"""Upload sandbox
args:
logStr: log message
logFileName: name of log file
returns:
status code
0: communication succeeded to the panda server
else: communication failure
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# write log to a tmp file
fh = tempfile.NamedTemporaryFile(delete=False)
gfh = gzip.open(fh.name,mode='wb')
gfh.write(logStr)
gfh.close()
# execute
url = baseURLSSL + '/uploadLog'
data = {'file':'{0};filename={1}'.format(fh.name,logFileName)}
retVal = curl.put(url,data)
os.unlink(fh.name)
return retVal
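# Illustrative usage sketch (added for clarity, not part of the original API): uploading
# a short diagnostic log. The file name is a hypothetical example.
def _example_upload_log():
    logStr = "something went wrong\n"
    return uploadLog(logStr, 'myjob.log')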
# change task priority
def changeTaskPriority(jediTaskID,newPriority):
"""Change task priority
args:
jediTaskID: jediTaskID of the task to change the priority
newPriority: new task priority
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskPriority'
data = {'jediTaskID':jediTaskID,
'newPriority':newPriority}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskPriority : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# set debug mode
def setDebugMode(pandaID,modeOn):
"""Turn debug mode on/off for a job
args:
pandaID: PandaID of the job
           modeOn: True to turn debug mode on, False to turn it off
returns:
status code
0: communication succeeded to the panda server
another: communication failure
error message
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/setDebugMode'
data = {'pandaID':pandaID,
'modeOn':modeOn}
return curl.post(url,data)
# retry task
def retryTask(jediTaskID,verbose=False):
"""Retry task
args:
jediTaskID: jediTaskID of the task to retry
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/retryTask'
data = {'jediTaskID':jediTaskID}
data['properErrorCode'] = True
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR retryTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task walltime
def changeTaskWalltime(jediTaskID,wallTime):
"""Change task priority
args:
           jediTaskID: jediTaskID of the task to change the walltime
wallTime: new walltime for the task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':'wallTime',
'attrValue':wallTime}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskWalltime : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task cputime
def changeTaskCputime(jediTaskID,cpuTime):
"""Change task cpuTime
args:
           jediTaskID: jediTaskID of the task to change the cpuTime
cpuTime: new cputime for the task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':'cpuTime',
'attrValue':cpuTime}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskCputime : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task RAM count
def changeTaskRamCount(jediTaskID,ramCount):
"""Change task priority
args:
           jediTaskID: jediTaskID of the task to change the ramCount
ramCount: new ramCount for the task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':'ramCount',
'attrValue':ramCount}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskRamCount : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# change task attribute
def changeTaskAttribute(jediTaskID,attrName,attrValue):
"""Change task attribute
args:
jediTaskID: jediTaskID of the task to change the attribute
attrName: attribute name
attrValue: new value for the attribute
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           return: a tuple of return code and message
0: unknown task
1: succeeded
2: disallowed to update the attribute
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskAttributePanda'
data = {'jediTaskID':jediTaskID,
'attrName':attrName,
'attrValue':attrValue}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskAttributePanda : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
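# Illustrative usage sketch (added for clarity, not part of the original API): the
# walltime/cputime/ramCount helpers above are thin wrappers around this generic call.
# The attribute value below is hypothetical; the server decides which attributes may
# be updated (return code 2 otherwise).
def _example_change_attribute():
    return changeTaskAttribute(123456, 'ramCount', 4000)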
# change split rule for task
def changeTaskSplitRule(jediTaskID,ruleName,ruleValue):
"""Change split rule fo task
args:
jediTaskID: jediTaskID of the task to change the rule
ruleName: rule name
ruleValue: new value for the rule
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           return: a tuple of return code and message
0: unknown task
1: succeeded
2: disallowed to update the attribute
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskSplitRulePanda'
data = {'jediTaskID':jediTaskID,
'attrName':ruleName,
'attrValue':ruleValue}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR changeTaskSplitRule : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# pause task
def pauseTask(jediTaskID,verbose=False):
"""Pause task
args:
jediTaskID: jediTaskID of the task to pause
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/pauseTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR pauseTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# resume task
def resumeTask(jediTaskID,verbose=False):
"""Resume task
args:
jediTaskID: jediTaskID of the task to release
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/resumeTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR resumeTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# avalanche task
def avalancheTask(jediTaskID,verbose=False):
"""force avalanche for task
args:
jediTaskID: jediTaskID of the task to avalanche
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and diagnostic message
0: request is registered
1: server error
2: task not found
3: permission denied
4: irrelevant task status
100: non SSL connection
101: irrelevant taskID
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/avalancheTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR resumeTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# increase attempt number for unprocessed files
def increaseAttemptNr(jediTaskID,increase):
"""Change task priority
args:
jediTaskID: jediTaskID of the task to increase attempt numbers
           increase: the amount by which attempt numbers are increased
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return code
0: succeeded
1: unknown task
2: invalid task status
3: permission denied
4: wrong parameter
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/increaseAttemptNrPanda'
data = {'jediTaskID':jediTaskID,
'increasedNr':increase}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR increaseAttemptNr : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# kill unfinished jobs
def killUnfinishedJobs(jediTaskID,code=None,verbose=False,srvID=None,useMailAsID=False):
"""Kill unfinished jobs in a task. Normal users can kill only their own jobs.
People with production VOMS role can kill any jobs.
Running jobs are killed when next heartbeat comes from the pilot.
Set code=9 if running jobs need to be killed immediately.
args:
jediTaskID: the taskID of the task
code: specify why the jobs are killed
2: expire
3: aborted
4: expire in waiting
7: retry by server
8: rebrokerage
9: force kill
50: kill by JEDI
91: kill user jobs with prod role
verbose: set True to see what's going on
srvID: obsolete
useMailAsID: obsolete
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of clouds (or Nones if tasks are not yet assigned)
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = _getURL('URLSSL',srvID) + '/killUnfinishedJobs'
data = {'jediTaskID':jediTaskID,'code':code,'useMailAsID':useMailAsID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR killUnfinishedJobs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
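# Illustrative usage sketch (added for clarity, not part of the original API): force-killing
# the remaining jobs of a task immediately (code=9 as documented above). The task ID is hypothetical.
def _example_force_kill_unfinished():
    return killUnfinishedJobs(123456, code=9, verbose=True)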
# trigger task brokerage
def triggerTaskBrokerage(jediTaskID):
"""Trigger task brokerge
args:
jediTaskID: jediTaskID of the task to change the attribute
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           return: a tuple of return code and message
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/changeTaskModTimePanda'
data = {'jediTaskID':jediTaskID,
'diffValue':-12}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR triggerTaskBrokerage : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# get PanDA IDs with TaskID
def getPandaIDsWithTaskID(jediTaskID):
"""Get PanDA IDs with TaskID
args:
           jediTaskID: jediTaskID of the task to get the list of PanDA IDs for
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PanDA IDs
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getPandaIDsWithTaskID'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDsWithTaskID : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# reactivate task
def reactivateTask(jediTaskID):
"""Reactivate task
args:
jediTaskID: jediTaskID of the task to be reactivated
returns:
status code
0: communication succeeded to the panda server
255: communication failure
           return: a tuple of return code and message
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reactivateTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reactivateTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# get task status TaskID
def getTaskStatus(jediTaskID):
"""Get task status
args:
           jediTaskID: jediTaskID of the task to get the status of
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the status string
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getTaskStatus'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getTaskStatus : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr | apache-2.0 |
egoldchain/egoldchain-master | contrib/seeds/generate-seeds.py | 10 | 4342 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
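# Illustrative examples (added for clarity, not part of the original script) of the
# address forms parse_spec() accepts; the hosts and ports are hypothetical:
# parse_spec('1.2.3.4', 9333) -> (16-byte IPv4-mapped address, 9333)
# parse_spec('[2001:db8::1]:8555', 9333) -> (16-byte IPv6 address, 8555)
# parse_spec('0x0100007f', 9333) -> little-endian pnSeeds form of 127.0.0.1, default port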
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the litecoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19335)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit |
shadowsocksR-private/shadowsocksR | shadowsocks/obfsplugin/obfs_tls.py | 4 | 13578 | #!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import string
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache
def create_tls_ticket_auth_obfs(method):
return tls_ticket_auth(method)
obfs_map = {
'tls1.2_ticket_auth': (create_tls_ticket_auth_obfs,),
'tls1.2_ticket_auth_compatible': (create_tls_ticket_auth_obfs,),
'tls1.2_ticket_fastauth': (create_tls_ticket_auth_obfs,),
'tls1.2_ticket_fastauth_compatible': (create_tls_ticket_auth_obfs,),
}
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False
class obfs_auth_data(object):
def __init__(self):
self.client_data = lru_cache.LRUCache(60 * 5)
self.client_id = os.urandom(32)
self.startup_time = int(time.time() - 60 * 30) & 0xFFFFFFFF
self.ticket_buf = {}
class tls_ticket_auth(plain.plain):
def __init__(self, method):
self.method = method
self.handshake_status = 0
self.send_buffer = b''
self.recv_buffer = b''
self.client_id = b''
self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
self.tls_version = b'\x03\x03'
self.overhead = 5
def init_data(self):
return obfs_auth_data()
def get_overhead(self, direction): # direction: true for c->s false for s->c
return self.overhead
def sni(self, url):
url = common.to_bytes(url)
data = b"\x00" + struct.pack('>H', len(url)) + url
data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + struct.pack('>H', len(data)) + data
return data
def pack_auth_data(self, client_id):
utc_time = int(time.time()) & 0xFFFFFFFF
data = struct.pack('>I', utc_time) + os.urandom(18)
data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10]
return data
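    # Layout note (added for clarity, derived from the code above): the 32-byte auth block
    # is 4 bytes of UTC time, 18 random bytes, and the first 10 bytes of
    # HMAC-SHA1(key + client_id) computed over those first 22 bytes.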
def client_encode(self, buf):
if self.handshake_status == -1:
return buf
if self.handshake_status == 8:
ret = b''
while len(buf) > 2048:
size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
buf = buf[size:]
if len(buf) > 0:
ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
return ret
if len(buf) > 0:
self.send_buffer += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
if self.handshake_status == 0:
self.handshake_status = 1
data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100")
ext = binascii.unhexlify(b"ff01000100")
host = self.server_info.obfs_param or self.server_info.host
if host and host[-1] in string.digits:
host = ''
hosts = host.split(',')
host = random.choice(hosts)
ext += self.sni(host)
ext += b"\x00\x17\x00\x00"
if host not in self.server_info.data.ticket_buf:
self.server_info.data.ticket_buf[host] = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 17 + 8) * 16)
ext += b"\x00\x23" + struct.pack('>H', len(self.server_info.data.ticket_buf[host])) + self.server_info.data.ticket_buf[host]
ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203")
ext += binascii.unhexlify(b"000500050100000000")
ext += binascii.unhexlify(b"00120000")
ext += binascii.unhexlify(b"75500000")
ext += binascii.unhexlify(b"000b00020100")
ext += binascii.unhexlify(b"000a0006000400170018")
data += struct.pack('>H', len(ext)) + ext
data = b"\x01\x00" + struct.pack('>H', len(data)) + data
data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
return data
elif self.handshake_status == 1 and len(buf) == 0:
data = b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22) #Finished
data += hmac.new(self.server_info.key + self.server_info.data.client_id, data, hashlib.sha1).digest()[:10]
ret = data + self.send_buffer
self.send_buffer = b''
self.handshake_status = 8
return ret
return b''
def client_decode(self, buf):
if self.handshake_status == -1:
return (buf, False)
if self.handshake_status == 8:
ret = b''
self.recv_buffer += buf
while len(self.recv_buffer) > 5:
if ord(self.recv_buffer[0]) != 0x17:
logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
raise Exception('server_decode appdata error')
size = struct.unpack('>H', self.recv_buffer[3:5])[0]
if len(self.recv_buffer) < size + 5:
break
buf = self.recv_buffer[5:size+5]
ret += buf
self.recv_buffer = self.recv_buffer[size+5:]
return (ret, False)
if len(buf) < 11 + 32 + 1 + 32:
raise Exception('client_decode data error')
verify = buf[11:33]
if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]:
raise Exception('client_decode data error')
if hmac.new(self.server_info.key + self.server_info.data.client_id, buf[:-10], hashlib.sha1).digest()[:10] != buf[-10:]:
raise Exception('client_decode data error')
return (b'', True)
def server_encode(self, buf):
if self.handshake_status == -1:
return buf
if (self.handshake_status & 8) == 8:
ret = b''
while len(buf) > 2048:
size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
buf = buf[size:]
if len(buf) > 0:
ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
return ret
self.handshake_status |= 8
data = self.tls_version + self.pack_auth_data(self.client_id) + b"\x20" + self.client_id + binascii.unhexlify(b"c02f000005ff01000100")
data = b"\x02\x00" + struct.pack('>H', len(data)) + data #server hello
data = b"\x16" + self.tls_version + struct.pack('>H', len(data)) + data
if random.randint(0, 8) < 1:
ticket = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 164) * 2 + 64)
ticket = struct.pack('>H', len(ticket) + 4) + b"\x04\x00" + struct.pack('>H', len(ticket)) + ticket
data += b"\x16" + self.tls_version + ticket #New session ticket
data += b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
finish_len = random.choice([32, 40])
data += b"\x16" + self.tls_version + struct.pack('>H', finish_len) + os.urandom(finish_len - 10) #Finished
data += hmac.new(self.server_info.key + self.client_id, data, hashlib.sha1).digest()[:10]
if buf:
data += self.server_encode(buf)
return data
def decode_error_return(self, buf):
self.handshake_status = -1
self.overhead = 0
if self.method == 'tls1.2_ticket_auth':
return (b'E'*2048, False, False)
return (buf, True, False)
def server_decode(self, buf):
if self.handshake_status == -1:
return (buf, True, False)
if (self.handshake_status & 4) == 4:
ret = b''
self.recv_buffer += buf
while len(self.recv_buffer) > 5:
if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3:
logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
raise Exception('server_decode appdata error')
size = struct.unpack('>H', self.recv_buffer[3:5])[0]
if len(self.recv_buffer) < size + 5:
break
ret += self.recv_buffer[5:size+5]
self.recv_buffer = self.recv_buffer[size+5:]
return (ret, True, False)
if (self.handshake_status & 1) == 1:
self.recv_buffer += buf
buf = self.recv_buffer
verify = buf
if len(buf) < 11:
raise Exception('server_decode data error')
if not match_begin(buf, b"\x14" + self.tls_version + b"\x00\x01\x01"): #ChangeCipherSpec
raise Exception('server_decode data error')
buf = buf[6:]
if not match_begin(buf, b"\x16" + self.tls_version + b"\x00"): #Finished
raise Exception('server_decode data error')
verify_len = struct.unpack('>H', buf[3:5])[0] + 1 # 11 - 10
if len(verify) < verify_len + 10:
return (b'', False, False)
if hmac.new(self.server_info.key + self.client_id, verify[:verify_len], hashlib.sha1).digest()[:10] != verify[verify_len:verify_len+10]:
raise Exception('server_decode data error')
self.recv_buffer = verify[verify_len + 10:]
status = self.handshake_status
self.handshake_status |= 4
ret = self.server_decode(b'')
            return ret
#raise Exception("handshake data = %s" % (binascii.hexlify(buf)))
self.recv_buffer += buf
buf = self.recv_buffer
ogn_buf = buf
if len(buf) < 3:
return (b'', False, False)
if not match_begin(buf, b'\x16\x03\x01'):
return self.decode_error_return(ogn_buf)
buf = buf[3:]
header_len = struct.unpack('>H', buf[:2])[0]
if header_len > len(buf) - 2:
return (b'', False, False)
self.recv_buffer = self.recv_buffer[header_len + 5:]
self.handshake_status = 1
buf = buf[2:header_len + 2]
if not match_begin(buf, b'\x01\x00'): #client hello
logging.info("tls_auth not client hello message")
return self.decode_error_return(ogn_buf)
buf = buf[2:]
if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
logging.info("tls_auth wrong message size")
return self.decode_error_return(ogn_buf)
buf = buf[2:]
if not match_begin(buf, self.tls_version):
logging.info("tls_auth wrong tls version")
return self.decode_error_return(ogn_buf)
buf = buf[2:]
verifyid = buf[:32]
buf = buf[32:]
sessionid_len = ord(buf[0])
if sessionid_len < 32:
logging.info("tls_auth wrong sessionid_len")
return self.decode_error_return(ogn_buf)
sessionid = buf[1:sessionid_len + 1]
buf = buf[sessionid_len+1:]
self.client_id = sessionid
sha1 = hmac.new(self.server_info.key + sessionid, verifyid[:22], hashlib.sha1).digest()[:10]
utc_time = struct.unpack('>I', verifyid[:4])[0]
time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time)
if self.server_info.obfs_param:
try:
self.max_time_dif = int(self.server_info.obfs_param)
except:
pass
if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \
or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2):
logging.info("tls_auth wrong time")
return self.decode_error_return(ogn_buf)
if sha1 != verifyid[22:]:
logging.info("tls_auth wrong sha1")
return self.decode_error_return(ogn_buf)
if self.server_info.data.client_data.get(verifyid[:22]):
logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid)))
return self.decode_error_return(ogn_buf)
self.server_info.data.client_data.sweep()
self.server_info.data.client_data[verifyid[:22]] = sessionid
if len(self.recv_buffer) >= 11:
ret = self.server_decode(b'')
return (ret[0], True, True)
# (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
return (b'', False, True)
| apache-2.0 |
ivanhorvath/openshift-tools | ansible/roles/lib_openshift_3.2/library/oc_process.py | 6 | 37409 | #!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
        '''replace a resource from a file '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
        '''create a resource from the given content '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
        '''create a resource from a file '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
        '''delete a resource by name '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
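    # For reference (added for clarity, derived from the code above): with create=False
    # _process runs `oc process -n <namespace> <template> -v KEY=VAL ...` (or feeds
    # template_data on stdin via `-f -`) and returns the parsed JSON; with create=True the
    # processed output is written to a temp file and passed back to `oc create -f`.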
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oc', 'adm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
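# A minimal usage sketch for check_def_equal (values below are made up):
#
#   user_def = {'spec': {'replicas': 2}, 'metadata': {'name': 'web'}}
#   result_def = {'spec': {'replicas': 2}, 'metadata': {'name': 'web'}, 'status': {}}
#   Utils.check_def_equal(user_def, result_def)   # -> True
#
# Keys in the skip list (plus any skip_keys) are ignored on both sides, so
# server-generated fields such as 'metadata' and 'status' do not trigger a
# spurious "needs update".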
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
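# Sketch of how config_options map to CLI parameters (options dict and values are
# hypothetical):
#
#   options = {'replica_count': {'value': 3, 'include': True},
#              'dry_run': {'value': None, 'include': True}}
#   cfg = OpenShiftCLIConfig('myapp', 'default', '/etc/origin/master/admin.kubeconfig', options)
#   cfg.stringify()   # -> ['--replica-count=3']
#
# Entries with empty (non-integer) values are dropped and underscores become dashes.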
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, value):
''' setter method for separator '''
self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add or update an item in a dictionary using key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b (with separator '#')
sets d['a']['b'] = item
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
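# Quick sketch of the key notation handled by parse_key/add_entry/get_entry
# (the example data is hypothetical):
#
#   data = {'a': {'b': {'c': 'd'}}, 'items': [1, 2, 3]}
#   Yedit.get_entry(data, 'a.b.c')        # -> 'd'
#   Yedit.get_entry(data, 'items[1]')     # -> 2
#   Yedit.add_entry(data, 'a.b.e', 'f')   # sets data['a']['b']['e'] = 'f'
#
# A different separator (e.g. '#') can be passed when keys themselves contain dots.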
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
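# Usage sketch for put (the file name is hypothetical):
#
#   yedit = Yedit('/tmp/deployment.yml')
#   changed, content = yedit.put('spec.replicas', 3)
#
# put applies add_entry to a round-tripped copy of yaml_dict and only swaps the
# copy in when the update succeeds, so a failed path leaves the loaded data untouched.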
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class OCProcess(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
namespace,
tname=None,
params=None,
create=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
tdata=None,
verbose=False):
''' Constructor for OCProcess '''
super(OCProcess, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.name = tname
self.data = tdata
self.params = params
self.create = create
self.kubeconfig = kubeconfig
self.verbose = verbose
self._template = None
@property
def template(self):
'''template property'''
if self._template == None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
raise OpenShiftCLIError('Error processing template [%s].' % self.name)
self._template = results['results']['items']
return self._template
def get(self):
'''get the template'''
results = self._get('template', self.name)
if results['returncode'] != 0:
# Does the template exist??
if 'not found' in results['stderr']:
results['returncode'] = 0
results['exists'] = False
results['results'] = []
return results
def delete(self, obj):
'''delete a resource'''
return self._delete(obj['kind'], obj['metadata']['name'])
def create_obj(self, obj):
'''create a resource'''
return self._create_from_content(obj['metadata']['name'], obj)
def process(self, create=None):
'''process a template'''
do_create = False
if create != None:
do_create = create
else:
do_create = self.create
return self._process(self.name, do_create, self.params, self.data)
def exists(self):
'''return whether the template exists'''
# Always return true if we're being passed template data
if self.data:
return True
t_results = self._get('template', self.name)
if t_results['returncode'] != 0:
# Does the template exist??
if 'not found' in t_results['stderr']:
return False
else:
raise OpenShiftCLIError('Something went wrong. %s' % t_results)
return True
def needs_update(self):
'''attempt to process the template and return it for comparison with oc objects'''
obj_results = []
for obj in self.template:
# build a list of types to skip
skip = []
if obj['kind'] == 'ServiceAccount':
skip.extend(['secrets', 'imagePullSecrets'])
if obj['kind'] == 'BuildConfig':
skip.extend(['lastTriggeredImageID'])
if obj['kind'] == 'ImageStream':
skip.extend(['generation'])
if obj['kind'] == 'DeploymentConfig':
skip.extend(['lastTriggeredImage'])
# fetch the current object
curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
if curr_obj_results['returncode'] != 0:
# Does the template exist??
if 'not found' in curr_obj_results['stderr']:
obj_results.append((obj, True))
continue
# check the generated object against the existing object
if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
obj_results.append((obj, True))
continue
obj_results.append((obj, False))
return obj_results
# pylint: disable=too-many-branches
def main():
'''
ansible oc module for services
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str', choices=['present', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
template_name=dict(default=None, type='str'),
content=dict(default=None, type='str'),
params=dict(default=None, type='dict'),
create=dict(default=False, type='bool'),
reconcile=dict(default=True, type='bool'),
),
supports_check_mode=True,
)
ocprocess = OCProcess(module.params['namespace'],
module.params['template_name'],
module.params['params'],
module.params['create'],
kubeconfig=module.params['kubeconfig'],
tdata=module.params['content'],
verbose=module.params['debug'])
state = module.params['state']
api_rval = ocprocess.get()
if state == 'list':
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=False, results=api_rval, state="list")
elif state == 'present':
if not ocprocess.exists() or not module.params['reconcile']:
#FIXME: this code will never get run in a way that succeeds when
# module.params['reconcile'] is true. Because oc_process doesn't
# create the actual template, the check of ocprocess.exists()
# is meaningless. Either it's already here and this code
# won't be run, or this code will fail because there is no
# template available for oc process to use. Have we conflated
# the template's existence with the existence of the objects
# it describes?
# Create it here
api_rval = ocprocess.process()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
# verify results
update = False
rval = []
all_results = ocprocess.needs_update()
for obj, status in all_results:
if status:
ocprocess.delete(obj)
results = ocprocess.create_obj(obj)
results['kind'] = obj['kind']
rval.append(results)
update = True
if not update:
module.exit_json(changed=update, results=api_rval, state="present")
for cmd in rval:
if cmd['returncode'] != 0:
module.fail_json(changed=update, results=rval, state="present")
module.exit_json(changed=update, results=rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
| apache-2.0 |
sarantapichos/faircoop-market | addons/account_budget/report/analytic_account_budget_report.py | 360 | 7589 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class analytic_account_budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(analytic_account_budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
accounts = self.pool.get('account.analytic.account').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
obj_c_budget = self.pool.get('crossovered.budget')
for account_id in accounts:
res = {}
b_line_ids = []
for line in account_id.crossovered_budget_line:
b_line_ids.append(line.id)
if not b_line_ids:
return []
d_from = form['date_from']
d_to = form['date_to']
self.cr.execute('SELECT DISTINCT(crossovered_budget_id) FROM crossovered_budget_lines WHERE id =ANY(%s)',(b_line_ids,))
budget_ids = self.cr.fetchall()
context = {'wizard_date_from':d_from,'wizard_date_to':d_to}
for i in range(0, len(budget_ids)):
budget_name = obj_c_budget.browse(self.cr, self.uid, [budget_ids[i][0]])
res= {
'b_id':'-1',
'a_id':'-1',
'name':budget_name[0].name,
'status':1,
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', b_line_ids), ('crossovered_budget_id','=',budget_ids[i][0])])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0
done_budget = []
for line in line_id:
if line.id in b_line_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
record['perc'] += line.percentage
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
else:
res1 = {
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': line.percentage
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] +=tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] +=tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] +=tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
class report_analyticaccountbudget(osv.AbstractModel):
_name = 'report.account_budget.report_analyticaccountbudget'
_inherit = 'report.abstract_report'
_template = 'account_budget.report_analyticaccountbudget'
_wrapped_report_class = analytic_account_budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tst-ppenev/earthenterprise | earth_enterprise/src/fusion/tools/pound_server/pound.py | 4 | 3388 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Simple tool for pounding the server to make sure it properly handles
# concurrent requests for data from the same globe.
# Expects Atlanta globe (Atlanta.glb) to be available
# on the server.
# Checking the data makes sure that the data is consistent on every read,
# not that it is correct in some absolute sense.
#
import sys
import urllib2
DEFAULT_HOST = "hostname.com"
TEST_PACKET0_ADDRESS = ("http://%s/portable/"
"Atlanta.glb/flatfile?f1-03103032222212031-i.5")
TEST_PACKET1_ADDRESS = ("http://%s/portable/"
"Atlanta.glb/flatfile?f1-03103032222212032-i.5")
def InitGoldData(packet_addresses, gold_data, host):
"""Gold standard is data from first read."""
packet_addresses.append(TEST_PACKET0_ADDRESS % host)
packet_addresses.append(TEST_PACKET1_ADDRESS % host)
gold_data.append(urllib2.urlopen(packet_addresses[0]).read())
gold_data.append(urllib2.urlopen(packet_addresses[1]).read())
def CheckData(packet1, packet2, gold_data):
"""Check that data matches the gold standard."""
if packet1 != gold_data[0]:
raise Exception("Packet1 data does not match gold standard.")
if packet2 != gold_data[1]:
raise Exception("Packet2 data does not match gold standard.")
def RunTrial(packet_addresses, check_data, gold_data):
"""Read packets for next trial."""
packet0 = urllib2.urlopen(packet_addresses[0]).read()
packet1 = urllib2.urlopen(packet_addresses[1]).read()
if check_data == "t":
CheckData(packet0, packet1, gold_data)
def Usage():
"""Show usage information."""
print "usage: pound.py host [num_trials] [check_data]"
print " host: host server (default = hotname)"
print " num_trials: number of times data is read (default = 50000)"
print " check_data: (t or f) whether data is checked (default = f)"
print " E.g. ./pound.py localhost 10000 t"
def main():
"""Main for pounder."""
num_trials = 50000
check_data = "f"
host = DEFAULT_HOST
if len(sys.argv) == 1:
Usage()
return
if len(sys.argv) > 1:
arg1 = sys.argv[1]
if arg1 == "-h" or arg1 == "--help":
Usage()
return
else:
host = arg1
if len(sys.argv) > 2:
num_trials = int(sys.argv[2])
if len(sys.argv) > 3:
check_data = sys.argv[3]
print "Sever:", host
print "Trials:", num_trials
print "Checking data:", check_data
had_error = False
gold_data = []
packet_addresses = []
InitGoldData(packet_addresses, gold_data, host)
for i in xrange(num_trials):
try:
RunTrial(packet_addresses, check_data, gold_data)
except urllib2.URLError as e:
had_error = True
print "Error at", i, ":", e
break
if not had_error:
print "No errors.", num_trials, "trials."
print "Done."
if __name__ == "__main__":
main()
| apache-2.0 |
navrasio/mxnet | python/mxnet/module/base_module.py | 4 | 47155 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, too-many-arguments, too-many-locals
# pylint: disable=too-many-public-methods, too-many-branches, too-many-lines
"""`BaseModule` defines an API for modules."""
import time
import logging
import warnings
from .. import metric
from .. import ndarray
from ..context import cpu
from ..model import BatchEndParam
from ..initializer import Uniform
from ..io import DataDesc
from ..base import _as_list
def _check_input_names(symbol, names, typename, throw):
"""Check that all input names are in symbol's arguments."""
args = symbol.list_arguments()
for name in names:
if name in args:
continue
candidates = [arg for arg in args if
not arg.endswith('_weight') and
not arg.endswith('_bias') and
not arg.endswith('_gamma') and
not arg.endswith('_beta')]
msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \
"input with name '%s' is not found in symbol.list_arguments(). " \
"Did you mean one of:\n\t%s\033[0m"%(
typename, str(names), name, '\n\t'.join(candidates))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _check_names_match(data_names, data_shapes, name, throw):
"""Check that input names matches input data descriptors."""
actual = [x[0] for x in data_shapes]
if sorted(data_names) != sorted(actual):
msg = "Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)"%(
name, name, str(data_shapes), str(data_names))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
"""parse data_attrs into DataDesc format and check that names match"""
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
_check_names_match(data_names, data_shapes, 'data', True)
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
_check_names_match(label_names, label_shapes, 'label', False)
else:
_check_names_match(label_names, [], 'label', False)
return data_shapes, label_shapes
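# Rough example of what the helpers above accept (shapes are hypothetical):
#
#   data_shapes = [('data', (32, 3, 224, 224))]          # plain (name, shape) tuples ...
#   data_shapes = [DataDesc('data', (32, 3, 224, 224))]  # ... or DataDesc instances
#
# _parse_data_desc normalizes both forms to DataDesc, raises ValueError when the
# names in data_shapes do not match data_names, and only warns for label mismatches.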
class BaseModule(object):
"""The base class of a module.
A module represents a computation component. One can think of a module as a computation machine.
A module can execute forward and backward passes and update parameters in a model.
We aim to make the APIs easy to use, especially in the case when we need to use the imperative
API to work with multiple modules (e.g. stochastic depth network).
A module has several states:
- Initial state: Memory is not allocated yet, so the module is not ready for computation yet.
- Binded: Shapes for inputs, outputs, and parameters are all known, memory has been allocated,
and the module is ready for computation.
- Parameters are initialized: For modules with parameters, doing computation before
initializing the parameters might result in undefined outputs.
- Optimizer is installed: An optimizer can be installed to a module. After this, the parameters
of the module can be updated according to the optimizer after gradients are computed
(forward-backward).
In order for a module to interact with others, it must be able to report the
following information in its initial state (before binding):
- `data_names`: list of type string indicating the names of the required input data.
- `output_names`: list of type string indicating the names of the required outputs.
After binding, a module should be able to report the following richer information:
- state information
- `binded`: `bool`, indicates whether the memory buffers needed for computation
have been allocated.
- `for_training`: whether the module is bound for training.
- `params_initialized`: `bool`, indicates whether the parameters of this module
have been initialized.
- `optimizer_initialized`: `bool`, indicates whether an optimizer is defined
and initialized.
- `inputs_need_grad`: `bool`, indicates whether gradients with respect to the
input data are needed. Might be useful when implementing composition of modules.
- input/output information
- `data_shapes`: a list of `(name, shape)`. In theory, since the memory is allocated,
we could directly provide the data arrays. But in the case of data parallelism,
the data arrays might not be of the same shape as viewed from the external world.
- `label_shapes`: a list of `(name, shape)`. This might be `[]` if the module does
not need labels (e.g. it does not contains a loss function at the top), or a module
is not bound for training.
- `output_shapes`: a list of `(name, shape)` for outputs of the module.
- parameters (for modules with parameters)
- `get_params()`: return a tuple `(arg_params, aux_params)`. Each of those
is a dictionary of name to ``NDArray`` mapping. Those `NDArray` always lives on
CPU. The actual parameters used for computing might live on other devices (GPUs),
this function will retrieve (a copy of) the latest parameters.
- ``set_params(arg_params, aux_params)``: assign parameters to the devices
doing the computation.
- ``init_params(...)``: a more flexible interface to assign or initialize the parameters.
- setup
- `bind()`: prepare environment for computation.
- `init_optimizer()`: install optimizer for parameter updating.
- `prepare()`: prepare the module based on the current data batch.
- computation
- `forward(data_batch)`: forward operation.
- `backward(out_grads=None)`: backward operation.
- `update()`: update parameters according to installed optimizer.
- `get_outputs()`: get outputs of the previous forward operation.
- `get_input_grads()`: get the gradients with respect to the inputs computed
in the previous backward operation.
- `update_metric(metric, labels, pre_sliced=False)`: update performance metric
for the previous forward
computed results.
- other properties (mostly for backward compatibility)
- `symbol`: the underlying symbolic graph for this module (if any)
This property is not necessarily constant. For example, for `BucketingModule`,
this property is simply the *current* symbol being used. For other modules,
this value might not be well defined.
When those intermediate-level API are implemented properly, the following
high-level API will be automatically available for a module:
- `fit`: train the module parameters on a data set.
- `predict`: run prediction on a data set and collect outputs.
- `score`: run prediction on a data set and evaluate performance.
Examples
--------
>>> # An example of creating a mxnet module.
>>> import mxnet as mx
>>> data = mx.symbol.Variable('data')
>>> fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
>>> act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
>>> fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)
>>> act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
>>> fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
>>> out = mx.symbol.SoftmaxOutput(fc3, name = 'softmax')
>>> mod = mx.mod.Module(out)
"""
def __init__(self, logger=logging):
self.logger = logger
self.binded = False
self.for_training = False
self.inputs_need_grad = False
self.params_initialized = False
self.optimizer_initialized = False
self._symbol = None
self._total_exec_bytes = 0
################################################################################
# High Level API
################################################################################
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``."""
self.forward(data_batch, is_train=True)
self.backward()
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating that evaluation runs
until the `DataIter` finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value()
def iter_predict(self, eval_data, num_batch=None, reset=True, sparse_row_id_fn=None):
"""Iterates over predictions.
Example Usage:
----------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
... # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
Default is ``True``, indicating whether we should reset the data iter before start
doing prediction.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()]
yield (outputs, nbatch, eval_batch)
def predict(self, eval_data, num_batch=None, merge_batches=True, reset=True,
always_output_list=False, sparse_row_id_fn=None):
"""Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
all the mini-batches. When `always_output_list` is ``False`` (as by default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Defaults to ``None``, indicates running all the batches in the data iterator.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
output_list = []
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad].copy() for out in self.get_outputs()]
output_list.append(outputs)
if len(output_list) == 0:
return output_list
if merge_batches:
num_outputs = len(output_list[0])
for out in output_list:
assert len(out) == num_outputs, \
'Cannot merge batches, as num of outputs is not the same ' + \
'in mini-batches. Maybe bucketing is used?'
output_list2 = [ndarray.concatenate([out[i] for out in output_list])
for i in range(num_outputs)]
if num_outputs == 1 and not always_output_list:
return output_list2[0]
return output_list2
return output_list
def fit(self, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None, sparse_row_id_fn=None):
"""Trains the module parameters.
Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
an end-to-end use case.
Parameters
----------
train_data : DataIter
Train DataIter.
eval_data : DataIter
If not ``None``, will be used as validation set and the performance
after each epoch will be evaluated.
eval_metric : str or EvalMetric
Defaults to 'accuracy'. The performance measure used to display during training.
Other possible predefined metrics are:
'ce' (CrossEntropy), 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy'.
epoch_end_callback : function or list of functions
Each callback will be called with the current `epoch`, `symbol`, `arg_params`
and `aux_params`.
batch_end_callback : function or list of function
Each callback will be called with a `BatchEndParam`.
kvstore : str or KVStore
Defaults to 'local'.
optimizer : str or Optimizer
Defaults to 'sgd'.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The parameters for
the optimizer constructor.
The default value is not a dict, just to avoid pylint warning on dangerous
default values.
eval_end_callback : function or list of function
These will be called at the end of each full evaluation, with the metrics over
the entire evaluation set.
eval_batch_end_callback : function or list of function
These will be called at the end of each mini-batch during evaluation.
initializer : Initializer
The initializer is called to initialize the module parameters when they are
not already initialized.
arg_params : dict
Defaults to ``None``, if not ``None``, should be existing parameters from a trained
model or loaded from a checkpoint (previously saved model). In this case,
the value here will be used to initialize the module parameters, unless they
are already initialized by the user via a call to `init_params` or `fit`.
`arg_params` has a higher priority than `initializer`.
aux_params : dict
Defaults to ``None``. Similar to `arg_params`, except for auxiliary states.
allow_missing : bool
Defaults to ``False``. Indicates whether to allow missing parameters when `arg_params`
and `aux_params` are not ``None``. If this is ``True``, then the missing parameters
will be initialized via the `initializer`.
force_rebind : bool
Defaults to ``False``. Whether to force rebinding the executors if already bound.
force_init : bool
Defaults to ``False``. Indicates whether to force initialization even if the
parameters are already initialized.
begin_epoch : int
Defaults to 0. Indicates the starting epoch. Usually, if resumed from a
checkpoint saved at a previous training phase at epoch N, then this value should be
N+1.
num_epoch : int
Number of epochs for training.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using fit for training.
>>> # Assume training dataIter and validation dataIter are ready
>>> # Assume loading a previously checkpointed model
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 3)
>>> mod.fit(train_data=train_dataiter, eval_data=val_dataiter, optimizer='sgd',
... optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
... arg_params=arg_params, aux_params=aux_params,
... eval_metric='acc', num_epoch=10, begin_epoch=3)
"""
assert num_epoch is not None, 'please specify number of epochs'
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
tic = time.time()
eval_metric.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
try:
# pre fetch next batch
next_data_batch = next(data_iter)
self.prepare(next_data_batch, sparse_row_id_fn=sparse_row_id_fn)
except StopIteration:
end_of_batch = True
if isinstance(data_batch, list):
self.update_metric(eval_metric,
[db.label for db in data_batch],
pre_sliced=True)
else:
self.update_metric(eval_metric, data_batch.label)
if monitor is not None:
monitor.toc_print()
if end_of_batch:
eval_name_vals = eval_metric.get_name_value()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
nbatch += 1
# one epoch of training is finished
for name, val in eval_name_vals:
self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
toc = time.time()
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params)
if epoch_end_callback is not None:
for callback in _as_list(epoch_end_callback):
callback(epoch, self.symbol, arg_params, aux_params)
#----------------------------------------
# evaluation on validation set
if eval_data:
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
#TODO: pull this into default
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
################################################################################
# Symbol information
################################################################################
@property
def data_names(self):
"""A list of names for data required by this module."""
raise NotImplementedError()
@property
def output_names(self):
"""A list of names for the outputs of this module."""
raise NotImplementedError()
################################################################################
# Input/Output information
################################################################################
@property
def data_shapes(self):
"""A list of (name, shape) pairs specifying the data inputs to this module."""
raise NotImplementedError()
@property
def label_shapes(self):
"""A list of (name, shape) pairs specifying the label inputs to this module.
If this module does not accept labels -- either it is a module without loss
function, or it is not bound for training, then this should return an empty
list ``[]``.
"""
raise NotImplementedError()
@property
def output_shapes(self):
"""A list of (name, shape) pairs specifying the outputs of this module."""
raise NotImplementedError()
################################################################################
# Parameters of a module
################################################################################
def get_params(self):
"""Gets parameters, those are potentially copies of the the actual parameters used
to do computation on the device.
Returns
-------
``(arg_params, aux_params)``
A pair of dictionaries each mapping parameter names to NDArray values.
Examples
--------
>>> # An example of getting module parameters.
>>> print mod.get_params()
({'fc2_weight': <NDArray 64x128 @cpu(0)>, 'fc1_weight': <NDArray 128x100 @cpu(0)>,
'fc3_bias': <NDArray 10 @cpu(0)>, 'fc3_weight': <NDArray 10x64 @cpu(0)>,
'fc2_bias': <NDArray 64 @cpu(0)>, 'fc1_bias': <NDArray 128 @cpu(0)>}, {})
"""
raise NotImplementedError()
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, `force_init` will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of initializing module parameters.
>>> mod.init_params()
"""
raise NotImplementedError()
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init,
allow_extra=allow_extra)
def save_params(self, fname):
"""Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
"""
arg_params, aux_params = self.get_params()
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
ndarray.save(fname, save_dict)
def load_params(self, fname):
"""Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
"""
save_dict = ndarray.load(fname)
arg_params = {}
aux_params = {}
for k, value in save_dict.items():
arg_type, name = k.split(':', 1)
if arg_type == 'arg':
arg_params[name] = value
elif arg_type == 'aux':
aux_params[name] = value
else:
raise ValueError("Invalid param file " + fname)
self.set_params(arg_params, aux_params)
def get_states(self, merge_multi_context=True):
"""Gets states from all devices
If `merge_multi_context` is ``True``, returns output of form ``[out1, out2]``.
Otherwise, it returns output of the form
``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All output elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
A list of ``NDArray`` or a list of list of ``NDArray``.
"""
assert self.binded and self.params_initialized
assert not merge_multi_context
return []
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArray
Source states arrays formatted like
``[[state1_dev1, state1_dev2], [state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
assert not states and not value
def install_monitor(self, mon):
"""Installs monitor on all executors."""
raise NotImplementedError()
################################################################################
# Computations
################################################################################
# pylint: disable=unused-argument
def prepare(self, data_batch, sparse_row_id_fn=None):
'''Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
'''
if sparse_row_id_fn is not None:
warnings.warn(UserWarning("sparse_row_id_fn is not invoked for BaseModule."))
# pylint: enable=unused-argument
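# A sketch of what a `sparse_row_id_fn` callback could look like for a
# module whose KVStore holds a row_sparse embedding weight. The parameter
# name 'embed_weight' and the batch layout below are hypothetical:
#
#     def sparse_row_id_fn(data_batch):
#         # pull only the embedding rows referenced by this batch of token ids
#         return {'embed_weight': data_batch.data[0].reshape((-1,))}
#
#     mod.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)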
def forward(self, data_batch, is_train=None):
"""Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
Examples
--------
>>> import mxnet as mx
>>> from collections import namedtuple
>>> Batch = namedtuple('Batch', ['data'])
>>> data = mx.sym.Variable('data')
>>> out = data * 2
>>> mod = mx.mod.Module(symbol=out, label_names=None)
>>> mod.bind(data_shapes=[('data', (1, 10))])
>>> mod.init_params()
>>> data1 = [mx.nd.ones((1, 10))]
>>> mod.forward(Batch(data1))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]
>>> # Forward with data batch of different shape
>>> data2 = [mx.nd.ones((3, 5))]
>>> mod.forward(Batch(data2))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]]
"""
raise NotImplementedError()
def backward(self, out_grads=None):
"""Backward computation.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
Examples
--------
>>> # An example of backward computation.
>>> mod.backward()
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
it returns output of the form ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All the output elements have type `NDArray`. When `merge_multi_context` is ``False``,
those `NDArray` instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicates that we
should merge the collected results so that they look like results from a
single executor.
Returns
-------
list of `NDArray` or list of list of `NDArray`.
Output
Examples
--------
>>> # An example of getting forward output.
>>> print mod.get_outputs()[0].asnumpy()
[[ 0.09999977 0.10000153 0.10000716 0.10000195 0.09999853 0.09999743
0.10000272 0.10000113 0.09999088 0.09999888]]
"""
raise NotImplementedError()
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients to the inputs, computed in the previous backward computation.
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements have type `NDArray`. When `merge_multi_context` is ``False``, those `NDArray`
instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the gradients
will be collected from multiple devices. A ``True`` value indicates that we
should merge the collected results so that they look like results from a
single executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients.
Examples
--------
>>> # An example of getting input gradients.
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.
Examples
--------
>>> # An example of updating module parameters.
>>> mod.init_optimizer(kvstore='local', optimizer='sgd',
... optimizer_params=(('learning_rate', 0.01), ))
>>> mod.backward()
>>> mod.update()
>>> print mod.get_params()[0]['fc3_weight'].asnumpy()
[[ 5.86930104e-03 5.28078526e-03 -8.88729654e-03 -1.08308345e-03
6.13054074e-03 4.27560415e-03 1.53817423e-03 4.62131854e-03
4.69872449e-03 -2.42400169e-03 9.94111411e-04 1.12386420e-03
...]]
"""
raise NotImplementedError()
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluates and accumulates evaluation metric on outputs of the last forward
computation.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
Examples
--------
>>> # An example of updating evaluation metric.
>>> mod.forward(data_batch)
>>> mod.update_metric(metric, data_batch.label)
"""
raise NotImplementedError()
################################################################################
# module setup
################################################################################
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_data``. Can also be a list of
(data name, data shape).
label_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_label``. Can also be a list of
(label name, label shape).
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(defaults to 'write').
Can be specified globally (str) or for each argument (list, dict).
Examples
--------
>>> # An example of binding symbols.
>>> mod.bind(data_shapes=[('data', (1, 10, 10))])
>>> # Assume train_iter is already created.
>>> mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
"""
raise NotImplementedError()
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes optimizers, as well as initialize kvstore for
distributed training
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Defaults to ``False``, indicates whether to force re-initializing an optimizer
if it is already installed.
Examples
--------
>>> # An example of initializing optimizer.
>>> mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.005),))
"""
raise NotImplementedError()
################################################################################
# misc
################################################################################
@property
def symbol(self):
"""Gets the symbol associated with this module.
Except for `Module`, for other types of modules (e.g. `BucketingModule`), this
property might not be a constant throughout its lifetime. Some modules might
not even be associated with any symbols.
"""
return self._symbol
| apache-2.0 |
caseyching/incubator-airflow | airflow/operators/python_operator.py | 24 | 6181 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from datetime import datetime
import logging
from airflow.models import BaseOperator, TaskInstance
from airflow.utils.state import State
from airflow.utils.decorators import apply_defaults
from airflow import settings
class PythonOperator(BaseOperator):
"""
Executes a Python callable
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:type op_kwargs: dict
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:type op_args: list
:param provide_context: if set to true, Airflow will pass a set of
keyword arguments that can be used in your function. This set of
kwargs corresponds exactly to what you can use in your jinja
templates. For this to work, you need to define `**kwargs` in your
function header.
:type provide_context: bool
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute``, and are made available
in your callable's context after the templates have been applied
:type templates_dict: dict of str
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for example ``['.sql', '.hql']``
"""
template_fields = ('templates_dict',)
template_ext = tuple()
ui_color = '#ffefeb'
@apply_defaults
def __init__(
self,
python_callable,
op_args=None,
op_kwargs=None,
provide_context=False,
templates_dict=None,
templates_exts=None,
*args, **kwargs):
super(PythonOperator, self).__init__(*args, **kwargs)
self.python_callable = python_callable
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
self.provide_context = provide_context
self.templates_dict = templates_dict
if templates_exts:
self.template_ext = templates_exts
def execute(self, context):
if self.provide_context:
context.update(self.op_kwargs)
context['templates_dict'] = self.templates_dict
self.op_kwargs = context
return_value = self.python_callable(*self.op_args, **self.op_kwargs)
logging.info("Done. Returned value was: " + str(return_value))
return return_value
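# A minimal sketch of wiring PythonOperator into a DAG; the DAG id, start
# date and callable below are hypothetical and only illustrate the usage.
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.operators.python_operator import PythonOperator
#
#     def print_context(**kwargs):
#         print(kwargs['ds'])
#
#     dag = DAG('example_python_operator', start_date=datetime(2016, 1, 1))
#     task = PythonOperator(
#         task_id='print_the_context',
#         python_callable=print_context,
#         provide_context=True,
#         dag=dag)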
class BranchPythonOperator(PythonOperator):
"""
Allows a workflow to "branch" or follow a single path following the
execution of this task.
It derives from the PythonOperator and expects a Python function that returns
the task_id to follow. The task_id returned should point to a task
directly downstream from {self}. All other "branches" or
directly downstream tasks are marked with a state of ``skipped`` so that
these paths can't move forward. The ``skipped`` states are propagated
downstream to allow for the DAG state to fill up and the DAG run's state
to be inferred.
Note that using tasks with ``depends_on_past=True`` downstream from
``BranchPythonOperator`` is logically unsound as ``skipped`` status
will invariably lead to blocked tasks that depend on their past successes.
``skipped`` states propagate where all directly upstream tasks are
``skipped``.
"""
def execute(self, context):
branch = super(BranchPythonOperator, self).execute(context)
logging.info("Following branch " + branch)
logging.info("Marking other directly downstream tasks as skipped")
session = settings.Session()
for task in context['task'].downstream_list:
if task.task_id != branch:
ti = TaskInstance(
task, execution_date=context['ti'].execution_date)
ti.state = State.SKIPPED
ti.start_date = datetime.now()
ti.end_date = datetime.now()
session.merge(ti)
session.commit()
session.close()
logging.info("Done.")
class ShortCircuitOperator(PythonOperator):
"""
Allows a workflow to continue only if a condition is met. Otherwise, the
workflow "short-circuits" and downstream tasks are skipped.
The ShortCircuitOperator is derived from the PythonOperator. It evaluates a
condition and short-circuits the workflow if the condition is False. Any
downstream tasks are marked with a state of "skipped". If the condition is
True, downstream tasks proceed as normal.
The condition is determined by the result of `python_callable`.
"""
def execute(self, context):
condition = super(ShortCircuitOperator, self).execute(context)
logging.info("Condition result is {}".format(condition))
if condition:
logging.info('Proceeding with downstream tasks...')
return
else:
logging.info('Skipping downstream tasks...')
session = settings.Session()
for task in context['task'].downstream_list:
ti = TaskInstance(
task, execution_date=context['ti'].execution_date)
ti.state = State.SKIPPED
ti.start_date = datetime.now()
ti.end_date = datetime.now()
session.merge(ti)
session.commit()
session.close()
logging.info("Done.")
| apache-2.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/jedi/evaluate/helpers.py | 1 | 5436 | import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager
from parso.python import tree
from jedi.parser_utils import get_parent_scope
def is_stdlib_path(path):
# Python standard library paths look like this:
# /usr/lib/python3.5/...
# TODO The implementation below is probably incorrect and not complete.
if 'dist-packages' in path or 'site-packages' in path:
return False
base_path = os.path.join(sys.prefix, 'lib', 'python')
return bool(re.match(re.escape(base_path) + '\d.\d', path))
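# A quick illustration of the intended behaviour, assuming sys.prefix is
# '/usr' (the paths below are made up):
#
#     is_stdlib_path('/usr/lib/python3.5/os.py') -> True
#     is_stdlib_path('/usr/lib/python3.5/site-packages/foo.py') -> False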
def deep_ast_copy(obj):
"""
Much, much faster than copy.deepcopy, but just for parser tree nodes.
"""
# If it's already in the cache, just return it.
new_obj = copy.copy(obj)
# Copy children
new_children = []
for child in obj.children:
if isinstance(child, tree.Leaf):
new_child = copy.copy(child)
new_child.parent = new_obj
else:
new_child = deep_ast_copy(child)
new_child.parent = new_obj
new_children.append(new_child)
new_obj.children = new_children
return new_obj
def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
"""
Creates a "call" node that consist of all ``trailer`` and ``power``
objects. E.g. if you call it with ``append``::
list([]).append(3) or None
You would get a node with the content ``list([]).append`` back.
This generates a copy of the original ast node.
If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
# TODO remove cut_own_trailer option, since it's always used with it. Just
# ignore it, it's not what we want anyway. Or document it better?
"""
trailer = leaf.parent
# The leaf may not be the last or first child, because there exist three
# different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
# we should not match anything more than x.
if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
if trailer.type == 'atom':
return context.eval_node(trailer)
return context.eval_node(leaf)
power = trailer.parent
index = power.children.index(trailer)
if cut_own_trailer:
cut = index
else:
cut = index + 1
if power.type == 'error_node':
start = index
while True:
start -= 1
base = power.children[start]
if base.type != 'trailer':
break
trailers = power.children[start + 1: index + 1]
else:
base = power.children[0]
trailers = power.children[1:cut]
values = context.eval_node(base)
for trailer in trailers:
values = context.eval_trailer(values, trailer)
return values
def call_of_leaf(leaf):
"""
Creates a "call" node that consist of all ``trailer`` and ``power``
objects. E.g. if you call it with ``append``::
list([]).append(3) or None
You would get a node with the content ``list([]).append`` back.
This generates a copy of the original ast node.
If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
"""
# TODO this is the old version of this call. Try to remove it.
trailer = leaf.parent
# The leaf may not be the last or first child, because there exist three
# different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
# we should not match anything more than x.
if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
if trailer.type == 'atom':
return trailer
return leaf
power = trailer.parent
index = power.children.index(trailer)
new_power = copy.copy(power)
new_power.children = list(new_power.children)
new_power.children[index + 1:] = []
if power.type == 'error_node':
start = index
while True:
start -= 1
if power.children[start].type != 'trailer':
break
transformed = tree.Node('power', power.children[start:])
transformed.parent = power.parent
return transformed
return power
def get_names_of_node(node):
try:
children = node.children
except AttributeError:
if node.type == 'name':
return [node]
else:
return []
else:
return list(chain.from_iterable(get_names_of_node(c) for c in children))
def get_module_names(module, all_scopes):
"""
Returns a dictionary with name parts as keys and their call paths as
values.
"""
names = chain.from_iterable(module.get_used_names().values())
if not all_scopes:
# We have to filter all the names that don't have the module as a
# parent_scope. There's None as a parent, because nodes in the module
# node have the parent module and not suite as all the others.
# Therefore it's important to catch that case.
names = [n for n in names if get_parent_scope(n).parent in (module, None)]
return names
@contextmanager
def predefine_names(context, flow_scope, dct):
predefined = context.predefined_names
if flow_scope in predefined:
raise NotImplementedError('Why does this happen?')
predefined[flow_scope] = dct
try:
yield
finally:
del predefined[flow_scope]
| apache-2.0 |
badloop/SickRage | lib/unidecode/x020.py | 87 | 4070 | data = (
' ', # 0x00
' ', # 0x01
' ', # 0x02
' ', # 0x03
' ', # 0x04
' ', # 0x05
' ', # 0x06
' ', # 0x07
' ', # 0x08
' ', # 0x09
' ', # 0x0a
' ', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'-', # 0x10
'-', # 0x11
'-', # 0x12
'-', # 0x13
'--', # 0x14
'--', # 0x15
'||', # 0x16
'_', # 0x17
'\'', # 0x18
'\'', # 0x19
',', # 0x1a
'\'', # 0x1b
'"', # 0x1c
'"', # 0x1d
',,', # 0x1e
'"', # 0x1f
'+', # 0x20
'++', # 0x21
'*', # 0x22
'*>', # 0x23
'.', # 0x24
'..', # 0x25
'...', # 0x26
'.', # 0x27
'\x0a', # 0x28
'\x0a\x0a', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
' ', # 0x2f
'%0', # 0x30
'%00', # 0x31
'\'', # 0x32
'\'\'', # 0x33
'\'\'\'', # 0x34
'`', # 0x35
'``', # 0x36
'```', # 0x37
'^', # 0x38
'<', # 0x39
'>', # 0x3a
'*', # 0x3b
'!!', # 0x3c
'!?', # 0x3d
'-', # 0x3e
'_', # 0x3f
'-', # 0x40
'^', # 0x41
'***', # 0x42
'--', # 0x43
'/', # 0x44
'-[', # 0x45
']-', # 0x46
'[?]', # 0x47
'?!', # 0x48
'!?', # 0x49
'7', # 0x4a
'PP', # 0x4b
'(]', # 0x4c
'[)', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'0', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'4', # 0x74
'5', # 0x75
'6', # 0x76
'7', # 0x77
'8', # 0x78
'9', # 0x79
'+', # 0x7a
'-', # 0x7b
'=', # 0x7c
'(', # 0x7d
')', # 0x7e
'n', # 0x7f
'0', # 0x80
'1', # 0x81
'2', # 0x82
'3', # 0x83
'4', # 0x84
'5', # 0x85
'6', # 0x86
'7', # 0x87
'8', # 0x88
'9', # 0x89
'+', # 0x8a
'-', # 0x8b
'=', # 0x8c
'(', # 0x8d
')', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'ECU', # 0xa0
'CL', # 0xa1
'Cr', # 0xa2
'FF', # 0xa3
'L', # 0xa4
'mil', # 0xa5
'N', # 0xa6
'Pts', # 0xa7
'Rs', # 0xa8
'W', # 0xa9
'NS', # 0xaa
'D', # 0xab
'EU', # 0xac
'K', # 0xad
'T', # 0xae
'Dr', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
jlopezmalla/spark | python/pyspark/status.py | 159 | 3756 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
__all__ = ["SparkJobInfo", "SparkStageInfo", "StatusTracker"]
class SparkJobInfo(namedtuple("SparkJobInfo", "jobId stageIds status")):
"""
Exposes information about Spark Jobs.
"""
class SparkStageInfo(namedtuple("SparkStageInfo",
"stageId currentAttemptId name numTasks numActiveTasks "
"numCompletedTasks numFailedTasks")):
"""
Exposes information about Spark Stages.
"""
class StatusTracker(object):
"""
Low-level status reporting APIs for monitoring job and stage progress.
These APIs intentionally provide very weak consistency semantics;
consumers of these APIs should be prepared to handle empty / missing
information. For example, a job's stage ids may be known but the status
API may not have any information about the details of those stages, so
`getStageInfo` could potentially return `None` for a valid stage id.
To limit memory usage, these APIs only provide information on recent
jobs / stages. These APIs will provide information for the last
`spark.ui.retainedStages` stages and `spark.ui.retainedJobs` jobs.
"""
def __init__(self, jtracker):
self._jtracker = jtracker
def getJobIdsForGroup(self, jobGroup=None):
"""
Return a list of all known jobs in a particular job group. If
`jobGroup` is None, then returns all known jobs that are not
associated with a job group.
The returned list may contain running, failed, and completed jobs,
and may vary across invocations of this method. This method does
not guarantee the order of the elements in its result.
"""
return list(self._jtracker.getJobIdsForGroup(jobGroup))
def getActiveStageIds(self):
"""
Returns an array containing the ids of all active stages.
"""
return sorted(list(self._jtracker.getActiveStageIds()))
def getActiveJobsIds(self):
"""
Returns an array containing the ids of all active jobs.
"""
return sorted((list(self._jtracker.getActiveJobIds())))
def getJobInfo(self, jobId):
"""
Returns a :class:`SparkJobInfo` object, or None if the job info
could not be found or was garbage collected.
"""
job = self._jtracker.getJobInfo(jobId)
if job is not None:
return SparkJobInfo(jobId, job.stageIds(), str(job.status()))
def getStageInfo(self, stageId):
"""
Returns a :class:`SparkStageInfo` object, or None if the stage
info could not be found or was garbage collected.
"""
stage = self._jtracker.getStageInfo(stageId)
if stage is not None:
# TODO: fetch them in batch for better performance
attrs = [getattr(stage, f)() for f in SparkStageInfo._fields[1:]]
return SparkStageInfo(stageId, *attrs)
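# A minimal usage sketch; assumes an active SparkContext named ``sc``, whose
# ``statusTracker()`` method returns an instance of this class.
#
#     tracker = sc.statusTracker()
#     for job_id in tracker.getActiveJobsIds():
#         info = tracker.getJobInfo(job_id)
#         if info is not None:
#             print(info.status, info.stageIds)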
| apache-2.0 |
uclouvain/osis_louvain | reference/tests/factories/decree.py | 1 | 1713 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import string
import factory
import factory.fuzzy
import datetime
from factory import DjangoModelFactory
class DecreeFactory(DjangoModelFactory):
class Meta:
model = "reference.Decree"
external_id = factory.fuzzy.FuzzyText(length=10, chars=string.digits)
name = factory.Faker('text', max_nb_chars=80)
start_date = datetime.date(2015, 1, 1).isoformat()
end_date = datetime.date(2015, 12, 31).isoformat()
| agpl-3.0 |
pexip/os-python-tz | pytz/reference.py | 32 | 3778 | '''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''
from datetime import tzinfo, timedelta, datetime
from pytz import HOUR, ZERO, UTC
__all__ = [
'FixedOffset',
'LocalTimezone',
'USTimeZone',
'Eastern',
'Central',
'Mountain',
'Pacific',
'UTC'
]
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
import time as _time
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
# A class capturing the platform's idea of local time.
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)
# A complete implementation of current DST rules for major US time zones.
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April & the last in October.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
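# Usage sketch: these reference zones implement the pre-2007 US DST rules, so
# the results are only meaningful for the years 1987 to 2006.
#
#     dt = datetime(2002, 7, 4, 12, 0, tzinfo=Eastern)
#     dt.tzname()     # 'EDT' -- July falls inside the DST window
#     dt.utcoffset()  # timedelta(hours=-4)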
| mit |
hdinsight/hue | apps/spark/setup.py | 31 | 1225 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "spark",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Web UI for submitting Spark applications",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'spark=spark' },
)
| apache-2.0 |
arsfeld/conduit | conduit/modules/GoogleModule/gdata/apps/service.py | 1 | 12808 | #!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
API_VER="2.0"
HTTP_OK=200
UNKOWN_ERROR=1000
USER_DELETED_RECENTLY=1100
USER_SUSPENDED=1101
DOMAIN_USER_LIMIT_EXCEEDED=1200
DOMAIN_ALIAS_LIMIT_EXCEEDED=1201
DOMAIN_SUSPENDED=1202
DOMAIN_FEATURE_UNAVAILABLE=1203
ENTITY_EXISTS=1300
ENTITY_DOES_NOT_EXIST=1301
ENTITY_NAME_IS_RESERVED=1302
ENTITY_NAME_NOT_VALID=1303
INVALID_GIVEN_NAME=1400
INVALID_FAMILY_NAME=1401
INVALID_PASSWORD=1402
INVALID_USERNAME=1403
INVALID_HASH_FUNCTION_NAME=1404
INVALID_HASH_DIGGEST_LENGTH=1405
INVALID_EMAIL_ADDRESS=1406
INVALID_QUERY_PARAMETER_VALUE=1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500
DEFAULT_QUOTA_LIMIT='2048'
class Error(Exception):
pass
class AppsForYourDomainException(Error):
def __init__(self, response):
self.args = response
try:
self.element_tree = ElementTree.fromstring(response['body'])
self.error_code = int(self.element_tree[0].attrib['errorCode'])
self.reason = self.element_tree[0].attrib['reason']
self.invalidInput = self.element_tree[0].attrib['invalidInput']
except:
self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
"""Client for the Google Apps Provisioning service."""
def __init__(self, email=None, password=None, domain=None, source=None,
server='www.google.com', additional_headers=None):
gdata.service.GDataService.__init__(self, email=email, password=password,
service='apps', source=source,
server=server,
additional_headers=additional_headers)
self.ssl = True
self.port = 443
self.domain = domain
def _baseURL(self):
return "/a/feeds/%s" % self.domain
def GetGenaratorFromLinkFinder(self, link_finder, func):
"""returns a generator for pagination"""
yield link_finder
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.Get(next.href)))
yield next_feed
next = next_feed.GetNextLink()
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.Get(next.href)))
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
def RetrievePageOfEmailLists(self, start_email_list_name=None):
"""Retrieve one page of email list"""
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
if start_email_list_name is not None:
uri += "?startEmailListName=%s" % start_email_list_name
try:
return gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllEmailLists(self):
"""Retrieve all email list of a domain."""
ret = self.RetrievePageOfEmailLists()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RetrieveEmailList(self, list_name):
"""Retreive a single email list by the list's name."""
uri = "%s/emailList/%s/%s" % (
self._baseURL(), API_VER, list_name)
try:
return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveEmailLists(self, recipient):
"""Retrieve All Email List Subscriptions for an Email Address."""
uri = "%s/emailList/%s?recipient=%s" % (
self._baseURL(), API_VER, recipient)
try:
ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RemoveRecipientFromEmailList(self, recipient, list_name):
"""Remove recipient from email list."""
uri = "%s/emailList/%s/%s/recipient/%s" % (
self._baseURL(), API_VER, list_name, recipient)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfRecipients(self, list_name, start_recipient=None):
"""Retrieve one page of recipient of an email list. """
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
if start_recipient is not None:
uri += "?startRecipient=%s" % start_recipient
try:
return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllRecipients(self, list_name):
"""Retrieve all recipient of an email list."""
ret = self.RetrievePageOfRecipients(list_name)
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListRecipientFeedFromString)
def AddRecipientToEmailList(self, recipient, list_name):
"""Add a recipient to a email list."""
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
recipient_entry = gdata.apps.EmailListRecipientEntry()
recipient_entry.who = gdata.apps.Who(email=recipient)
try:
return gdata.apps.EmailListRecipientEntryFromString(
str(self.Post(recipient_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteEmailList(self, list_name):
"""Delete a email list"""
uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateEmailList(self, list_name):
"""Create a email list. """
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
email_list_entry = gdata.apps.EmailListEntry()
email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
try:
return gdata.apps.EmailListEntryFromString(
str(self.Post(email_list_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteNickname(self, nickname):
"""Delete a nickname"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfNicknames(self, start_nickname=None):
"""Retrieve one page of nicknames in the domain"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
if start_nickname is not None:
uri += "?startNickname=%s" % start_nickname
try:
return gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllNicknames(self):
"""Retrieve all nicknames in the domain"""
ret = self.RetrievePageOfNicknames()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNicknames(self, user_name):
"""Retrieve nicknames of the user"""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNickname(self, nickname):
"""Retrieve a nickname.
Args:
nickname: string The nickname to retrieve
Returns:
gdata.apps.NicknameEntry
"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateNickname(self, user_name, nickname):
"""Create a nickname"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
nickname_entry = gdata.apps.NicknameEntry()
nickname_entry.login = gdata.apps.Login(user_name=user_name)
nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
try:
return gdata.apps.NicknameEntryFromString(
str(self.Post(nickname_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteUser(self, user_name):
"""Delete a user account"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def UpdateUser(self, user_name, user_entry):
"""Update a user account."""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateUser(self, user_name, family_name, given_name, password,
suspended='false', quota_limit=None,
password_hash_function=None):
"""Create a user account. """
uri = "%s/user/%s" % (self._baseURL(), API_VER)
user_entry = gdata.apps.UserEntry()
user_entry.login = gdata.apps.Login(
user_name=user_name, password=password, suspended=suspended,
hash_function_name=password_hash_function)
user_entry.name = gdata.apps.Name(family_name=family_name,
given_name=given_name)
if quota_limit is not None:
user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
try:
return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def SuspendUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'true':
user_entry.login.suspended = 'true'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RestoreUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'false':
user_entry.login.suspended = 'false'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RetrieveUser(self, user_name):
"""Retrieve an user account.
Args:
user_name: string The user name to retrieve
Returns:
gdata.apps.UserEntry
"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfUsers(self, start_username=None):
"""Retrieve one page of users in this domain."""
uri = "%s/user/%s" % (self._baseURL(), API_VER)
if start_username is not None:
uri += "?startUsername=%s" % start_username
try:
return gdata.apps.UserFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllUsers(self):
"""Retrieve a generator for all users in this domain."""
first_page = self.RetrievePageOfUsers()
return self.GetGenaratorFromLinkFinder(first_page,
gdata.apps.UserFeedFromString)
def RetrieveAllUsers(self):
"""Retrieve all users in this domain. OBSOLETE"""
ret = self.RetrievePageOfUsers()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.UserFeedFromString)
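# A minimal usage sketch; the credentials and domain below are placeholders.
# ProgrammaticLogin() is the ClientLogin call inherited from
# gdata.service.GDataService.
#
#     service = AppsService(email='admin@example.com', password='secret',
#                           domain='example.com')
#     service.ProgrammaticLogin()
#     new_user = service.CreateUser('jdoe', 'Doe', 'John', 'p4ssw0rd')
#     nicknames = service.RetrieveNicknames('jdoe')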
| gpl-2.0 |
orig74/DroneSimLab | demos/px4_test/unreal_proxy.py | 1 | 1957 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from Wrappers import phandlers as ph
import zmq,pickle,time,cv2
import numpy as np
import config
context = zmq.Context()
socket_sub = context.socket(zmq.SUB)
socket_sub.connect('tcp://%s:%d'%config.zmq_pub_drone_main)
socket_pub = context.socket(zmq.PUB)
socket_pub.bind("tcp://*:%d" % config.zmq_pub_unreal_proxy[1] )
socket_sub.setsockopt(zmq.SUBSCRIBE,config.topic_sitl_position_report)
start=time.time()
def main_loop(gworld):
drone_actor=ph.FindActorByName(gworld,'Parrot_Drone_6',1)
#drone_camera_actor=ph.FindActorByName(gworld,'SceneCapture2Ddrone',1)
if drone_actor is None:# or drone_camera_actor is None:
print('ERROR: could not find drone_actor')
while 1:
yield
for _ in range(10): # need to send it a few times, don't know why.
socket_pub.send_multipart([config.topic_unreal_state,b'main_loop'])
yield
drone_start_pos=np.array(ph.GetActorLocation(drone_actor))
position=None
while 1:
while len(zmq.select([socket_sub],[],[],0)[0])>0:
topic, msg = socket_sub.recv_multipart()
position=pickle.loads(msg)
if position is not None:
new_pos=drone_start_pos+np.array([position['posx'],position['posy'],position['posz']])*100 #turn to cm
ph.SetActorLocation(drone_actor,new_pos)
ph.SetActorRotation(drone_actor,(position['roll'],position['pitch'],position['yaw']))
# in case of a gimbal
#ph.SetActorRotation(drone_camera_actor,(-position['roll'],-position['pitch'],-position['yaw']))
position=None
yield
img1=cv2.resize(ph.GetTextureImg(),(512,512),cv2.INTER_LINEAR)
cv2.imshow('camera 1',img1)
cv2.waitKey(1)
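# Sketch of the message format this proxy expects on the SITL position topic
# (field names taken from main_loop above; the values are made up):
#
#     msg = pickle.dumps({'posx': 1.0, 'posy': 0.0, 'posz': -0.5,
#                         'roll': 0.0, 'pitch': 0.0, 'yaw': 90.0})
#     publisher.send_multipart([config.topic_sitl_position_report, msg])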
def kill():
print('done!')
socket_pub.send_multipart([config.topic_unreal_state,b'kill'])
cv2.destroyAllWindows()
for _ in range(10):
cv2.waitKey(10)
| mit |
felix-d/boto | boto/beanstalk/layer1.py | 146 | 56259 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None, profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token, profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
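# Usage sketch, not part of the upstream source: assumes the same ``conn``
# object as above; the worker-tier values follow the combinations listed in
# the docstring, and all names are illustrative placeholders.
#
#     conn.create_environment(
#         'my-app', 'my-app-worker',
#         version_label='v1',
#         solution_stack_name='64bit Amazon Linux running Python',
#         option_settings=[('aws:autoscaling:launchconfiguration',
#                           'Ec2KeyName', 'mykeypair')],
#         tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0')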
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
values of the options, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
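# Usage sketch, not part of the upstream source: assumes ``conn`` as above.
# Per the condition in the docstring, pass either a template name or an
# environment name, never both.
#
#     settings = conn.describe_configuration_settings(
#         'my-app', environment_name='my-app-env')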
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
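# Usage sketch, not part of the upstream source: assumes ``conn`` as above;
# the cut-off timestamp is an illustrative ISO 8601 string.
#
#     envs = conn.describe_environments(
#         application_name='my-app',
#         include_deleted=True,
#         included_deleted_back_to='2014-01-01T00:00:00Z')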
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
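# Usage sketch, not part of the upstream source: assumes ``conn`` as above;
# the severity level and time value are illustrative.
#
#     events = conn.describe_events(
#         application_name='my-app',
#         environment_name='my-app-env',
#         severity='WARN',
#         start_time='2014-01-01T00:00:00Z',
#         max_records=50)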
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
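# Usage sketch of the two-step flow described above, not part of the
# upstream source: assumes ``conn`` as above and that the instances have had
# time to publish their log tails before retrieval.
#
#     conn.request_environment_info(info_type='tail',
#                                   environment_name='my-app-env')
#     # ...poll or wait briefly, then:
#     info = conn.retrieve_environment_info(info_type='tail',
#                                           environment_name='my-app-env')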
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
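# Usage sketch, not part of the upstream source: assumes ``conn`` as above.
# A typical blue/green swap passes a matching pair of names (or of IDs),
# as required by the conditions in the docstring.
#
#     conn.swap_environment_cnames(
#         source_environment_name='my-app-blue',
#         destination_environment_name='my-app-green')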
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
environment. false: The environment is removed from the AWS
Elastic Beanstalk but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
UpdateApplicationVersion returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
application version is found with this label, UpdateApplicationVersion
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
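# Usage sketch, not part of the upstream source: assumes ``conn`` as above.
# Updating a single option value in place; per the docstring, do not combine
# a release update with a configuration change in the same call.
#
#     conn.update_environment(
#         environment_name='my-app-env',
#         option_settings=[('aws:autoscaling:asg', 'MaxSize', '4')])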
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
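# Usage sketch, not part of the upstream source: assumes ``conn`` as above.
# Validating an option change against a running environment before applying
# it with update_environment; the namespace and option values are illustrative.
#
#     messages = conn.validate_configuration_settings(
#         'my-app',
#         [('aws:autoscaling:asg', 'MaxSize', '4')],
#         environment_name='my-app-env')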
def _build_list_params(self, params, user_values, prefix, tuple_names):
# For params such as the ConfigurationOptionSettings,
# they can specify a list of tuples where each tuple maps to a specific
# arg. For example:
# user_values = [('foo', 'bar', 'baz')]
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
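# Worked sketch, not part of the upstream source: ties the helper to the
# option-settings tuples passed in by the public methods above (hypothetical
# standalone values).
#
#     params = {}
#     self._build_list_params(
#         params,
#         [('aws:autoscaling:launchconfiguration', 'Ec2KeyName', 'mykeypair')],
#         'OptionSettings.member',
#         ('Namespace', 'OptionName', 'Value'))
#     # params is now:
#     # {'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration',
#     #  'OptionSettings.member.1.OptionName': 'Ec2KeyName',
#     #  'OptionSettings.member.1.Value': 'mykeypair'}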
| mit |
fjbatresv/odoo | addons/website_partner/__openerp__.py | 383 | 1498 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Partner',
'category': 'Website',
'summary': 'Partner Module for Website',
'version': '0.1',
'description': """Base module holding website-related stuff for partner model""",
'author': 'OpenERP SA',
'depends': ['website'],
'data': [
'views/res_partner_view.xml',
'views/website_partner_view.xml',
'data/website_data.xml',
],
'demo': ['data/demo.xml'],
'qweb': [
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
leeon/annotated-django | tests/deprecation/tests.py | 35 | 9237 | from __future__ import unicode_literals
import os
import unittest
import warnings
from django.test import SimpleTestCase, RequestFactory, override_settings
from django.utils import six, translation
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import force_text
from django.utils.functional import memoize
class RenameManagerMethods(RenameMethodsBase):
renamed_methods = (
('old', 'new', DeprecationWarning),
)
class RenameMethodsTests(SimpleTestCase):
"""
Tests the `RenameMethodsBase` type introduced to rename `get_query_set`
to `get_queryset` across the code base following #15363.
"""
def test_class_definition_warnings(self):
"""
Ensure a warning is raised upon class definition to suggest renaming
the faulty method.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
class Manager(six.with_metaclass(RenameManagerMethods)):
def old(self):
pass
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertEqual(msg,
'`Manager.old` method should be renamed `new`.')
def test_get_new_defined(self):
"""
Ensure `old` complains and not `new` when only `new` is defined.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Manager(six.with_metaclass(RenameManagerMethods)):
def new(self):
pass
warnings.simplefilter('always')
manager = Manager()
manager.new()
self.assertEqual(len(recorded), 0)
manager.old()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Manager.old` is deprecated, use `new` instead.')
def test_get_old_defined(self):
"""
Ensure `old` complains when only `old` is defined.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Manager(six.with_metaclass(RenameManagerMethods)):
def old(self):
pass
warnings.simplefilter('always')
manager = Manager()
manager.new()
self.assertEqual(len(recorded), 0)
manager.old()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Manager.old` is deprecated, use `new` instead.')
def test_deprecated_subclass_renamed(self):
"""
Ensure the correct warnings are raised when a class that didn't rename
`old` subclasses one that did.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Renamed(six.with_metaclass(RenameManagerMethods)):
def new(self):
pass
class Deprecated(Renamed):
def old(self):
super(Deprecated, self).old()
warnings.simplefilter('always')
deprecated = Deprecated()
deprecated.new()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Renamed.old` is deprecated, use `new` instead.')
recorded[:] = []
deprecated.old()
self.assertEqual(len(recorded), 2)
msgs = [str(warning.message) for warning in recorded]
self.assertEqual(msgs, [
'`Deprecated.old` is deprecated, use `new` instead.',
'`Renamed.old` is deprecated, use `new` instead.',
])
def test_renamed_subclass_deprecated(self):
"""
Ensure the correct warnings are raised when a class that renamed
`old` subclasses one that didn't.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Deprecated(six.with_metaclass(RenameManagerMethods)):
def old(self):
pass
class Renamed(Deprecated):
def new(self):
super(Renamed, self).new()
warnings.simplefilter('always')
renamed = Renamed()
renamed.new()
self.assertEqual(len(recorded), 0)
renamed.old()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Renamed.old` is deprecated, use `new` instead.')
def test_deprecated_subclass_renamed_and_mixins(self):
"""
Ensure the correct warnings are raised when a subclass inherits from a
class that renamed `old` and mixins that may or may not have renamed
`new`.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Renamed(six.with_metaclass(RenameManagerMethods)):
def new(self):
pass
class RenamedMixin(object):
def new(self):
super(RenamedMixin, self).new()
class DeprecatedMixin(object):
def old(self):
super(DeprecatedMixin, self).old()
class Deprecated(DeprecatedMixin, RenamedMixin, Renamed):
pass
warnings.simplefilter('always')
deprecated = Deprecated()
deprecated.new()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`RenamedMixin.old` is deprecated, use `new` instead.')
deprecated.old()
self.assertEqual(len(recorded), 2)
msgs = [str(warning.message) for warning in recorded]
self.assertEqual(msgs, [
'`DeprecatedMixin.old` is deprecated, use `new` instead.',
'`RenamedMixin.old` is deprecated, use `new` instead.',
])
class DeprecatingRequestMergeDictTest(SimpleTestCase):
def test_deprecated_request(self):
"""
Ensure the correct warning is raised when WSGIRequest.REQUEST is
accessed.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
request = RequestFactory().get('/')
request.REQUEST # evaluate
msgs = [str(warning.message) for warning in recorded]
self.assertEqual(msgs, [
'`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.',
'`MergeDict` is deprecated, use `dict.update()` instead.',
])
@override_settings(USE_I18N=True)
class DeprecatedChineseLanguageCodes(SimpleTestCase):
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
with translation.override('zh-cn'):
pass
with translation.override('zh-tw'):
pass
msgs = [str(warning.message) for warning in recorded]
self.assertEqual(msgs, [
"The use of the language code 'zh-cn' is deprecated. "
"Please use the 'zh-hans' translation instead.",
"The use of the language code 'zh-tw' is deprecated. "
"Please use the 'zh-hant' translation instead.",
])
class DeprecatingMemoizeTest(SimpleTestCase):
def test_deprecated_memoize(self):
"""
Ensure the correct warning is raised when memoize is used.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
memoize(lambda x: x, {}, 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'memoize wrapper is deprecated and will be removed in Django '
'1.9. Use django.utils.lru_cache instead.')
class DeprecatingSimpleTestCaseUrls(unittest.TestCase):
def test_deprecation(self):
"""
Ensure the correct warning is raised when SimpleTestCase.urls is used.
"""
class TempTestCase(SimpleTestCase):
urls = 'tests.urls'
def test(self):
pass
with warnings.catch_warnings(record=True) as recorded:
suite = unittest.TestLoader().loadTestsFromTestCase(TempTestCase)
with open(os.devnull, 'w') as devnull:
unittest.TextTestRunner(stream=devnull, verbosity=2).run(suite)
msg = force_text(recorded.pop().message)
self.assertEqual(msg,
"SimpleTestCase.urls is deprecated and will be removed in "
"Django 2.0. Use @override_settings(ROOT_URLCONF=...) "
"in TempTestCase instead.")
| bsd-3-clause |