text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
from . import _ccallback_c
import ctypes
# Common base class of all ctypes function-pointer types, recovered from a
# throwaway CFUNCTYPE; used for isinstance checks on ctypes callbacks.
PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]

# Lazily initialized cffi handle: None = import not attempted yet,
# False = cffi unavailable, FFI instance = ready (see _import_cffi).
ffi = None
class CData(object):
    """Placeholder used for isinstance checks until the real ``ffi.CData``
    class is bound by ``_import_cffi()``; matches nothing by itself."""
    pass
def _import_cffi():
    """Bind the module-level ``ffi`` and ``CData`` globals on first use.

    After this call ``ffi`` is either a ``cffi.FFI`` instance or ``False``
    when cffi is not installed; subsequent calls return immediately.
    """
    global ffi, CData
    # Already attempted (either FFI instance or False sentinel).
    if ffi is not None:
        return
    try:
        import cffi
        ffi = cffi.FFI()
        CData = ffi.CData
    except ImportError:
        # Mark cffi as unavailable so the import is not retried.
        ffi = False
class LowLevelCallable(tuple):
    """
    Low-level callback function.

    Parameters
    ----------
    function : {PyCapsule, ctypes function pointer, cffi function pointer}
        Low-level callback function.
    user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
        User data to pass on to the callback function.
    signature : str, optional
        Signature of the function. If omitted, determined from *function*,
        if possible.

    Attributes
    ----------
    function
        Callback function given
    user_data
        User data given
    signature
        Signature of the function.

    Methods
    -------
    from_cython
        Class method for constructing callables from Cython C-exported
        functions.

    Notes
    -----
    The argument ``function`` can be one of:

    - PyCapsule, whose name contains the C function signature
    - ctypes function pointer
    - cffi function pointer

    The signature of the low-level callback must match one of those expected
    by the routine it is passed to.

    If constructing low-level functions from a PyCapsule, the name of the
    capsule must be the corresponding signature, in the format::

        return_type (arg1_type, arg2_type, ...)

    For example::

        "void (double)"
        "double (double, int *, void *)"

    The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
    if an explicit value for ``user_data`` was not given.
    """
    # Make the class immutable
    __slots__ = ()

    def __new__(cls, function, user_data=None, signature=None):
        # We need to hold a reference to the function & user data,
        # to prevent them going out of scope.  The parsed capsule goes in
        # slot 0, the original objects in slots 1 and 2.
        item = cls._parse_callback(function, user_data, signature)
        return tuple.__new__(cls, (item, function, user_data))

    def __repr__(self):
        return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data)

    @property
    def function(self):
        # Slot 1: the original callback object, kept alive by the tuple.
        return tuple.__getitem__(self, 1)

    @property
    def user_data(self):
        # Slot 2: the original user-data object, kept alive by the tuple.
        return tuple.__getitem__(self, 2)

    @property
    def signature(self):
        # The signature is recovered from the capsule in slot 0 by the C
        # helper module.
        return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))

    def __getitem__(self, idx):
        # Disallow indexing: the internal tuple layout is an implementation
        # detail, use the named properties instead.
        raise ValueError()

    @classmethod
    def from_cython(cls, module, name, user_data=None, signature=None):
        """
        Create a low-level callback function from an exported Cython function.

        Parameters
        ----------
        module : module
            Cython module where the exported function resides
        name : str
            Name of the exported function
        user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
            User data to pass on to the callback function.
        signature : str, optional
            Signature of the function. If omitted, determined from *function*.

        Raises
        ------
        ValueError
            If the module has no ``__pyx_capi__`` dict or the name is not in it.
        """
        try:
            function = module.__pyx_capi__[name]
        except AttributeError:
            raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute")
        except KeyError:
            raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name))
        return cls(function, user_data, signature)

    @classmethod
    def _parse_callback(cls, obj, user_data=None, signature=None):
        # Normalize (callback, user_data) to a single raw PyCapsule holding
        # the function pointer, signature string and context pointer.
        _import_cffi()

        if isinstance(obj, LowLevelCallable):
            # Reuse the already-parsed capsule from slot 0.
            func = tuple.__getitem__(obj, 0)
        elif isinstance(obj, PyCFuncPtr):
            func, signature = _get_ctypes_func(obj, signature)
        elif isinstance(obj, CData):
            func, signature = _get_cffi_func(obj, signature)
        elif _ccallback_c.check_capsule(obj):
            func = obj
        else:
            raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)")

        if isinstance(user_data, ctypes.c_void_p):
            context = _get_ctypes_data(user_data)
        elif isinstance(user_data, CData):
            context = _get_cffi_data(user_data)
        elif user_data is None:
            # No explicit user data: the C helper falls back to the capsule
            # context (0 means "use default") — see class docstring.
            context = 0
        elif _ccallback_c.check_capsule(user_data):
            context = user_data
        else:
            raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)")

        return _ccallback_c.get_raw_capsule(func, signature, context)
#
# ctypes helpers
#
def _get_ctypes_func(func, signature=None):
    """Return ``(address, signature)`` for a ctypes function pointer.

    The raw address is obtained by casting the pointer to ``void *``.  When
    *signature* is not supplied, it is rebuilt from ``func.restype`` and
    ``func.argtypes`` in the ``"ret (arg1, arg2, ...)"`` format.
    """
    # Raw C address of the callback.
    func_ptr = ctypes.cast(func, ctypes.c_void_p).value

    if signature is None:
        argument_names = [_typename_from_ctypes(arg) for arg in func.argtypes]
        signature = '{} ({})'.format(
            _typename_from_ctypes(func.restype),
            ', '.join(argument_names),
        )

    return func_ptr, signature
def _typename_from_ctypes(item):
if item is None:
return "void"
elif item is ctypes.c_void_p:
return "void *"
name = item.__name__
pointer_level = 0
while name.startswith("LP_"):
pointer_level += 1
name = name[3:]
if name.startswith('c_'):
name = name[2:]
if pointer_level > 0:
name += " " + "*"*pointer_level
return name
def _get_ctypes_data(data):
# Get voidp pointer
return ctypes.cast(data, ctypes.c_void_p).value
#
# CFFI helpers
#
def _get_cffi_func(func, signature=None):
    """Return ``(address, signature)`` for a cffi function pointer."""
    # Integer address of the callback.
    func_ptr = ffi.cast('uintptr_t', func)

    # Derive the signature from the cffi type string, turning the C
    # function-pointer spelling "ret(*)(args)" into "ret (args)".
    if signature is None:
        ctype_string = ffi.getctype(ffi.typeof(func))
        signature = ctype_string.replace('(*)', ' ')

    return func_ptr, signature
def _get_cffi_data(data):
    """Return a cffi data object's pointer as a ``uintptr_t`` value."""
    pointer_value = ffi.cast('uintptr_t', data)
    return pointer_value
| Eric89GXL/scipy | scipy/_lib/_ccallback.py | Python | bsd-3-clause | 6,196 | 0.001453 |
#!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class JsonErrorResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    API error response model with a ``status`` ("ok" or "error") and a
    human-readable ``message``.
    """

    def __init__(self):
        """
        Swagger model

        :param dict swaggerTypes: The key is attribute name and the value is attribute type.
        :param dict attributeMap: The key is attribute name and the value is json key in definition.
        """
        self.swagger_types = {
            'status': 'str',
            'message': 'str'
        }

        self.attribute_map = {
            'status': 'status',
            'message': 'message'
        }

        # Status: "ok" or "error"
        self.status = None  # str
        # Error message
        self.message = None  # str

    def __repr__(self):
        properties = []
        for p in self.__dict__:
            # BUG FIX: the original compared against the camelCase names
            # 'swaggerTypes'/'attributeMap', which never match the real
            # snake_case attributes, so both metadata dicts leaked into
            # the repr output.
            if p not in ('swagger_types', 'attribute_map'):
                properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))

        # BUG FIX: use the class name; the original formatted the module-level
        # __name__, which prints the module, not the class.
        return '<{name} {props}>'.format(name=type(self).__name__, props=' '.join(properties))
| QuantiModo/QuantiModo-SDK-Python | SwaggerPetstore/models/json_error_response.py | Python | gpl-2.0 | 1,773 | 0.00564 |
# -*- coding: utf-8 -*-
# SMS OpenAPI example configuration constants.
appid = 'example'  # application id registered with the SMS service
apikey = 'c5dd7e7dkjp27377l903c42c032b413b'  # API key used for authentication
sender = '01000000000' # FIXME - MUST BE CHANGED AS REAL PHONE NUMBER
receivers = ['01000000000', ] # FIXME - MUST BE CHANGED AS REAL PHONE NUMBERS
# Sample message body (Korean pangram-style test sentence); runtime value,
# intentionally left untranslated.
content = u'나는 유리를 먹을 수 있어요. 그래도 아프지 않아요'
| BlueHouseLab/sms-openapi | python-requests/conf.py | Python | apache-2.0 | 324 | 0.003521 |
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that is configured with CMake, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.environment import setvar
from easybuild.tools.run import run_cmd
class CMakeMake(ConfigureMake):
    """Support for configuring build with CMake instead of traditional configure script"""

    @staticmethod
    def extra_options(extra_vars=None):
        """Define extra easyconfig parameters specific to CMakeMake."""
        extra_vars = ConfigureMake.extra_options(extra_vars)
        extra_vars.update({
            'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
            'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
        })
        return extra_vars

    def configure_step(self, srcdir=None, builddir=None):
        """Configure build using cmake

        :param srcdir: source directory passed to the cmake command; falls back to
            the 'srcdir' easyconfig parameter, then to '.' (or the start dir when
            building in a separate directory)
        :param builddir: no longer supported, use 'srcdir' instead
        """
        if builddir is not None:
            self.log.nosupport("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")

        # Set the search paths for CMake
        include_paths = os.pathsep.join(self.toolchain.get_variable("CPPFLAGS", list))
        library_paths = os.pathsep.join(self.toolchain.get_variable("LDFLAGS", list))
        setvar("CMAKE_INCLUDE_PATH", include_paths)
        setvar("CMAKE_LIBRARY_PATH", library_paths)

        default_srcdir = '.'
        if self.cfg.get('separate_build_dir', False):
            objdir = os.path.join(self.builddir, 'easybuild_obj')
            try:
                os.mkdir(objdir)
                os.chdir(objdir)
            # BUG FIX: 'except OSError, err' is Python-2-only syntax and a
            # SyntaxError on Python 3; 'as' works on Python 2.6+ and 3.
            except OSError as err:
                raise EasyBuildError("Failed to create separate build dir %s in %s: %s", objdir, os.getcwd(), err)
            default_srcdir = self.cfg['start_dir']

        if srcdir is None:
            if self.cfg.get('srcdir', None) is not None:
                srcdir = self.cfg['srcdir']
            else:
                srcdir = default_srcdir

        options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]

        # Forward compiler settings from the environment as CMake options.
        env_to_options = {
            'CC': 'CMAKE_C_COMPILER',
            'CFLAGS': 'CMAKE_C_FLAGS',
            'CXX': 'CMAKE_CXX_COMPILER',
            'CXXFLAGS': 'CMAKE_CXX_FLAGS',
            'F90': 'CMAKE_Fortran_COMPILER',
            'FFLAGS': 'CMAKE_Fortran_FLAGS',
        }
        for env_name, option in env_to_options.items():
            value = os.getenv(env_name)
            if value is not None:
                options.append("-D%s='%s'" % (option, value))

        if build_option('rpath'):
            # instruct CMake not to fiddle with RPATH when --rpath is used, since it will undo stuff on install...
            # https://github.com/LLNL/spack/blob/0f6a5cd38538e8969d11bd2167f11060b1f53b43/lib/spack/spack/build_environment.py#L416
            options.append('-DCMAKE_SKIP_RPATH=ON')

        # show what CMake is doing by default
        options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')

        options_string = ' '.join(options)
        command = "%s cmake %s %s %s" % (self.cfg['preconfigopts'], srcdir, options_string, self.cfg['configopts'])
        (out, _) = run_cmd(command, log_all=True, simple=False)

        return out
| ULHPC/easybuild-easyblocks | easybuild/easyblocks/generic/cmakemake.py | Python | gpl-2.0 | 4,702 | 0.002552 |
# coding=utf-8
from __future__ import unicode_literals
"""
Name: MyArgparse
Author: Andy Liu
Email : [email protected]
Created: 3/26/2015
Copyright: All rights reserved.
Licence: This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
def parse_command_line():
    """Build the demo argparse parser, parse sys.argv and log every result."""
    arg_parser = argparse.ArgumentParser(prog='PROG', description='%(prog)s can ...')
    arg_parser.add_argument('NoPre', action="store", help='help information')
    arg_parser.add_argument('-t', action="store_true", dest='boolean_switch', default=False, help='Set a switch to true')
    arg_parser.add_argument('-f', action="store_false", dest='boolean_switch', default=True, help='Set a switch to false')
    arg_parser.add_argument('-s', action="store", dest='simple_value', help="Store a simple value")
    arg_parser.add_argument('-st', action="store", dest="simple_value", type=int,
                            help='Store a simple value and define type')
    arg_parser.add_argument('-c', action='store_const', dest='constant_value', const='value-to-store',
                            help='Store a constant value')
    arg_parser.add_argument('-a', action='append', dest='collection', default=[], help='Add repeated values to a list')
    arg_parser.add_argument('-A', action='append_const', dest='const_collection', const='value-1-to-append', default=[],
                            help='Add different values to list')
    arg_parser.add_argument('-B', action='append_const', dest='const_collection', const='value-2-to-append',
                            help='Add different values to list')

    parsed = arg_parser.parse_args()

    # Echo every destination at debug level (same output as logging each
    # attribute individually).
    for attribute in ('NoPre', 'simple_value', 'constant_value',
                      'boolean_switch', 'collection', 'const_collection'):
        logging.debug('%s = %r' % (attribute, getattr(parsed, attribute)))

    return parsed
if __name__ == '__main__':
    # Demo entry point: configure logging via the project helper, then
    # parse the command line so the debug output is visible.
    from MyLog import init_logger
    logger = init_logger()
    parse_command_line()
| asiroliu/MyTools | MyArgparse.py | Python | gpl-2.0 | 2,738 | 0.004018 |
from django.contrib import admin
from trainer.models import Language, Word, Card, Set
# Expose the vocabulary-trainer models in the Django admin with the
# default ModelAdmin options.
admin.site.register(Language)
admin.site.register(Word)
admin.site.register(Card)
admin.site.register(Set)
| chrigu6/vocabulary | vocabulary/trainer/admin.py | Python | gpl-3.0 | 195 | 0 |
# -*- coding: utf-8 -*-
import logging
import random
from collections import (
namedtuple,
defaultdict,
)
from itertools import repeat
import cachetools
import gevent
from gevent.event import (
_AbstractLinkable,
AsyncResult,
Event,
)
from ethereum import slogging
from raiden.exceptions import (
InvalidAddress,
InvalidLocksRoot,
InvalidNonce,
TransferWhenClosed,
TransferUnwanted,
UnknownAddress,
UnknownTokenAddress,
)
from raiden.constants import (
UDP_MAX_MESSAGE_SIZE,
)
from raiden.settings import (
CACHE_TTL,
)
from raiden.messages import decode, Ack, Ping, SignedMessage
from raiden.utils import isaddress, sha3, pex
from raiden.utils.notifying_queue import NotifyingQueue
log = slogging.get_logger(__name__)  # pylint: disable=invalid-name
ping_log = slogging.get_logger(__name__ + '.ping')  # pylint: disable=invalid-name

# - async_result available for code that wants to block on message acknowledgment
# - receiver_address used to tie back the echohash to the receiver (mainly for
#   logging purposes)
SentMessageState = namedtuple('SentMessageState', (
    'async_result',
    'receiver_address',
))

# Pair of gevent events describing the reachability of a node; tasks wait on
# whichever side they care about (see healthcheck / single_queue_send).
HealthEvents = namedtuple('HealthEvents', (
    'event_healthy',
    'event_unhealthy',
))

# Network states tracked per node by the protocol layer.
NODE_NETWORK_UNKNOWN = 'unknown'
NODE_NETWORK_UNREACHABLE = 'unreachable'
NODE_NETWORK_REACHABLE = 'reachable'

# GOALS:
# - Each netting channel must have the messages processed in-order, the
#   protocol must detect unacknowledged messages and retry them.
# - A queue must not stall because of synchronization problems in other queues.
# - Assuming a queue can stall, the unhealthiness of a node must not be
#   inferred from the lack of acknowledgement from a single queue, but
#   healthiness may be safely inferred from it.
# - The state of the node must be synchronized among all tasks that are
#   handling messages.
def event_first_of(*events):
    """ Waits until one of `events` is set.

    The event returned is /not/ cleared with any of the `events`, this value
    must not be reused if the clearing behavior is used.
    """
    first_finished = Event()

    # Every argument must support rawlink, otherwise it cannot notify us.
    if not all(isinstance(single_event, _AbstractLinkable) for single_event in events):
        raise ValueError('all events must be linkable')

    for single_event in events:
        single_event.rawlink(lambda _: first_finished.set())

    return first_finished
def timeout_exponential_backoff(retries, timeout, maximum):
    """ Timeouts generator with an exponential backoff strategy.

    Timeouts start spaced by `timeout`, after `retries` exponentially increase
    the retry delays until `maximum`, then maximum is returned indefinitely.
    """
    # Phase 1: `retries` evenly spaced timeouts (at least one).
    yield timeout
    for _ in range(retries - 1):
        yield timeout

    # Phase 2: double the delay until the cap is reached.
    while timeout < maximum:
        timeout = min(timeout * 2, maximum)
        yield timeout

    # Phase 3: stay at the cap forever.
    while True:
        yield maximum
def retry(protocol, data, receiver_address, event_stop, timeout_backoff):
    """ Send data until it's acknowledged.

    Exits when the first of the following happen:

    - The packet is acknowledged.
    - Event_stop is set.
    - The iterator timeout_backoff runs out of values.

    Returns:
        bool: True if the message was acknowledged, False otherwise.
    """
    async_result = protocol.send_raw_with_result(data, receiver_address)

    # Finish waiting as soon as either the ack arrives or we must stop.
    event_quit = event_first_of(async_result, event_stop)

    for timeout in timeout_backoff:
        if event_quit.wait(timeout=timeout) is True:
            break

        # Not acknowledged within this timeout: resend the same datagram.
        protocol.send_raw_with_result(data, receiver_address)

    return async_result.ready()
def wait_recovery(event_stop, event_healthy):
    """ Block until the node recovers or the protocol is told to stop. """
    event_first_of(event_stop, event_healthy).wait()

    if event_stop.is_set():
        return

    # There may be multiple threads waiting, do not restart them all at
    # once to avoid message flood.
    gevent.sleep(random.random())
def retry_with_recovery(
        protocol,
        data,
        receiver_address,
        event_stop,
        event_healthy,
        event_unhealthy,
        backoff):
    """ Send data while the node is healthy until it's acknowledged.

    Note:
        backoff must be an infinite iterator, otherwise this task will
        become a hot loop.

    Returns a truthy value only when the message was acknowledged (a bare
    ``return`` on early stop yields None, which callers treat as False).
    """
    # The underlying unhealthy will be cleared, care must be taken to properly
    # clear stop_or_unhealthy too.
    stop_or_unhealthy = event_first_of(
        event_stop,
        event_unhealthy,
    )

    acknowledged = False
    while not event_stop.is_set() and not acknowledged:
        # Packets must not be sent to an unhealthy node, nor should the task
        # wait for it to become available if the message has been acknowledged.
        if event_unhealthy.is_set():
            wait_recovery(
                event_stop,
                event_healthy,
            )

            # Assume wait_recovery returned because unhealthy was cleared and
            # continue execution, this is safe to do because event_stop is
            # checked below.
            stop_or_unhealthy.clear()

            if event_stop.is_set():
                # Stopped while waiting for recovery; None is falsy, so the
                # caller sees "not acknowledged".
                return

        acknowledged = retry(
            protocol,
            data,
            receiver_address,

            # retry will stop when this event is set, allowing this task to
            # wait for recovery when the node becomes unhealthy or to quit if
            # the stop event is set.
            stop_or_unhealthy,

            # Intentionally reusing backoff to restart from the last
            # timeout/number of iterations.
            backoff,
        )

    return acknowledged
def single_queue_send(
        protocol,
        receiver_address,
        queue,
        event_stop,
        event_healthy,
        event_unhealthy,
        message_retries,
        message_retry_timeout,
        message_retry_max_timeout):
    """ Handles a single message queue for `receiver_address`.

    Messages are popped only after acknowledgment, so ordering per queue is
    preserved across retries.

    Notes:
    - This task must be the only consumer of queue.
    - This task can be killed at any time, but the intended usage is to stop it
      with the event_stop.
    - If there are many queues for the same receiver_address, it is the
      caller's responsibility to not start them together to avoid congestion.
    - This task assumes the endpoint is never cleared after it's first known.
      If this assumption changes the code must be updated to handle unknown
      addresses.
    """
    # A NotifyingQueue is required to implement cancelability, otherwise the
    # task cannot be stoped while the greenlet waits for an element to be
    # inserted in the queue.
    if not isinstance(queue, NotifyingQueue):
        raise ValueError('queue must be a NotifyingQueue.')

    # Reusing the event, clear must be carefully done
    data_or_stop = event_first_of(
        queue,
        event_stop,
    )

    # Wait for the endpoint registration or to quit
    event_first_of(
        event_healthy,
        event_stop,
    ).wait()

    while True:
        data_or_stop.wait()

        if event_stop.is_set():
            return

        # The queue is not empty at this point, so this won't raise Empty.
        # This task being the only consumer is a requirement.
        data = queue.peek(block=False)

        backoff = timeout_exponential_backoff(
            message_retries,
            message_retry_timeout,
            message_retry_max_timeout,
        )

        acknowledged = retry_with_recovery(
            protocol,
            data,
            receiver_address,
            event_stop,
            event_healthy,
            event_unhealthy,
            backoff,
        )

        if acknowledged:
            # Only now remove the message; a crash before this point keeps it
            # queued for a later retry.
            queue.get()

            # Checking the length of the queue does not trigger a
            # context-switch, so it's safe to assume the length of the queue
            # won't change under our feet and when a new item will be added the
            # event will be set again.
            if not queue:
                data_or_stop.clear()

                if event_stop.is_set():
                    return
def healthcheck(
        protocol,
        receiver_address,
        event_stop,
        event_healthy,
        event_unhealthy,
        nat_keepalive_retries,
        nat_keepalive_timeout,
        nat_invitation_timeout,
        ping_nonce):
    """ Sends a periodical Ping to `receiver_address` to check its health.

    Flips `event_healthy`/`event_unhealthy` and the protocol's network state
    for the node based on whether the Pings are acknowledged.  `ping_nonce`
    is a mutable dict ({'nonce': int}) shared with the protocol so the nonce
    survives across tasks.
    """
    # The state of the node is unknown, the events are set to allow the tasks
    # to do work.
    protocol.set_node_network_state(
        receiver_address,
        NODE_NETWORK_UNKNOWN,
    )

    # Always call `clear` before `set`, since only `set` does context-switches
    # it's easier to reason about tasks that are waiting on both events.

    # Wait for the end-point registration or for the node to quit
    try:
        protocol.get_host_port(receiver_address)
    except UnknownAddress:
        event_healthy.clear()
        event_unhealthy.set()

        backoff = timeout_exponential_backoff(
            nat_keepalive_retries,
            nat_keepalive_timeout,
            nat_invitation_timeout,
        )
        sleep = next(backoff)

        # Poll the discovery until the endpoint shows up or we are stopped.
        while not event_stop.wait(sleep):
            try:
                protocol.get_host_port(receiver_address)
            except UnknownAddress:
                sleep = next(backoff)
            else:
                break

    # Don't wait to send the first Ping and to start sending messages if the
    # endpoint is known
    sleep = 0
    event_unhealthy.clear()
    event_healthy.set()

    while not event_stop.wait(sleep):
        sleep = nat_keepalive_timeout

        ping_nonce['nonce'] += 1
        data = protocol.get_ping(
            ping_nonce['nonce'],
        )

        # Send Ping a few times before setting the node as unreachable
        acknowledged = retry(
            protocol,
            data,
            receiver_address,
            event_stop,
            [nat_keepalive_timeout] * nat_keepalive_retries,
        )

        if event_stop.is_set():
            return

        if not acknowledged:
            # The node is not healthy, clear the event to stop all queue
            # tasks
            protocol.set_node_network_state(
                receiver_address,
                NODE_NETWORK_UNREACHABLE,
            )
            event_healthy.clear()
            event_unhealthy.set()

            # Retry until recovery, used for:
            # - Checking node status.
            # - Nat punching.
            acknowledged = retry(
                protocol,
                data,
                receiver_address,
                event_stop,
                repeat(nat_invitation_timeout),
            )

        if acknowledged:
            event_unhealthy.clear()
            event_healthy.set()
            protocol.set_node_network_state(
                receiver_address,
                NODE_NETWORK_REACHABLE,
            )
class RaidenProtocol(object):
""" Encode the message into a packet and send it.
Each message received is stored by hash and if it is received twice the
previous answer is resent.
Repeat sending messages until an acknowledgment is received or the maximum
number of retries is hit.
"""
    def __init__(
            self,
            transport,
            discovery,
            raiden,
            retry_interval,
            retries_before_backoff,
            nat_keepalive_retries,
            nat_keepalive_timeout,
            nat_invitation_timeout):
        """ Set up per-node bookkeeping for queues, acks and health checks.

        transport/discovery/raiden are collaborating services; the remaining
        arguments tune retry and NAT-keepalive timing for the spawned tasks.
        """
        self.transport = transport
        self.discovery = discovery
        self.raiden = raiden

        self.retry_interval = retry_interval
        self.retries_before_backoff = retries_before_backoff
        self.nat_keepalive_retries = nat_keepalive_retries
        self.nat_keepalive_timeout = nat_keepalive_timeout
        self.nat_invitation_timeout = nat_invitation_timeout

        # Set once on shutdown; every spawned task watches it.
        self.event_stop = Event()

        self.channel_queue = dict()  # TODO: Change keys to the channel address
        self.greenlets = list()
        self.addresses_events = dict()
        self.nodeaddresses_networkstatuses = defaultdict(lambda: NODE_NETWORK_UNKNOWN)

        # Maps the echohash of received and *sucessfully* processed messages to
        # its Ack, used to ignored duplicate messages and resend the Ack.
        self.receivedhashes_to_acks = dict()

        # Maps the echohash to a SentMessageState
        self.senthashes_to_states = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        # Memoize endpoint lookups for a short TTL to avoid hammering the
        # discovery service on every send.
        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)
    def start(self):
        """ Start accepting packets on the underlying transport. """
        self.transport.start()
    def stop_and_wait(self):
        """ Shut down in a safe order and block until all outgoing tasks
        have finished; pending send results are resolved to False.
        """
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.transport.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.transport.stop()

        # Set all the pending results to False
        for waitack in self.senthashes_to_states.itervalues():
            waitack.async_result.set(False)
def get_health_events(self, receiver_address):
""" Starts a healthcheck taks for `receiver_address` and returns a
HealthEvents with locks to react on its current state.
"""
if receiver_address not in self.addresses_events:
self.start_health_check(receiver_address)
return self.addresses_events[receiver_address]
    def start_health_check(self, receiver_address):
        """ Starts a task for healthchecking `receiver_address` if there is not
        one yet.
        """
        if receiver_address not in self.addresses_events:
            # The nonce dict is shared with the healthcheck task so its
            # increments are visible here as well.
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                receiver_address,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = HealthEvents(
                event_healthy=Event(),
                event_unhealthy=Event(),
            )

            self.addresses_events[receiver_address] = events

            self.greenlets.append(gevent.spawn(
                healthcheck,
                self,
                receiver_address,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            ))
    def get_channel_queue(self, receiver_address, token_address):
        """ Return the ordered outgoing queue for the (receiver, token) pair,
        spawning its single_queue_send task on first use.
        """
        key = (
            receiver_address,
            token_address,
        )

        if key in self.channel_queue:
            return self.channel_queue[key]

        queue = NotifyingQueue()
        self.channel_queue[key] = queue

        # Ensure a healthcheck task exists; its events gate the queue task.
        events = self.get_health_events(receiver_address)

        self.greenlets.append(gevent.spawn(
            single_queue_send,
            self,
            receiver_address,
            queue,
            self.event_stop,
            events.event_healthy,
            events.event_unhealthy,
            self.retries_before_backoff,
            self.retry_interval,
            self.retry_interval * 10,
        ))

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'new queue created for',
                node=pex(self.raiden.address),
                token=pex(token_address),
                to=pex(receiver_address),
            )

        return queue
    def send_async(self, receiver_address, message):
        """ Queue `message` for `receiver_address`; returns an AsyncResult set
        once the receiver acknowledges it.

        Raises ValueError for invalid addresses, Ack/Ping messages (those
        bypass the queues) and oversized messages.
        """
        if not isaddress(receiver_address):
            raise ValueError('Invalid address {}'.format(pex(receiver_address)))

        if isinstance(message, (Ack, Ping)):
            raise ValueError('Do not use send for Ack or Ping messages')

        # Messages that are not unique per receiver can result in hash
        # collision, e.g. Secret messages. The hash collision has the undesired
        # effect of aborting message resubmission once /one/ of the nodes
        # replied with an Ack, adding the receiver address into the echohash to
        # avoid these collisions.
        messagedata = message.encode()
        echohash = sha3(messagedata + receiver_address)

        if len(messagedata) > UDP_MAX_MESSAGE_SIZE:
            raise ValueError(
                'message size exceeds the maximum {}'.format(UDP_MAX_MESSAGE_SIZE)
            )

        # All messages must be ordered, but only on a per channel basis.
        token_address = getattr(message, 'token', '')

        # Ignore duplicated messages
        if echohash not in self.senthashes_to_states:
            async_result = AsyncResult()
            self.senthashes_to_states[echohash] = SentMessageState(
                async_result,
                receiver_address,
            )

            queue = self.get_channel_queue(
                receiver_address,
                token_address,
            )

            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'SENDING MESSAGE',
                    to=pex(receiver_address),
                    node=pex(self.raiden.address),
                    message=message,
                    echohash=pex(echohash),
                )

            queue.put(messagedata)
        else:
            # Duplicate send: reuse the original AsyncResult.
            waitack = self.senthashes_to_states[echohash]
            async_result = waitack.async_result

        return async_result
def send_and_wait(self, receiver_address, message, timeout=None):
"""Sends a message and wait for the response ack."""
async_result = self.send_async(receiver_address, message)
return async_result.wait(timeout=timeout)
def maybe_send_ack(self, receiver_address, ack_message):
""" Send ack_message to receiver_address if the transport is running. """
if not isaddress(receiver_address):
raise ValueError('Invalid address {}'.format(pex(receiver_address)))
if not isinstance(ack_message, Ack):
raise ValueError('Use maybe_send_ack only for Ack messages')
messagedata = ack_message.encode()
self.receivedhashes_to_acks[ack_message.echo] = (receiver_address, messagedata)
self._maybe_send_ack(*self.receivedhashes_to_acks[ack_message.echo])
    def _maybe_send_ack(self, receiver_address, messagedata):
        """ ACK must not go into the queue, otherwise nodes will deadlock
        waiting for the confirmation.
        """
        host_port = self.get_host_port(receiver_address)

        # ACKs are sent at the end of the receive method, after the message is
        # sucessfully processed. It may be the case that the server is stopped
        # after the message is received but before the ack is sent, under that
        # circumstance the udp socket would be unavaiable and then an exception
        # is raised.
        #
        # This check verifies the udp socket is still available before trying
        # to send the ack. There must be *no context-switches after this test*.
        if self.transport.server.started:
            self.transport.send(
                self.raiden,
                host_port,
                messagedata,
            )
def get_ping(self, nonce):
""" Returns a signed Ping message.
Note: Ping messages don't have an enforced ordering, so a Ping message
with a higher nonce may be acknowledged first.
"""
message = Ping(nonce)
self.raiden.sign(message)
message_data = message.encode()
return message_data
    def send_raw_with_result(self, data, receiver_address):
        """ Sends data to receiver_address and returns an AsyncResult that will
        be set once the message is acknowledged.

        Always returns same AsyncResult instance for equal input.
        """
        host_port = self.get_host_port(receiver_address)
        echohash = sha3(data + receiver_address)

        if echohash not in self.senthashes_to_states:
            async_result = AsyncResult()
            self.senthashes_to_states[echohash] = SentMessageState(
                async_result,
                receiver_address,
            )
        else:
            async_result = self.senthashes_to_states[echohash].async_result

        # Skip the network send if the ack has already arrived.
        if not async_result.ready():
            self.transport.send(
                self.raiden,
                host_port,
                data,
            )

        return async_result
    def set_node_network_state(self, node_address, node_state):
        """ Record the latest known network state for `node_address`
        (one of the NODE_NETWORK_* constants).
        """
        self.nodeaddresses_networkstatuses[node_address] = node_state
def receive(self, data):
    """ Handle one inbound UDP packet.

    Dispatches on the decoded message type: resolves pending AsyncResults
    for Acks, answers Pings with an Ack, and forwards SignedMessages to
    the raiden service, acknowledging only after successful processing.
    """
    if len(data) > UDP_MAX_MESSAGE_SIZE:
        log.error('receive packet larger than maximum size', length=len(data))
        return

    # Repeat the ACK if the message has been handled before
    echohash = sha3(data + self.raiden.address)
    if echohash in self.receivedhashes_to_acks:
        return self._maybe_send_ack(*self.receivedhashes_to_acks[echohash])

    message = decode(data)

    if isinstance(message, Ack):
        # Ack for a message this node sent earlier; resolve the matching
        # AsyncResult from send_raw_with_result, if one exists.
        waitack = self.senthashes_to_states.get(message.echo)

        if waitack is None:
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'ACK FOR UNKNOWN ECHO',
                    node=pex(self.raiden.address),
                    echohash=pex(message.echo),
                )
        else:
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'ACK RECEIVED',
                    node=pex(self.raiden.address),
                    receiver=pex(waitack.receiver_address),
                    echohash=pex(message.echo),
                )

            waitack.async_result.set(True)

    elif isinstance(message, Ping):
        if ping_log.isEnabledFor(logging.DEBUG):
            ping_log.debug(
                'PING RECEIVED',
                node=pex(self.raiden.address),
                echohash=pex(echohash),
                message=message,
                sender=pex(message.sender),
            )

        # Pings are acknowledged unconditionally.
        ack = Ack(
            self.raiden.address,
            echohash,
        )

        self.maybe_send_ack(
            message.sender,
            ack,
        )

    elif isinstance(message, SignedMessage):
        if log.isEnabledFor(logging.INFO):
            log.info(
                'MESSAGE RECEIVED',
                node=pex(self.raiden.address),
                echohash=pex(echohash),
                message=message,
                message_sender=pex(message.sender)
            )

        try:
            self.raiden.on_message(message, echohash)

            # only send the Ack if the message was handled without exceptions
            ack = Ack(
                self.raiden.address,
                echohash,
            )

            try:
                if log.isEnabledFor(logging.DEBUG):
                    log.debug(
                        'SENDING ACK',
                        node=pex(self.raiden.address),
                        to=pex(message.sender),
                        echohash=pex(echohash),
                    )

                self.maybe_send_ack(
                    message.sender,
                    ack,
                )
            except (InvalidAddress, UnknownAddress) as e:
                # Failing to ack is tolerable; the sender will retransmit.
                log.debug("Couldn't send the ACK", e=e)

        except (UnknownAddress, InvalidNonce, TransferWhenClosed, TransferUnwanted) as e:
            log.DEV('maybe unwanted transfer', e=e)

        except (UnknownTokenAddress, InvalidLocksRoot) as e:
            if log.isEnabledFor(logging.WARN):
                log.warn(str(e))

    # decode() did not yield a recognized message type (e.g. garbage or
    # an unsupported packet); log the raw payload for diagnosis.
    elif log.isEnabledFor(logging.ERROR):
        log.error(
            'Invalid message',
            message=data.encode('hex'),
        )
| tomashaber/raiden | raiden/network/protocol.py | Python | mit | 24,753 | 0.000485 |
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
'''default config for webservice'''

# Flask debug mode is off by default; deployments may override this.
DEBUG = False
# Emit compact JSON responses (no pretty-printing) by default.
JSONIFY_PRETTYPRINT_REGULAR = False

print('default config loaded')
| kernsuite-debian/lofar | SAS/ResourceAssignment/ResourceAssignmentEditor/config/default.py | Python | gpl-3.0 | 967 | 0 |
# -*- coding: utf-8 -*-
import json
import logging
import vobject
from datetime import datetime
from contextlib import contextmanager
from radicale import ical
from yats.shortcuts import get_ticket_model, build_ticket_search_ext, touch_ticket, remember_changes, mail_ticket, jabber_ticket, check_references, add_history, mail_comment, jabber_comment
from yats.models import tickets_reports, UserProfile, get_flow_end, tickets_comments, ticket_resolution, get_default_resolution, convertPrio
from yats.forms import SimpleTickets
from django.contrib.auth.models import AnonymousUser, User
from django.http import QueryDict
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from djradicale.models import DBProperties
logger = logging.getLogger('djradicale')
# iCalendar component types this storage backend parses and serializes.
# Cards (contacts) are intentionally left unsupported.
ICAL_TYPES = (
    ical.Event,
    ical.Todo,
    ical.Journal,
    # ical.Card,
    ical.Timezone,
)
class FakeRequest:
    """Minimal stand-in for a Django HTTP request.

    Carries just the attributes the YATS helpers read (GET/POST/session
    and an anonymous user) for code paths invoked outside a real
    request/response cycle.
    """

    def __init__(self):
        self.GET, self.POST, self.session = {}, {}, {}
        self.user = AnonymousUser()
class Collection(ical.Collection):
    """Radicale collection backed by YATS ticket reports.

    Each ticket report a user owns is exposed as one ``<slug>.ics``
    calendar; the tickets matched by the report's saved search become
    VTODO entries.  Writing a VTODO back closes, changes or creates the
    corresponding ticket.
    """

    @property
    def headers(self):
        return (
            ical.Header('PRODID:-//YATS//NONSGML Radicale Server//EN'),
            ical.Header('VERSION:%s' % self.version))

    def delete(self):
        # Deleting the calendar deletes the underlying report.
        repid = self._getReportFromUrl(self.path)
        tickets_reports.objects.get(pk=repid).delete()

    def append(self, name, text):
        """Apply an uploaded iCalendar payload.

        For each VTODO item: a COMPLETED status closes the matching
        ticket; otherwise the ticket is updated in place, or created if
        no ticket with that uuid exists yet.
        """
        new_items = self._parse(text, ICAL_TYPES, name)
        timezones = list(filter(
            lambda x: x.tag == ical.Timezone.tag, new_items.values()))

        request = self._getRequestFromUrl(self.path)

        for new_item in new_items.values():
            if new_item.tag == ical.Timezone.tag:
                continue

            if new_item.name not in self.items:
                self.items[new_item.name] = new_item

            text = ical.serialize(self.tag, self.headers, [new_item] + timezones)
            cal = vobject.readOne(text)

            # close ticket
            if hasattr(cal.vtodo, 'status') and cal.vtodo.status.value == 'COMPLETED':
                ticket = get_ticket_model()
                try:
                    flow_end = get_flow_end()
                    resolution = get_default_resolution()
                    close_comment = _('closed via CalDAV')

                    tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
                    tic.resolution = resolution
                    tic.closed = True
                    tic.close_date = timezone.now()
                    tic.state = flow_end
                    tic.save(user=request.user)

                    com = tickets_comments()
                    com.comment = _('ticket closed - resolution: %(resolution)s\n\n%(comment)s') % {'resolution': resolution.name, 'comment': close_comment}
                    com.ticket = tic
                    com.action = 1
                    com.save(user=request.user)

                    check_references(request, com)
                    touch_ticket(request.user, tic.id)
                    add_history(request, tic, 1, close_comment)
                    mail_comment(request, com.pk)
                    jabber_comment(request, com.pk)
                except Exception:
                    # Closing is best-effort; log instead of silently
                    # swallowing so failures are diagnosable.
                    logger.exception('closing ticket via CalDAV failed')

            # change or new
            else:
                params = {
                    'caption': cal.vtodo.summary.value,
                    'description': cal.vtodo.description.value if hasattr(cal.vtodo, 'description') else None,
                    'uuid': cal.vtodo.uid.value,
                    'show_start': cal.vtodo.due.value if hasattr(cal.vtodo, 'due') else None,
                    'priority': convertPrio(cal.vtodo.priority.value) if hasattr(cal.vtodo, 'priority') else None
                }
                fakePOST = QueryDict(mutable=True)
                fakePOST.update(params)

                form = SimpleTickets(fakePOST)
                if form.is_valid():
                    cd = form.cleaned_data

                    ticket = get_ticket_model()
                    # change ticket
                    try:
                        tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
                        tic.caption = cd['caption']
                        tic.description = cd['description']
                        tic.priority = cd['priority']
                        # tic.assigned = cd['assigned']
                        tic.show_start = cd['show_start']
                        tic.save(user=request.user)

                    # new ticket
                    except ticket.DoesNotExist:
                        tic = ticket()
                        tic.caption = cd['caption']
                        tic.description = cd['description']
                        if 'priority' not in cd or not cd['priority']:
                            if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_PRIORITY') and settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY:
                                tic.priority_id = settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY
                        else:
                            tic.priority = cd['priority']
                        tic.assigned = request.user
                        if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_CUSTOMER') and settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER:
                            if settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER == -1:
                                tic.customer = request.organisation
                            else:
                                # BUGFIX: was KEEP_IT_SIMPLE_DEFAULT_CUSTOME
                                # (missing trailing R), which raised
                                # AttributeError at runtime.
                                tic.customer_id = settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER
                        if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_COMPONENT') and settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT:
                            tic.component_id = settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT
                        tic.show_start = cd['show_start']
                        tic.uuid = cal.vtodo.uid.value
                        tic.save(user=request.user)

                        if tic.assigned:
                            touch_ticket(tic.assigned, tic.pk)

                        for ele in form.changed_data:
                            form.initial[ele] = ''
                        remember_changes(request, form, tic)

                        touch_ticket(request.user, tic.pk)

                        mail_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_MAIL_RCPT, is_api=True)
                        jabber_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_JABBER_RCPT, is_api=True)
                else:
                    raise Exception(form.errors)

    def remove(self, name):
        # Item removal is intentionally a no-op; tickets are closed via
        # COMPLETED VTODOs instead of being deleted.
        pass

    def replace(self, name, text):
        self.append(name, text)

    @property
    def text(self):
        return ical.serialize(self.tag, self.headers, self.items.values())

    @classmethod
    def children(cls, path):
        """Yield the children of the collection at local ``path``."""
        request = cls._getRequestFromUrl(path)
        children = list(tickets_reports.objects.filter(active_record=True, c_user=request.user).values_list('slug', flat=True))
        children = ['%s/%s.ics' % (request.user.username, itm) for itm in children]
        return map(cls, children)

    @classmethod
    def is_node(cls, path):
        """Return ``True`` if relative ``path`` is a node.

        A node is a WebDAV collection whose members are other collections.
        """
        # Only the per-user root (``<username>``) is a node.
        request = cls._getRequestFromUrl(path)
        return path == request.user.username

    @classmethod
    def is_leaf(cls, path):
        """Return ``True`` if relative ``path`` is a leaf.

        A leaf is a WebDAV collection whose members are not collections.
        """
        result = False
        if '.ics' in path:
            try:
                request = cls._getRequestFromUrl(path)
                rep = tickets_reports.objects.get(active_record=True, pk=cls._getReportFromUrl(path))
                tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
                search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
                result = (tic.exists())
            except Exception:
                # Treat any lookup failure as "not a leaf", but log it
                # instead of silently capturing sys.exc_info().
                logger.exception('is_leaf check failed for %s', path)
        return result

    @property
    def last_modified(self):
        try:
            request = self._getRequestFromUrl(self.path)
            rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
            tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
            search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
            date = tic.latest('u_date')
            return datetime.strftime(
                date.last_action_date, '%a, %d %b %Y %H:%M:%S %z')
        except Exception:
            # Returns None when no matching ticket/report exists.
            logger.exception('last_modified failed for %s', self.path)

    @property
    def tag(self):
        with self.props as props:
            if 'tag' not in props:
                props['tag'] = 'VCALENDAR'
            return props['tag']

    @property
    @contextmanager
    def props(self):
        # On enter: load the stored properties for this path, if any.
        properties = {}
        try:
            props = DBProperties.objects.get(path=self.path)
        except DBProperties.DoesNotExist:
            pass
        else:
            properties.update(json.loads(props.text))
        old_properties = properties.copy()

        yield properties

        # On exit: persist only if the caller actually changed something.
        if old_properties != properties:
            props, created = DBProperties.objects.get_or_create(path=self.path)
            props.text = json.dumps(properties)
            props.save()

    @property
    def items(self):
        itms = {}
        try:
            request = self._getRequestFromUrl(self.path)
            if self.path == request.user.username:
                return itms

            rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
            tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
            search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))

            for item in tic:
                text = self._itemToICal(item)
                itms.update(self._parse(text, ICAL_TYPES))
        except Exception:
            logger.exception('building items failed for %s', self.path)
        return itms

    @classmethod
    def _getRequestFromUrl(cls, path):
        # The first path segment is the username; build a FakeRequest
        # around that user and their organisation.
        user = path.split('/')[0]
        request = FakeRequest()
        request.user = User.objects.get(username=user)
        request.organisation = UserProfile.objects.get(user=request.user).organisation
        return request

    @classmethod
    def _getReportFromUrl(cls, path):
        # Map ``.../<slug>.ics`` to the pk of the active report with that
        # slug; 0 when the path is not an ics file.
        if '.ics' in path:
            file = path.split('/')[-1]
            file = file.replace('.ics', '')
            repid = tickets_reports.objects.get(active_record=True, slug=file).pk
            return repid
        return 0

    @classmethod
    def _itemToICal(cls, item):
        """Serialize one ticket into a VCALENDAR containing one VTODO."""
        cal = vobject.iCalendar()
        cal.add('vtodo')
        cal.vtodo.add('summary').value = item.caption
        cal.vtodo.add('uid').value = str(item.uuid)
        cal.vtodo.add('created').value = item.c_date
        if item.closed:
            cal.vtodo.add('status').value = 'COMPLETED'
        if item.priority:
            cal.vtodo.add('priority').value = str(item.priority.caldav)
        else:
            cal.vtodo.add('priority').value = '0'
        if item.description:
            cal.vtodo.add('description').value = item.description
        if item.show_start:
            # cal.vtodo.add('dstart').value = item.show_start
            cal.vtodo.add('due').value = item.show_start
        cal.vtodo.add('valarm')
        cal.vtodo.valarm.add('uuid').value = '%s-%s' % (str(item.uuid), item.pk)
        cal.vtodo.valarm.add('x-wr-alarmuid').value = '%s-%s' % (str(item.uuid), item.pk)
        cal.vtodo.valarm.add('action').value = 'DISPLAY'
        # cal.vtodo.valarm.add('x-apple-proximity').value = 'DEPART'
        cal.vtodo.valarm.add('description').value = 'Erinnerung an ein Ereignis'
        # cal.vtodo.valarm.add('trigger').value =
        # TRIGGER;VALUE=DATE-TIME:20180821T200000Z
        cal.vtodo.add('x-radicale-name').value = '%s.ics' % str(item.uuid)
        return cal.serialize()
| mediafactory/yats | modules/yats/caldav/storage.py | Python | mit | 12,605 | 0.002697 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import bool_property, unicode_list_property, unicode_property, typed_property
class BankDataTO(object):
    """Transfer object describing the bank resolved for an IBAN."""
    bankCode = unicode_property('bankCode')  # national bank code
    name = unicode_property('name')  # human-readable bank name
    bic = unicode_property('bic')  # Business Identifier Code
class OpenIbanResultTO(object):
    """Transfer object holding the result of an IBAN validation."""
    valid = bool_property('valid')  # True when the IBAN passed validation
    messages = unicode_list_property('message')  # validation messages, if any
    iban = unicode_property('iban')  # the IBAN that was checked
    bankData = typed_property('bankData', BankDataTO)  # type: BankDataTO
    checkResults = typed_property('checkResults', dict)  # per-check result flags
| our-city-app/oca-backend | src/rogerthat/bizz/payment/to.py | Python | apache-2.0 | 1,164 | 0.000859 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .module import CmsoModule
__all__ = ['CmsoModule']
| laurent-george/weboob | modules/cmso/__init__.py | Python | agpl-3.0 | 788 | 0 |
import datetime
import itertools
import re
import urllib2
import mimetypes
import operator
import logging
import sys
import traceback
import warnings
import tagging
import tagging.models
import vidscraper
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments.moderation import CommentModerator, moderator
from django.contrib.sites.models import Site
from django.contrib.contenttypes import generic
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage
from django.core.signals import request_finished
from django.core.validators import ipv4_re
from django.db import models
from django.template import Context, loader
from django.utils.html import escape as html_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from haystack import connections, connection_router
from mptt.models import MPTTModel
from notification import models as notification
from slugify import slugify
from localtv import utils, settings as lsettings
from localtv.managers import SiteRelatedManager, VideoManager
from localtv.signals import post_video_from_vidscraper, submit_finished
from localtv.templatetags.filters import sanitize
# (service name, URL regex) pairs used to recognize which hosted video
# service a feed URL belongs to; matched case-insensitively, in order
# (see Feed.video_service).
VIDEO_SERVICE_REGEXES = (
    ('YouTube', r'http://gdata\.youtube\.com/feeds/'),
    ('YouTube', r'http://(www\.)?youtube\.com/'),
    ('blip.tv', r'http://(.+\.)?blip\.tv/'),
    ('Vimeo', r'http://(www\.)?vimeo\.com/'),
    ('Dailymotion', r'http://(www\.)?dailymotion\.com/rss'))
class Thumbnailable(models.Model):
    """
    A type of Model that has thumbnails generated for it. Now that we're using
    Daguerre for thumbnails, this is just for backwards compatibility.
    """
    # we set this to "logo" for SiteSettings, 'icon' for WidgetSettings
    thumbnail_attribute = 'thumbnail'

    class Meta:
        abstract = True

    @property
    def has_thumbnail(self):
        """Deprecated. True when the thumbnail field holds a file."""
        warnings.warn("has_thumbnail is deprecated and will be removed in a "
                      "future version.", DeprecationWarning)
        return bool(getattr(self, self.thumbnail_attribute))

    @property
    def thumbnail_path(self):
        """Deprecated. Storage name of the thumbnail file, or ''."""
        warnings.warn("thumbnail_path is deprecated and will be removed in a "
                      "future version.", DeprecationWarning)
        stored = getattr(self, self.thumbnail_attribute)
        return stored.name if stored else ''
class SiteSettings(Thumbnailable):
    """
    A model for storing Site-specific settings (feature switches, custom HTML
    and CSS, etc) in the database rather than in settings files. Most of
    these can thus be set by site admins rather than sysadmins. There are
    also a few fields for storing site event state.
    """
    thumbnail_attribute = 'logo'

    #: Link to the Site these settings are for.
    site = models.OneToOneField(Site)

    ## Site styles ##
    #: Custom logo image for this site.
    logo = models.ImageField(upload_to=utils.UploadTo('localtv/sitesettings/logo/%Y/%m/%d/'), blank=True)
    #: Custom background image for this site.
    background = models.ImageField(upload_to=utils.UploadTo('localtv/sitesettings/background/%Y/%m/%d/'),
                                   blank=True)
    #: Arbitrary custom css overrides.
    css = models.TextField(blank=True)

    ## Custom HTML ##
    #: Subheader for the site.
    tagline = models.CharField(max_length=4096, blank=True)
    #: Arbitrary custom HTML which (currently) is used as a site description
    #: on the main page.
    sidebar_html = models.TextField(blank=True)
    #: Arbitrary custom HTML which displays in the footer of all non-admin pages.
    footer_html = models.TextField(blank=True)
    #: Arbitrary custom HTML which displays on the about page.
    about_html = models.TextField(blank=True)

    ## Site permissions ##
    #: A collection of Users who have administrative access to the site.
    admins = models.ManyToManyField('auth.User', blank=True,
                                    related_name='admin_for')

    #: Whether or not the Submit Video button should display or not.
    #: Doesn't affect whether videos can be submitted or not.
    #: See http://bugzilla.pculture.org/show_bug.cgi?id=19809
    display_submit_button = models.BooleanField(default=True)

    #: Whether or not users need to log in to submit videos.
    submission_requires_login = models.BooleanField(default=False)

    #: Whether or not an email address needs to be given with an
    #: unauthenticated video submission.
    submission_requires_email = models.BooleanField(default=False)

    ## Feature switches ##
    #: Whether playlist functionality is enabled.
    playlists_enabled = models.IntegerField(default=1)

    #: Whether the original publication date or date added to this site
    #: should be used for sorting videos.
    use_original_date = models.BooleanField(
        default=True,
        help_text="If set, use the original date the video was posted. "
                  "Otherwise, use the date the video was added to this site.")

    #: Whether comments should be held for moderation.
    screen_all_comments = models.BooleanField(
        verbose_name='Hold comments for moderation',
        default=True,
        help_text="Hold all comments for moderation by default?")

    #: Whether leaving a comment requires you to be logged in.
    comments_required_login = models.BooleanField(
        default=False,
        verbose_name="Require Login",
        help_text="If True, comments require the user to be logged in.")

    ## Tracking fields ##
    #: Whether a user has elected to hide the "get started" section in
    #: the admin interface.
    hide_get_started = models.BooleanField(default=False)

    #: Site-scoped manager with caching (see SiteRelatedManager).
    objects = SiteRelatedManager()

    def __unicode__(self):
        return u'%s (%s)' % (self.site.name, self.site.domain)

    def user_is_admin(self, user):
        """
        Return True if the given User is an admin for this SiteSettings.

        Anonymous and inactive users are never admins; superusers are
        always admins, regardless of the ``admins`` relation.
        """
        if not user.is_authenticated() or not user.is_active:
            return False

        if user.is_superuser:
            return True

        return self.admins.filter(pk=user.pk).exists()

    def should_show_dashboard(self):
        '''Returns True for backwards-compatibility.'''
        warnings.warn("should_show_dashboard is deprecated and will be "
                      "removed in a future version.", DeprecationWarning)
        return True
class WidgetSettingsManager(SiteRelatedManager):
    """Manager that seeds new WidgetSettings with the site's logo."""
    def _new_entry(self, site, using):
        # Create the default entry, then copy the SiteSettings logo (if
        # one exists) over as the widget icon so widgets match the
        # site's branding.
        ws = super(WidgetSettingsManager, self)._new_entry(site, using)
        try:
            site_settings = SiteSettings.objects.get_cached(site, using)
        except SiteSettings.DoesNotExist:
            pass
        else:
            if site_settings.logo:
                site_settings.logo.open()
                ws.icon = site_settings.logo
                ws.save()
        return ws
class WidgetSettings(Thumbnailable):
    """
    A Model which represents the options for controlling the widget creator.

    Each visual option comes in pairs: the value itself plus an
    ``*_editable`` flag controlling whether widget embedders may change it.
    """
    thumbnail_attribute = 'icon'

    site = models.OneToOneField(Site)

    title = models.CharField(max_length=250, blank=True)
    title_editable = models.BooleanField(default=True)

    icon = models.ImageField(upload_to=utils.UploadTo('localtv/widgetsettings/icon/%Y/%m/%d/'), blank=True)
    icon_editable = models.BooleanField(default=False)

    css = models.FileField(upload_to=utils.UploadTo('localtv/widgetsettings/css/%Y/%m/%d/'), blank=True)
    css_editable = models.BooleanField(default=False)

    bg_color = models.CharField(max_length=20, blank=True)
    bg_color_editable = models.BooleanField(default=False)
    text_color = models.CharField(max_length=20, blank=True)
    text_color_editable = models.BooleanField(default=False)
    border_color = models.CharField(max_length=20, blank=True)
    border_color_editable = models.BooleanField(default=False)

    objects = WidgetSettingsManager()

    def get_title_or_reasonable_default(self):
        """Return the HTML-escaped title, or a generated default when the
        title is empty or still the placeholder domain."""
        # Is the title worth using? If so, use that.
        use_title = True
        if self.title.endswith('example.com'):
            use_title = False
        if not self.title:
            use_title = False

        # Okay, so either we return the title, or a sensible default
        if use_title:
            return html_escape(self.title)
        return self.generate_reasonable_default_title()

    def generate_reasonable_default_title(self):
        """Build a 'Watch Videos on ...' title from the current Site."""
        prefix = 'Watch Videos on %s'

        # Now, work on calculating what goes at the end.
        site = Site.objects.get_current()

        # The default suffix is a self-link. If the site name and
        # site domain are plausible, do that.
        if ((site.name and site.name.lower() != 'example.com') and
                (site.domain and site.domain.lower() != 'example.com')):
            suffix = '<a href="http://%s/">%s</a>' % (
                site.domain, html_escape(site.name))

        # First, we try the site name, if that's a nice string.
        # NOTE(review): site.name is not escaped in this branch (only in
        # the link branch above) — presumably acceptable because site
        # names are admin-controlled; confirm before treating the output
        # as safe HTML.
        elif site.name and site.name.lower() != 'example.com':
            suffix = site.name

        # Else, we try the site domain, if that's not example.com
        elif site.domain.lower() != 'example.com':
            suffix = site.domain
        else:
            suffix = 'our video site'

        return prefix % suffix
class Source(Thumbnailable):
    """
    An abstract base class to represent things which are sources of multiple
    videos. Current subclasses are Feed and SavedSearch.
    """
    id = models.AutoField(primary_key=True)
    site = models.ForeignKey(Site)
    thumbnail = models.ImageField(upload_to=utils.UploadTo('localtv/source/thumbnail/%Y/%m/%d/'),
                                  blank=True)
    #: Whether videos from this source are approved automatically.
    auto_approve = models.BooleanField(default=False)
    auto_update = models.BooleanField(default=True,
                                      help_text=_("If selected, new videos will"
                                                  " automatically be imported "
                                                  "from this source."))
    user = models.ForeignKey('auth.User', null=True, blank=True)
    #: Categories/authors applied automatically to every imported video.
    auto_categories = models.ManyToManyField("Category", blank=True)
    auto_authors = models.ManyToManyField("auth.User", blank=True,
                                          related_name='auto_%(class)s_set')

    class Meta:
        abstract = True

    def update(self, video_iter, source_import, clear_rejected=False):
        """
        Imports videos from a feed/search. `videos` is an iterable which
        returns :class:`vidscraper.videos.Video` objects. We use
        :method:`.Video.from_vidscraper_video` to map the Vidscraper fields to
        Video attributes.

        If ``clear_rejected`` is ``True``, rejected versions of videos that are
        found in the ``video_iter`` will be deleted and re-imported.
        """
        author_pks = list(self.auto_authors.values_list('pk', flat=True))
        category_pks = list(self.auto_categories.values_list('pk', flat=True))

        import_opts = source_import.__class__._meta

        # Local import — presumably avoids a circular dependency between
        # models and tasks; TODO confirm.
        from localtv.tasks import video_from_vidscraper_video, mark_import_pending

        total_videos = 0

        try:
            for vidscraper_video in video_iter:
                total_videos += 1
                try:
                    # One celery task per video; failures for a single
                    # video are recorded as skips without aborting the
                    # whole import.
                    video_from_vidscraper_video.delay(
                        vidscraper_video.serialize(),
                        site_pk=self.site_id,
                        import_app_label=import_opts.app_label,
                        import_model=import_opts.module_name,
                        import_pk=source_import.pk,
                        status=Video.PENDING,
                        author_pks=author_pks,
                        category_pks=category_pks,
                        clear_rejected=clear_rejected)
                except Exception:
                    source_import.handle_error(
                        'Import task creation failed for %r' % (
                            vidscraper_video.url,),
                        is_skip=True,
                        with_exception=True)
        except Exception:
            # Iteration itself blew up: mark the import failed.
            source_import.fail(with_exception=True)
            return

        source_import.__class__._default_manager.filter(
            pk=source_import.pk
        ).update(
            total_videos=total_videos
        )
        mark_import_pending.delay(import_app_label=import_opts.app_label,
                                  import_model=import_opts.module_name,
                                  import_pk=source_import.pk)
class Feed(Source):
    """
    Feed to pull videos in from.

    If the same feed is used on two different sites, they will require two
    separate entries here.

    Fields:
      - feed_url: The location of this field
      - site: which site this feed belongs to
      - name: human readable name for this feed
      - webpage: webpage that this feed\'s content is associated with
      - description: human readable description of this item
      - last_updated: last time we ran self.update_items()
      - when_submitted: when this feed was first registered on this site
      - status: one of Feed.STATUS_CHOICES
      - etag: used to see whether or not the feed has changed since our last
        update.
      - auto_approve: whether or not to set all videos in this feed to approved
        during the import process
      - user: a user that submitted this feed, if any
      - auto_categories: categories that are automatically applied to videos on
        import
      - auto_authors: authors that are automatically applied to videos on
        import
    """
    INACTIVE = 0
    ACTIVE = 1

    STATUS_CHOICES = (
        (INACTIVE, _(u'Inactive')),
        (ACTIVE, _(u'Active')),
    )

    feed_url = models.URLField(verify_exists=False)
    name = models.CharField(max_length=250)
    webpage = models.URLField(verify_exists=False, blank=True)
    description = models.TextField(blank=True)
    last_updated = models.DateTimeField()
    when_submitted = models.DateTimeField(auto_now_add=True)
    etag = models.CharField(max_length=250, blank=True)
    #: Cached display string; maintained by the pre_save handler below.
    calculated_source_type = models.CharField(max_length=255, blank=True, default='')
    status = models.IntegerField(choices=STATUS_CHOICES, default=INACTIVE)

    class Meta:
        unique_together = (
            ('feed_url', 'site'))
        get_latest_by = 'last_updated'

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        return ('localtv_list_feed', [self.pk])

    def update(self, **kwargs):
        """
        Fetch and import new videos from this feed.
        """
        # Don't start a second import while one is still running.
        try:
            FeedImport.objects.get(source=self,
                                   status=FeedImport.STARTED)
        except FeedImport.DoesNotExist:
            pass
        else:
            logging.info('Skipping import of %s: already in progress' % self)
            return

        feed_import = FeedImport.objects.create(source=self,
                                                auto_approve=self.auto_approve)

        # Inactive feeds get a full crawl (no result cap); active feeds
        # only fetch the most recent 100 entries.
        video_iter = vidscraper.auto_feed(
            self.feed_url,
            max_results=None if self.status == self.INACTIVE else 100,
            api_keys=lsettings.API_KEYS,
        )

        try:
            video_iter.load()
        except Exception:
            feed_import.fail("Data loading failed for {source}",
                             with_exception=True)
            return

        self.etag = getattr(video_iter, 'etag', None) or ''
        self.last_updated = datetime.datetime.now()

        if self.status == self.INACTIVE:
            # If these fields have already been changed, don't
            # override those changes. Don't unset the name field
            # if no further data is available.
            if self.name == self.feed_url:
                self.name = video_iter.title or self.name
            if not self.webpage:
                self.webpage = video_iter.webpage or ''
            if not self.description:
                self.description = video_iter.description or ''

        self.save()

        super(Feed, self).update(video_iter, source_import=feed_import,
                                 **kwargs)

    def source_type(self):
        return self.calculated_source_type

    def _calculate_source_type(self):
        # 'User: <service>' for recognized video services, plain 'Feed'
        # otherwise.
        video_service = self.video_service()
        if video_service is None:
            return u'Feed'
        else:
            return u'User: %s' % video_service

    def video_service(self):
        """Return the hosted-service name matching feed_url, or None."""
        for service, regexp in VIDEO_SERVICE_REGEXES:
            if re.search(regexp, self.feed_url, re.I):
                return service
def pre_save_set_calculated_source_type(instance, **kwargs):
    """pre_save handler for Feed: refresh the cached source-type string,
    and re-save dependent videos when the feed was renamed."""
    # Always save the calculated_source_type
    instance.calculated_source_type = instance._calculate_source_type()
    # Plus, if the name changed, we have to recalculate all the Videos that depend on us.
    try:
        v = Feed.objects.get(id=instance.id)
    except Feed.DoesNotExist:
        # New feed: no previous name, nothing else to refresh.
        return instance
    if v.name != instance.name:
        # recalculate all the sad little videos' calculated_source_type
        for vid in instance.video_set.all():
            vid.save()
models.signals.pre_save.connect(pre_save_set_calculated_source_type,
                                sender=Feed)
class Category(MPTTModel):
    """
    A category for videos to be contained in.

    Categories and tags aren't too different functionally, but categories are
    more strict as they can't be defined by visitors. Categories can also be
    hierarchical.

    Fields:
     - site: A link to the django.contrib.sites.models.Site object this object
       is bound to
     - name: Name of this category
     - slug: a slugified verison of the name, used to create more friendly URLs
     - logo: An image to associate with this category
     - description: human readable description of this item
     - parent: Reference to another Category. Allows you to have heirarchical
       categories.
    """
    site = models.ForeignKey(Site)
    name = models.CharField(
        max_length=80, verbose_name='Category Name',
        help_text=_("The name is used to identify the category almost "
                    "everywhere; for example, under a video or in a "
                    "category widget."))
    slug = models.SlugField(
        verbose_name='Category Slug',
        help_text=_("The \"slug\" is the URL-friendly version of the name. It "
                    "is usually lower-case and contains only letters, numbers "
                    "and hyphens."))
    logo = models.ImageField(
        upload_to=utils.UploadTo('localtv/category/logo/%Y/%m/%d/'),
        blank=True,
        verbose_name='Thumbnail/Logo',
        help_text=_("Optional. For example: a leaf for 'environment' or the "
                    "logo of a university department."))
    description = models.TextField(
        blank=True, verbose_name='Description (HTML)',
        help_text=_("Optional. The description is not prominent by default, but"
                    " some themes may show it."))
    parent = models.ForeignKey(
        'self', blank=True, null=True,
        related_name='child_set',
        verbose_name='Category Parent',
        help_text=_("Categories, unlike tags, can have a hierarchy."))

    class MPTTMeta:
        order_insertion_by = ['name']

    class Meta:
        unique_together = (
            ('slug', 'site'),
            ('name', 'site'))

    def __unicode__(self):
        return self.name

    def dashes(self):
        """
        Returns a string of em dashes equal to the :class:`Category`\ 's
        level. This is used to indent the category name in the admin
        templates.
        """
        return mark_safe('&mdash;' * self.level)

    @models.permalink
    def get_absolute_url(self):
        return ('localtv_category', [self.slug])

    def approved_set(self):
        """
        Returns active videos for the category and its subcategories, ordered
        by decreasing best date.
        """
        # MPTT left/right/tree_id bounds select this category plus all of
        # its descendants in a single query.
        opts = self._mptt_meta

        lookups = {
            'status': Video.ACTIVE,
            'categories__left__gte': getattr(self, opts.left_attr),
            'categories__left__lte': getattr(self, opts.right_attr),
            'categories__tree_id': getattr(self, opts.tree_id_attr)
        }
        lookups = self._tree_manager._translate_lookups(**lookups)
        return Video.objects.filter(**lookups).distinct()
    approved_set = property(approved_set)

    def unique_error_message(self, model_class, unique_check):
        return 'Category with this %s already exists.' % (
            unique_check[0],)
class SavedSearch(Source):
    """
    A set of keywords to regularly pull in new videos from.

    There's an administrative interface for doing "live searches".

    Fields:
     - site: site this savedsearch applies to
     - query_string: a whitespace-separated list of words to search for.
       Words starting with a dash will be processed as negative query terms
     - when_created: date and time that this search was saved.
    """
    query_string = models.TextField()
    when_created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return self.query_string
    def update(self, **kwargs):
        """
        Fetch and import new videos from this search.
        """
        # Bail out if an import for this search is already running;
        # starting a second one would create duplicate videos.
        try:
            SearchImport.objects.get(source=self,
                                     status=SearchImport.STARTED)
        except SearchImport.DoesNotExist:
            pass
        else:
            logging.info('Skipping import of %s: already in progress' % self)
            return
        search_import = SearchImport.objects.create(
            source=self,
            auto_approve=self.auto_approve
        )
        searches = vidscraper.auto_search(
            self.query_string,
            max_results=100,
            api_keys=lsettings.API_KEYS,
        )
        video_iters = []
        for video_iter in searches:
            try:
                video_iter.load()
            except Exception:
                # Record the failure and move on to the next search suite.
                search_import.handle_error(u'Skipping import of search results '
                                           u'from %s' % video_iter.__class__.__name__,
                                           with_exception=True)
                continue
            video_iters.append(video_iter)
        if video_iters:
            super(SavedSearch, self).update(itertools.chain(*video_iters),
                                            source_import=search_import,
                                            **kwargs)
        else:
            # Mark the import as failed if none of the searches could load.
            search_import.fail("All searches failed for {source}",
                               with_exception=False)
    def source_type(self):
        # Human-readable label used by Video.source_type.
        return u'Search'
class SourceImportIndex(models.Model):
    """Abstract link between an imported Video and the import run."""
    video = models.OneToOneField('Video', unique=True)
    # Position of the video within the source's listing, if known.
    index = models.PositiveIntegerField(blank=True, null=True)
    class Meta:
        abstract = True
class FeedImportIndex(SourceImportIndex):
    """Maps imported videos to the FeedImport that produced them."""
    source_import = models.ForeignKey('FeedImport', related_name='indexes')
class SearchImportIndex(SourceImportIndex):
    """Maps imported videos to the SearchImport that produced them."""
    source_import = models.ForeignKey('SearchImport', related_name='indexes')
class SourceImportError(models.Model):
    """Abstract log entry for an error hit during an import run."""
    message = models.TextField()
    # Formatted traceback; filled in when the error is recorded
    # with_exception=True, empty otherwise.
    traceback = models.TextField(blank=True)
    is_skip = models.BooleanField(help_text="Whether this error represents a "
                                            "video that was skipped.")
    datetime = models.DateTimeField(auto_now_add=True)
    class Meta:
        abstract = True
class FeedImportError(SourceImportError):
    """Error logged during a FeedImport run."""
    source_import = models.ForeignKey('FeedImport', related_name='errors')
class SearchImportError(SourceImportError):
    """Error logged during a SearchImport run."""
    source_import = models.ForeignKey('SearchImport', related_name='errors')
class SourceImport(models.Model):
    """
    Abstract tracking record for a single run of a source (feed or search)
    import: its status, progress counters, and per-video errors/indexes.
    """
    # Lifecycle states for an import run.
    STARTED = 'started'
    PENDING = 'pending'
    COMPLETE = 'complete'
    FAILED = 'failed'
    STATUS_CHOICES = (
        (STARTED, _('Started')),
        (PENDING, _('Pending haystack updates')),
        (COMPLETE, _('Complete')),
        (FAILED, _('Failed'))
    )
    start = models.DateTimeField(auto_now_add=True)
    last_activity = models.DateTimeField(blank=True, null=True)
    total_videos = models.PositiveIntegerField(blank=True, null=True)
    videos_imported = models.PositiveIntegerField(default=0)
    videos_skipped = models.PositiveIntegerField(default=0)
    #: Caches the auto_approve of the search on the import, so that the imported
    #: videos can be approved en masse at the end of the import based on the
    #: settings at the beginning of the import.
    auto_approve = models.BooleanField()
    status = models.CharField(max_length=10, choices=STATUS_CHOICES,
                              default=STARTED)
    class Meta:
        get_latest_by = 'start'
        ordering = ['-start']
        abstract = True
    def is_running(self):
        """
        Returns True if the SourceImport is currently running.
        """
        return self.status in (self.STARTED, self.PENDING)
    def set_video_source(self, video):
        """
        Sets the value of the correct field on the ``video`` to mark it as
        having the same source as this import. Must be implemented by
        subclasses.
        """
        raise NotImplementedError
    def get_videos(self):
        """
        Returns the queryset of videos tied to this import. Must be
        implemented by subclasses.
        """
        raise NotImplementedError
    def handle_error(self, message, is_skip=False, with_exception=False):
        """
        Logs the error with the default logger and to the database.

        :param message: A human-friendly description of the error that does
                        not contain sensitive information.
        :param is_skip: ``True`` if the error results in a video being skipped.
                        Default: False.
        :param with_exception: ``True`` if exception information should be
                               recorded. Default: False.
        """
        if with_exception:
            exc_info = sys.exc_info()
            logging.warn(message, exc_info=exc_info)
            tb = ''.join(traceback.format_exception(*exc_info))
        else:
            logging.warn(message)
            tb = ''
        self.errors.create(message=message,
                           source_import=self,
                           traceback=tb,
                           is_skip=is_skip)
        if is_skip:
            # F() expression so concurrent workers don't clobber the count.
            self.__class__._default_manager.filter(pk=self.pk
                        ).update(videos_skipped=models.F('videos_skipped') + 1)
    def get_index_creation_kwargs(self, video, vidscraper_video):
        # Kwargs for creating the per-video index row; subclasses may extend.
        return {
            'source_import': self,
            'video': video,
            'index': vidscraper_video.index
        }
    def handle_video(self, video, vidscraper_video):
        """
        Creates an index instance connecting the video to this import.

        :param video: The :class:`Video` instance which was imported.
        :param vidscraper_video: The original video from :mod:`vidscraper`.
        """
        self.indexes.create(
            **self.get_index_creation_kwargs(video, vidscraper_video))
        # F() expression so concurrent workers don't clobber the count.
        self.__class__._default_manager.filter(pk=self.pk
                        ).update(videos_imported=models.F('videos_imported') + 1)
    def fail(self, message="Import failed for {source}", with_exception=False):
        """
        Mark an import as failed, along with some post-fail cleanup.
        """
        self.status = self.FAILED
        self.last_activity = datetime.datetime.now()
        self.save()
        self.handle_error(message.format(source=self.source),
                          with_exception=with_exception)
        # Remove any videos that made it in before the failure.
        self.get_videos().delete()
class FeedImport(SourceImport):
    """Tracks a single import run for a Feed."""
    source = models.ForeignKey(Feed, related_name='imports')
    def set_video_source(self, video):
        # Tie the video to this import's feed.
        video.feed_id = self.source_id
    def get_videos(self):
        return Video.objects.filter(feedimportindex__source_import=self)
class SearchImport(SourceImport):
    """Tracks a single import run for a SavedSearch."""
    source = models.ForeignKey(SavedSearch, related_name='imports')
    def set_video_source(self, video):
        # Tie the video to this import's saved search.
        video.search_id = self.source_id
    def get_videos(self):
        return Video.objects.filter(searchimportindex__source_import=self)
class Video(Thumbnailable):
    """
    Fields:
     - name: Name of this video
     - site: Site this video is attached to
     - description: Video description
     - tags: A list of Tag objects associated with this item
     - categories: Similar to Tags
     - authors: the person/people responsible for this video
     - file_url: The file this object points to (if any) ... if not
       provided, at minimum we need the embed_code for the item.
     - file_url_length: size of the file, in bytes
     - file_url_mimetype: mimetype of the file
     - when_submitted: When this item was first entered into the
       database
     - when_approved: When this item was marked to appear publicly on
       the site
     - when_published: When this file was published at its original
       source (if known)
     - last_featured: last time this item was featured.
     - status: one of Video.STATUS_CHOICES
     - feed: which feed this item came from (if any)
     - website_url: The page that this item is associated with.
     - embed_code: code used to embed this item.
     - flash_enclosure_url: Crappy enclosure link that doesn't
       actually point to a url.. the kind crappy flash video sites
       give out when they don't actually want their enclosures to
       point to video files.
     - guid: data used to identify this video
     - thumbnail_url: url to the thumbnail, if such a thing exists
     - user: if not None, the user who submitted this video
     - search: if not None, the SavedSearch from which this video came
     - video_service_user: if not blank, the username of the user on the video
       service who owns this video. We can figure out the service from the
       website_url.
     - contact: a free-text field for anonymous users to specify some contact
       info
     - notes: a free-text field to add notes about the video
    """
    # Moderation states.
    UNAPPROVED = 0
    ACTIVE = 1
    REJECTED = 2
    PENDING = 3
    STATUS_CHOICES = (
        (UNAPPROVED, _(u'Unapproved')),
        (ACTIVE, _(u'Active')),
        (REJECTED, _(u'Rejected')),
        (PENDING, _(u'Waiting on import to finish')),
    )
    site = models.ForeignKey(Site)
    name = models.CharField(verbose_name="Video Name", max_length=250)
    description = models.TextField(verbose_name="Video Description (optional)",
                                   blank=True)
    thumbnail_url = models.URLField(verbose_name="Thumbnail URL (optional)",
                                    verify_exists=False, blank=True,
                                    max_length=400)
    thumbnail = models.ImageField(upload_to=utils.UploadTo('localtv/video/thumbnail/%Y/%m/%d/'),
                                  blank=True)
    categories = models.ManyToManyField(Category, blank=True)
    authors = models.ManyToManyField('auth.User', blank=True,
                                     related_name='authored_set')
    file_url = models.URLField(verify_exists=False, blank=True,
                               max_length=2048)
    file_url_length = models.IntegerField(null=True, blank=True)
    file_url_mimetype = models.CharField(max_length=60, blank=True)
    when_modified = models.DateTimeField(auto_now=True,
                                         db_index=True,
                                         default=datetime.datetime.now)
    when_submitted = models.DateTimeField(auto_now_add=True)
    when_approved = models.DateTimeField(null=True, blank=True)
    when_published = models.DateTimeField(null=True, blank=True)
    last_featured = models.DateTimeField(null=True, blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES, default=UNAPPROVED)
    feed = models.ForeignKey(Feed, null=True, blank=True)
    website_url = models.URLField(
        verbose_name='Original Video Page URL (optional)',
        max_length=2048,
        verify_exists=False,
        blank=True)
    embed_code = models.TextField(verbose_name="Video <embed> code", blank=True)
    flash_enclosure_url = models.URLField(verify_exists=False, max_length=2048,
                                          blank=True)
    guid = models.CharField(max_length=250, blank=True)
    user = models.ForeignKey('auth.User', null=True, blank=True)
    search = models.ForeignKey(SavedSearch, null=True, blank=True)
    video_service_user = models.CharField(max_length=250, blank=True)
    video_service_url = models.URLField(verify_exists=False, blank=True)
    contact = models.CharField(verbose_name='Email (optional)', max_length=250,
                               blank=True)
    notes = models.TextField(verbose_name='Notes (optional)', blank=True)
    # Denormalized copy of source_type(), refreshed on every save by the
    # pre_save_video_set_calculated_source_type signal handler (see below).
    calculated_source_type = models.CharField(max_length=255, blank=True, default='')
    objects = VideoManager()
    taggeditem_set = generic.GenericRelation(tagging.models.TaggedItem,
                                             content_type_field='content_type',
                                             object_id_field='object_id')
    class Meta:
        ordering = ['-when_submitted']
        get_latest_by = 'when_modified'
    def __unicode__(self):
        return self.name
    def clean(self):
        # clean is always run during ModelForm cleaning. If a model form is in
        # play, rejected videos don't matter; the submission of that form
        # should be considered valid. During automated imports, rejected
        # videos are not excluded.
        self._check_for_duplicates(exclude_rejected=True)
    def _check_for_duplicates(self, exclude_rejected=True):
        # Raise ValidationError when another video on this site shares this
        # video's guid, website_url, or file_url.
        if not self.embed_code and not self.file_url:
            raise ValidationError("Video has no embed code or file url.")
        qs = Video.objects.filter(site=self.site_id)
        if exclude_rejected:
            qs = qs.exclude(status=Video.REJECTED)
        if self.pk is not None:
            qs = qs.exclude(pk=self.pk)
        if self.guid and qs.filter(guid=self.guid).exists():
            raise ValidationError("Another video with the same guid "
                                  "already exists.")
        if (self.website_url and
                qs.filter(website_url=self.website_url).exists()):
            raise ValidationError("Another video with the same website url "
                                  "already exists.")
        if self.file_url and qs.filter(file_url=self.file_url).exists():
            raise ValidationError("Another video with the same file url "
                                  "already exists.")
    def clear_rejected_duplicates(self):
        """
        Deletes rejected copies of this video based on the file_url,
        website_url, and guid fields.
        """
        if not any((self.website_url, self.file_url, self.guid)):
            return
        q_filter = models.Q()
        if self.website_url:
            q_filter |= models.Q(website_url=self.website_url)
        if self.file_url:
            q_filter |= models.Q(file_url=self.file_url)
        if self.guid:
            q_filter |= models.Q(guid=self.guid)
        qs = Video.objects.filter(
            site=self.site_id,
            status=Video.REJECTED).filter(q_filter)
        qs.delete()
    @models.permalink
    def get_absolute_url(self):
        return ('localtv_view_video', (),
                {'video_id': self.id,
                 'slug': slugify(self.name)[:30]})
    def save(self, **kwargs):
        """
        Adds support for an ``update_index`` kwarg, defaulting to ``True``.
        If this kwarg is ``False``, then no index updates will be run by the
        search index.
        """
        # This actually relies on logic in
        # :meth:`QueuedSearchIndex._enqueue_instance`
        self._update_index = kwargs.pop('update_index', True)
        super(Video, self).save(**kwargs)
    save.alters_data = True
    @classmethod
    def from_vidscraper_video(cls, video, status=None, commit=True,
                              source_import=None, site_pk=None, authors=None,
                              categories=None, update_index=True):
        """
        Builds a :class:`Video` instance from a
        :class:`vidscraper.videos.Video` instance. If `commit` is False,
        the :class:`Video` will not be saved, and the created instance will have
        a `save_m2m()` method that must be called after you call `save()`.
        """
        # Only trust the scraped file URL if it never expires.
        video_file = video.get_file()
        if video_file and video_file.expires is None:
            file_url = video_file.url
        else:
            file_url = None
        if status is None:
            status = cls.UNAPPROVED
        if site_pk is None:
            site_pk = settings.SITE_ID
        now = datetime.datetime.now()
        instance = cls(
            guid=video.guid or '',
            name=video.title or '',
            description=video.description or '',
            website_url=video.link or '',
            when_published=video.publish_datetime,
            file_url=file_url or '',
            file_url_mimetype=getattr(video_file, 'mime_type', '') or '',
            file_url_length=getattr(video_file, 'length', None),
            when_submitted=now,
            when_approved=now if status == cls.ACTIVE else None,
            status=status,
            thumbnail_url=video.thumbnail_url or '',
            embed_code=video.embed_code or '',
            flash_enclosure_url=video.flash_enclosure_url or '',
            video_service_user=video.user or '',
            video_service_url=video.user_url or '',
            site_id=site_pk
        )
        if instance.description:
            # Prefer an embedded miro-community-description block if one is
            # present, then strip unsafe HTML (allowing <img>).
            soup = BeautifulSoup(video.description)
            for tag in soup.find_all(
                    'div', {'class': "miro-community-description"}):
                instance.description = unicode(tag)
                break
            instance.description = sanitize(instance.description,
                                            extra_filters=['img'])
        instance._vidscraper_video = video
        if source_import is not None:
            source_import.set_video_source(instance)
        def save_m2m():
            # Deferred M2M/author/tag setup; must run after instance.save().
            if authors:
                instance.authors = authors
            if video.user:
                name = video.user
                if ' ' in name:
                    first, last = name.split(' ', 1)
                else:
                    first, last = name, ''
                author, created = User.objects.get_or_create(
                    username=name[:30],
                    defaults={'first_name': first[:30],
                              'last_name': last[:30]})
                if created:
                    author.set_unusable_password()
                    author.save()
                    utils.get_profile_model()._default_manager.create(
                        user=author, website=video.user_url or '')
                instance.authors.add(author)
            if categories:
                instance.categories = categories
            if video.tags:
                if settings.FORCE_LOWERCASE_TAGS:
                    fix = lambda t: t.lower().strip()
                else:
                    fix = lambda t: t.strip()
                tags = set(fix(tag) for tag in video.tags if tag.strip())
                for tag_name in tags:
                    tag, created = \
                        tagging.models.Tag._default_manager.get_or_create(name=tag_name)
                    tagging.models.TaggedItem._default_manager.create(
                        tag=tag, object=instance)
            if source_import is not None:
                source_import.handle_video(instance, video)
            post_video_from_vidscraper.send(sender=cls, instance=instance,
                                            vidscraper_video=video)
            if update_index:
                using = connection_router.for_write()
                index = connections[using].get_unified_index().get_index(cls)
                index._enqueue_update(instance)
        if commit:
            instance.save(update_index=False)
            save_m2m()
        else:
            instance.save_m2m = save_m2m
        return instance
    def get_tags(self):
        # Before the video is saved, fall back to tags scraped from the
        # original vidscraper video (if any).
        if self.pk is None:
            vidscraper_video = getattr(self, '_vidscraper_video', None)
            return getattr(vidscraper_video, 'tags', None) or []
        if (hasattr(self, '_prefetched_objects_cache') and
                'taggeditem_set' in self._prefetched_objects_cache):
            # Use the prefetch cache to avoid an extra query.
            return [item.tag for item in
                    self._prefetched_objects_cache['taggeditem_set']]
        return self.tags
    def try_to_get_file_url_data(self):
        """
        Do a HEAD request on self.file_url to find information about
        self.file_url_length and self.file_url_mimetype

        Note that while this method fills in those attributes, it does *NOT*
        run self.save() ... so be sure to do so after calling this method!
        """
        if not self.file_url:
            return
        request = urllib2.Request(utils.quote_unicode_url(self.file_url))
        request.get_method = lambda: 'HEAD'
        try:
            http_file = urllib2.urlopen(request, timeout=5)
        except Exception:
            # Best-effort only; leave the fields untouched on any failure.
            pass
        else:
            self.file_url_length = http_file.headers.get('content-length')
            self.file_url_mimetype = http_file.headers.get('content-type', '')
            if self.file_url_mimetype in ('application/octet-stream', ''):
                # We got a not-useful MIME type; guess!
                guess = mimetypes.guess_type(self.file_url)
                if guess[0] is not None:
                    self.file_url_mimetype = guess[0]
    def submitter(self):
        """
        Return the user that submitted this video. If necessary, use the
        submitter from the originating feed or savedsearch.
        """
        if self.user is not None:
            return self.user
        elif self.feed is not None:
            return self.feed.user
        elif self.search is not None:
            return self.search.user
        else:
            # XXX warning?
            return None
    def when(self):
        """
        Simple method for getting the when_published date if the video came
        from a feed or a search, otherwise the when_approved date.
        """
        site_settings = SiteSettings.objects.get_cached(self.site_id,
                                                        self._state.db)
        if site_settings.use_original_date and self.when_published:
            return self.when_published
        return self.when_approved or self.when_submitted
    def source_type(self):
        # Human-readable description of where this video came from; the
        # related objects may have been deleted, hence the except clauses.
        if self.id and self.search_id:
            try:
                return u'Search: %s' % self.search
            except SavedSearch.DoesNotExist:
                return u''
        if self.id and self.feed_id:
            try:
                if self.feed.video_service():
                    return u'User: %s: %s' % (
                        self.feed.video_service(),
                        self.feed.name)
                else:
                    return 'Feed: %s' % self.feed.name
            except Feed.DoesNotExist:
                return ''
        if self.video_service_user:
            return u'User: %s: %s' % (self.video_service(),
                                      self.video_service_user)
        return ''
    def video_service(self):
        # Name of the hosting service matched against website_url, or None.
        if not self.website_url:
            return
        url = self.website_url
        for service, regexp in VIDEO_SERVICE_REGEXES:
            if re.search(regexp, url, re.I):
                return service
    def when_prefix(self):
        """
        When videos are bulk imported (from a feed or a search), we list the
        date as "published", otherwise we show 'posted'.
        """
        site_settings = SiteSettings.objects.get_cached(site=self.site_id,
                                                        using=self._state.db)
        if self.when_published and site_settings.use_original_date:
            return 'published'
        else:
            return 'posted'
    @property
    def all_categories(self):
        """
        Returns a set of all the categories to which this video belongs.
        """
        categories = self.categories.all()
        if not categories:
            return categories
        q_list = []
        opts = Category._mptt_meta
        for category in categories:
            # Match the category itself plus all of its MPTT ancestors.
            l = {
                'left__lte': getattr(category, opts.left_attr),
                'right__gte': getattr(category, opts.right_attr),
                'tree_id': getattr(category, opts.tree_id_attr)
            }
            l = Category._tree_manager._translate_lookups(**l)
            q_list.append(models.Q(**l))
        q = reduce(operator.or_, q_list)
        return Category.objects.filter(q)
def pre_save_video_set_calculated_source_type(instance, **kwargs):
    """Keep Video.calculated_source_type in sync on every save."""
    # Always recalculate the source_type field.
    instance.calculated_source_type = instance.source_type()
models.signals.pre_save.connect(pre_save_video_set_calculated_source_type,
                                sender=Video)
class Watch(models.Model):
    """
    Record of a video being watched.

    fields:
     - video: Video that was watched
     - timestamp: when watched
     - user: user that watched it, if any
     - ip_address: IP address of the user
    """
    video = models.ForeignKey(Video)
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    user = models.ForeignKey('auth.User', blank=True, null=True)
    ip_address = models.IPAddressField()
    @classmethod
    def add(Class, request, video):
        """
        Adds a record of a watched video to the database. If the request came
        from localhost, check to see if it was forwarded to (hopefully) get the
        right IP address.
        """
        ignored_bots = getattr(settings, 'LOCALTV_WATCH_IGNORED_USER_AGENTS',
                               ('bot', 'spider', 'crawler'))
        user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
        # Skip known crawlers; any() replaces the original manual loop.
        if user_agent and ignored_bots and any(
                bot in user_agent for bot in ignored_bots):
            return
        ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
        if not ipv4_re.match(ip):
            # Fall back to a null address for missing/non-IPv4 values.
            ip = '0.0.0.0'
        if hasattr(request, 'user') and request.user.is_authenticated():
            user = request.user
        else:
            user = None
        try:
            Class(video=video, user=user, ip_address=ip).save()
        except Exception:
            # Watch tracking is best-effort; never break the request over it.
            pass
class VideoModerator(CommentModerator):
    """
    Comment moderation rules for videos: who may comment, which comments are
    held for review, and the notification e-mails sent for new comments.
    """
    def allow(self, comment, video, request):
        """Allow the comment unless the site requires login to comment."""
        site_settings = SiteSettings.objects.get_cached(site=video.site_id,
                                                        using=video._state.db)
        if site_settings.comments_required_login:
            return request.user and request.user.is_authenticated()
        else:
            return True
    def email(self, comment, video, request):
        """Send comment notifications to admins, the video owner, and
        earlier commenters on the same video (each at most once)."""
        # we do the import in the function because otherwise there's a circular
        # dependency
        from localtv.utils import send_notice
        site_settings = SiteSettings.objects.get_cached(site=video.site_id,
                                                        using=video._state.db)
        t = loader.get_template('comments/comment_notification_email.txt')
        c = Context({'comment': comment,
                     'content_object': video,
                     'user_is_admin': True})
        subject = '[%s] New comment posted on "%s"' % (video.site.name,
                                                       video)
        message = t.render(c)
        # Admins always hear about new comments.
        send_notice('admin_new_comment', subject, message,
                    site_settings=site_settings)
        admin_new_comment = notification.NoticeType.objects.get(
            label="admin_new_comment")
        if video.user and video.user.email:
            # Notify the video's owner, unless they already receive the
            # admin notice (avoids duplicate e-mails).
            video_comment = notification.NoticeType.objects.get(
                label="video_comment")
            if notification.should_send(video.user, video_comment, "1") and \
                    not notification.should_send(video.user,
                                                 admin_new_comment, "1"):
                c = Context({'comment': comment,
                             'content_object': video,
                             'user_is_admin': False})
                message = t.render(c)
                EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL,
                             [video.user.email]).send(fail_silently=True)
        comment_post_comment = notification.NoticeType.objects.get(
            label="comment_post_comment")
        previous_users = set()
        for previous_comment in comment.__class__.objects.filter(
                content_type=comment.content_type,
                object_pk=video.pk,
                is_public=True,
                is_removed=False,
                submit_date__lte=comment.submit_date,
                user__email__isnull=False).exclude(
                user__email='').exclude(pk=comment.pk):
            if (previous_comment.user not in previous_users and
                    notification.should_send(previous_comment.user,
                                             comment_post_comment, "1") and
                    not notification.should_send(previous_comment.user,
                                                 admin_new_comment, "1")):
                previous_users.add(previous_comment.user)
                c = Context({'comment': comment,
                             'content_object': video,
                             'user_is_admin': False})
                message = t.render(c)
                EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL,
                             [previous_comment.user.email]).send(fail_silently=True)
    def moderate(self, comment, video, request):
        """Hold the comment for review when the site screens all comments,
        unless the requesting user is a site admin."""
        site_settings = SiteSettings.objects.get_cached(site=video.site_id,
                                                        using=video._state.db)
        if site_settings.screen_all_comments:
            # BUG FIX: getattr() previously had no default, so a request
            # without a `user` attribute raised AttributeError here instead
            # of screening the comment.
            if not getattr(request, 'user', None):
                return True
            else:
                return not site_settings.user_is_admin(request.user)
        else:
            return False
# Hook up comment moderation and tagging for videos.
moderator.register(Video, VideoModerator)
tagging.register(Video)
def finished(sender, **kwargs):
    # Drop the cached SiteSettings at the end of every request.
    SiteSettings.objects.clear_cache()
request_finished.connect(finished)
def tag_unicode(self):
    # hack to make sure that Unicode data gets returned for all tags
    # (Python 2: Tag.name may arrive as a utf-8 encoded bytestring)
    if isinstance(self.name, str):
        self.name = self.name.decode('utf8')
    return self.name
# Monkeypatch django-tagging's Tag model so __unicode__ is encoding-safe.
tagging.models.Tag.__unicode__ = tag_unicode
def send_new_video_email(sender, **kwargs):
    """Notify admins that a submitted video is waiting in the review queue."""
    site_settings = SiteSettings.objects.get_cached(site=sender.site_id,
                                                    using=sender._state.db)
    if sender.status == Video.ACTIVE:
        # don't send the e-mail for videos that are already active
        return
    t = loader.get_template('localtv/submit_video/new_video_email.txt')
    c = Context({'video': sender})
    message = t.render(c)
    subject = '[%s] New Video in Review Queue: %s' % (sender.site.name,
                                                      sender)
    utils.send_notice('admin_new_submission',
                      subject, message,
                      site_settings=site_settings)
# weak=False: keep the handler alive even though it is a module-level closure.
submit_finished.connect(send_new_video_email, weak=False)
def create_email_notices(app, created_models, verbosity, **kwargs):
    """Register every notice type localtv can send; runs on post_syncdb."""
    # (label, display, description, default) — registered in this order.
    notice_types = (
        ('video_comment', 'New comment on your video',
         'Someone commented on your video', 2),
        ('comment_post_comment', 'New comment after your comment',
         'Someone commented on a video after you', 2),
        ('video_approved', 'Your video was approved',
         'An admin approved your video', 2),
        ('admin_new_comment', 'New comment',
         'A comment was submitted to the site', 1),
        ('admin_new_submission', 'New Submission',
         'A new video was submitted', 1),
        ('admin_queue_weekly', 'Weekly Queue Update',
         'A weekly e-mail of the queue status', 1),
        ('admin_queue_daily', 'Daily Queue Update',
         'A daily e-mail of the queue status', 1),
        ('admin_video_updated', 'Video Updated',
         'A video from a service was updated', 1),
        ('admin_new_playlist', 'Request for Playlist Moderation',
         'A new playlist asked to be public', 2),
    )
    for label, display, description, default in notice_types:
        notification.create_notice_type(label, display, description,
                                        default=default,
                                        verbosity=verbosity)
models.signals.post_syncdb.connect(create_email_notices)
def delete_comments(sender, instance, **kwargs):
    """Remove all comments attached to a Video that is being deleted."""
    # Imported here to avoid a circular import at module load time.
    from django.contrib.comments import get_model
    comment_model = get_model()
    comment_model.objects.filter(
        object_pk=instance.pk,
        content_type__app_label='localtv',
        content_type__model='video'
    ).delete()
models.signals.pre_delete.connect(delete_comments,
                                  sender=Video)
| pculture/mirocommunity | localtv/models.py | Python | agpl-3.0 | 55,425 | 0.000902 |
from rest_framework import exceptions as drf_exceptions
from rest_framework import versioning as drf_versioning
from rest_framework.compat import unicode_http_header
from rest_framework.utils.mediatypes import _MediaType
from api.base import exceptions
from api.base import utils
from api.base.renderers import BrowsableAPIRendererNoForms
from api.base.settings import LATEST_VERSIONS
def get_major_version(version):
    """Return the integer major component of a dotted version string."""
    major, _, _ = version.partition('.')
    return int(major)
def url_path_version_to_decimal(url_path_version):
    """Convert a URL-style version to decimal form: 'v2' -> '2.0'."""
    numeric_part = url_path_version.split('v')[1]
    return str(float(numeric_part))
def decimal_version_to_url_path(decimal_version):
    """Convert a decimal version to URL form: '2.0' -> 'v2'."""
    return 'v%s' % get_major_version(decimal_version)
def get_latest_sub_version(major_version):
    """Look up the newest minor release for a major version: '2' -> '2.6'.

    Returns None when the major version is unknown.
    """
    return LATEST_VERSIONS.get(major_version)
class BaseVersioning(drf_versioning.BaseVersioning):
    """
    Resolves the requested API version from the URL path, the ``Accept``
    header, and the ``version`` query parameter, and enforces that any
    pinned versions agree with each other.
    """
    def __init__(self):
        super(BaseVersioning, self).__init__()
    def get_url_path_version(self, kwargs):
        """Return the version pinned in the URL path, or the default."""
        invalid_version_message = 'Invalid version in URL path.'
        version = kwargs.get(self.version_param)
        if version is None:
            return self.default_version
        version = url_path_version_to_decimal(version)
        if not self.is_allowed_version(version):
            raise drf_exceptions.NotFound(invalid_version_message)
        if get_major_version(version) == get_major_version(self.default_version):
            # The URL only pins a major version; within the default major
            # version, serve the configured default sub-version.
            return self.default_version
        return version
    def get_header_version(self, request, major_version):
        """Return the version pinned in the Accept header, or None."""
        invalid_version_message = 'Invalid version in "Accept" header.'
        media_type = _MediaType(request.accepted_media_type)
        version = media_type.params.get(self.version_param)
        if not version:
            return None
        if version == 'latest':
            return get_latest_sub_version(major_version)
        version = unicode_http_header(version)
        if not self.is_allowed_version(version):
            raise drf_exceptions.NotAcceptable(invalid_version_message)
        return version
    def get_default_version(self, request, major_version):
        """Returns the latest available version for the browsable api, otherwise REST_FRAMEWORK default version"""
        if request.accepted_renderer.__class__ == BrowsableAPIRendererNoForms:
            return get_latest_sub_version(major_version)
        return self.default_version
    def get_query_param_version(self, request, major_version):
        """Return the version pinned in the query string, or None."""
        invalid_version_message = 'Invalid version in query parameter.'
        version = request.query_params.get(self.version_param)
        if not version:
            return None
        if version == 'latest':
            return get_latest_sub_version(major_version)
        if not self.is_allowed_version(version):
            raise drf_exceptions.NotFound(invalid_version_message)
        return version
    def validate_pinned_versions(self, url_path_version, header_version, query_parameter_version):
        """
        Raise a Conflict when the header or query-parameter version disagrees
        with the URL path's major version, or with each other.
        """
        url_path_major_version = get_major_version(url_path_version)
        header_major_version = get_major_version(header_version) if header_version else None
        query_major_version = get_major_version(query_parameter_version) if query_parameter_version else None
        if header_version and header_major_version != url_path_major_version:
            raise exceptions.Conflict(
                detail='Version {} specified in "Accept" header does not fall within URL path version {}'.format(
                    header_version,
                    url_path_version,
                ),
            )
        if query_parameter_version and query_major_version != url_path_major_version:
            raise exceptions.Conflict(
                detail='Version {} specified in query parameter does not fall within URL path version {}'.format(
                    query_parameter_version,
                    url_path_version,
                ),
            )
        if header_version and query_parameter_version and (header_version != query_parameter_version):
            raise exceptions.Conflict(
                detail='Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
                    header_version,
                    query_parameter_version,
                ),
            )
    def determine_version(self, request, *args, **kwargs):
        """DRF entry point: pick the effective version for this request."""
        url_path_version = self.get_url_path_version(kwargs)
        major_version = get_major_version(url_path_version)
        header_version = self.get_header_version(request, major_version)
        query_parameter_version = self.get_query_param_version(request, major_version)
        version = url_path_version
        if header_version or query_parameter_version:
            # A pinned sub-version (header/query) wins, after validation.
            self.validate_pinned_versions(url_path_version, header_version, query_parameter_version)
            version = header_version if header_version else query_parameter_version
        else:
            version = self.get_default_version(request, major_version)
        return version
    def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
        """Build an absolute URL, preserving any version pinned via query."""
        url_path_version = self.get_url_path_version(kwargs)
        major_version = get_major_version(url_path_version)
        query_parameter_version = self.get_query_param_version(request, major_version)
        kwargs = {} if (kwargs is None) else kwargs
        kwargs[self.version_param] = decimal_version_to_url_path(url_path_version)
        query_kwargs = {'version': query_parameter_version} if query_parameter_version else None
        return utils.absolute_reverse(
            viewname, query_kwargs=query_kwargs, args=args, kwargs=kwargs,
        )
| caseyrollins/osf.io | api/base/versioning.py | Python | apache-2.0 | 5,741 | 0.003135 |
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSAccount import XFSAccount
class FilerioCom(XFSAccount):
    # pyLoad plugin metadata.
    __name__ = "FilerioCom"
    __type__ = "account"
    __version__ = "0.07"
    __status__ = "testing"
    __description__ = """FileRio.in account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "[email protected]")]
    # Domain the XFSAccount base class uses to build request URLs.
    PLUGIN_DOMAIN = "filerio.in"
| Guidobelix/pyload | module/plugins/accounts/FilerioCom.py | Python | gpl-3.0 | 407 | 0.014742 |
import pytest
from ray.train.callbacks.results_preprocessors import (
ExcludedKeysResultsPreprocessor,
IndexedResultsPreprocessor,
SequentialResultsPreprocessor,
AverageResultsPreprocessor,
MaxResultsPreprocessor,
WeightedAverageResultsPreprocessor,
)
def test_excluded_keys_results_preprocessor():
    """Excluding key "a" should leave only the "b" entries."""
    raw = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
    processed = ExcludedKeysResultsPreprocessor("a").preprocess(raw)
    assert processed == [{"b": 2}, {"b": 4}]
def test_indexed_results_preprocessor():
    """Only the results at positions 0 and 2 should be kept."""
    raw = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
    processed = IndexedResultsPreprocessor([0, 2]).preprocess(raw)
    assert processed == [{"a": 1}, {"a": 3}]
def test_sequential_results_preprocessor():
    """Exclusion then index selection should compose left-to-right."""
    raw = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    # Step 1 drops "a": [{"b": 2}, {"b": 4}, {"b": 6}, {"b": 8}]
    # Step 2 keeps rows 0 and 2: [{"b": 2}, {"b": 6}]
    pipeline = SequentialResultsPreprocessor(
        [ExcludedKeysResultsPreprocessor("a"), IndexedResultsPreprocessor([0, 2])]
    )
    assert pipeline.preprocess(raw) == [{"b": 2}, {"b": 6}]
def test_average_results_preprocessor():
    """AverageResultsPreprocessor appends avg(<key>) entries to every result."""
    import numpy as np

    raw = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    averages = {
        "avg(%s)" % key: np.mean([row[key] for row in raw]) for key in ("a", "b")
    }
    expected = [dict(row, **averages) for row in raw]

    preprocessed = AverageResultsPreprocessor(["a", "b"]).preprocess(raw)

    assert preprocessed == expected
def test_max_results_preprocessor():
    """MaxResultsPreprocessor appends max(<key>) entries to every result."""
    import numpy as np

    raw = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    maxima = {
        "max(%s)" % key: np.max([row[key] for row in raw]) for key in ("a", "b")
    }
    expected = [dict(row, **maxima) for row in raw]

    preprocessed = MaxResultsPreprocessor(["a", "b"]).preprocess(raw)

    assert preprocessed == expected
def test_weighted_average_results_preprocessor():
    """WeightedAverageResultsPreprocessor adds a b-weighted mean of "a"."""
    import numpy as np

    raw = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    # Mirror the preprocessor's formula: sum(a_i * b_i / sum(b)).
    total = np.sum([row["b"] for row in raw])
    weighted = np.sum([row["a"] * row["b"] / total for row in raw])
    expected = [dict(row, **{"weight_avg_b(a)": weighted}) for row in raw]

    preprocessed = WeightedAverageResultsPreprocessor(["a"], "b").preprocess(raw)

    assert preprocessed == expected
@pytest.mark.parametrize(
    ("results_preprocessor", "expected_value"),
    [(AverageResultsPreprocessor, 2.0), (MaxResultsPreprocessor, 3.0)],
)
def test_warning_in_aggregate_results_preprocessors(
    caplog, results_preprocessor, expected_value
):
    """Aggregate preprocessors warn about missing/invalid metrics and still
    aggregate over the remaining valid values."""
    import logging
    from copy import deepcopy
    from ray.util import debug

    caplog.at_level(logging.WARNING)

    results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
    results2 = [{"a": 1}, {"a": "invalid"}, {"a": 3}, {"a": "invalid"}]
    results3 = [{"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}]
    results4 = [{"a": 1}, {"a": 2}, {"a": 3}, {"c": 4}]

    # test case 1: metric key `b` is missing from all workers
    results_preprocessor1 = results_preprocessor(["b"])
    results_preprocessor1.preprocess(results1)
    assert "`b` is not reported from workers, so it is ignored." in caplog.text

    # test case 2: some values of key `a` have invalid data type
    results_preprocessor2 = results_preprocessor(["a"])
    expected2 = deepcopy(results2)
    aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
    for res in expected2:
        res.update({aggregation_key: expected_value})
    assert results_preprocessor2.preprocess(results2) == expected2

    # test case 3: all key `a` values are invalid
    results_preprocessor2.preprocess(results3)
    assert "`a` value type is not valid, so it is ignored." in caplog.text

    # test case 4: some workers don't report key `a`
    expected4 = deepcopy(results4)
    aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
    for res in expected4:
        res.update({aggregation_key: expected_value})
    assert results_preprocessor2.preprocess(results4) == expected4

    for record in caplog.records:
        assert record.levelname == "WARNING"

    # Reset the log-once dedup keys so subsequent tests (and the other
    # parametrization) can trigger the same warnings again.
    debug.reset_log_once("b")
    debug.reset_log_once("a")


def test_warning_in_weighted_average_results_preprocessors(caplog):
    """WeightedAverageResultsPreprocessor warns and falls back to equal
    weights when the weight key is missing or has invalid values."""
    import logging
    from copy import deepcopy

    caplog.at_level(logging.WARNING)

    results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
    results2 = [{"b": 1}, {"b": 2}, {"b": 3}, {"b": 4}]
    results3 = [
        {"a": 1, "c": 3},
        {"a": 2, "c": "invalid"},
        {"a": "invalid", "c": 1},
        {"a": 4, "c": "invalid"},
    ]
    results4 = [
        {"a": 1, "c": "invalid"},
        {"a": 2, "c": "invalid"},
        {"a": 3, "c": "invalid"},
        {"a": 4, "c": "invalid"},
    ]

    # test case 1: weight key `b` is not reported from all workers
    results_preprocessor1 = WeightedAverageResultsPreprocessor(["a"], "b")
    expected1 = deepcopy(results1)
    for res in expected1:
        res.update({"weight_avg_b(a)": 2.5})
    assert results_preprocessor1.preprocess(results1) == expected1
    assert (
        "Averaging weight `b` is not reported by all workers in `train.report()`."
        in caplog.text
    )
    assert "Use equal weight instead." in caplog.text

    # test case 2: metric key `a` (to be averaged) is not reported from all workers
    results_preprocessor1.preprocess(results2)
    assert "`a` is not reported from workers, so it is ignored." in caplog.text

    # test case 3: both metric and weight keys have invalid data type
    results_preprocessor2 = WeightedAverageResultsPreprocessor(["a"], "c")
    expected3 = deepcopy(results3)
    for res in expected3:
        res.update({"weight_avg_c(a)": 1.0})
    assert results_preprocessor2.preprocess(results3) == expected3

    # test case 4: all weight values are invalid
    expected4 = deepcopy(results4)
    for res in expected4:
        res.update({"weight_avg_c(a)": 2.5})
    assert results_preprocessor2.preprocess(results4) == expected4
    assert "Averaging weight `c` value type is not valid." in caplog.text

    for record in caplog.records:
        assert record.levelname == "WARNING"


if __name__ == "__main__":
    import pytest
    import sys

    sys.exit(pytest.main(["-v", "-x", __file__]))
| ray-project/ray | python/ray/train/tests/test_results_preprocessors.py | Python | apache-2.0 | 7,269 | 0.001238 |
import re
from django.core.exceptions import ImproperlyConfigured
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from tranquil.models import Importer
__all__ = ( 'engine', 'meta', 'Session', )
class EngineCache(object):
    """Borg-style cache of the shared SQLAlchemy engine, metadata and
    session factory.

    Every instance shares ``__shared_state``, so the engine is created at
    most once per process no matter how many times ``EngineCache()`` is
    constructed.
    """

    __shared_state = dict(
        engine = None,
        meta = None,
        Session = None,
    )

    # Django DATABASE_ENGINE value -> SQLAlchemy dialect (URL scheme).
    _mappings = {
        'sqlite3': 'sqlite',
        'mysql': 'mysql',
        'postgresql': 'postgresql',
        'postgresql_psycopg2': 'postgresql+psycopg2',
        'oracle': 'oracle',
    }

    def __init__(self):
        from django.conf import settings
        self.__dict__ = self.__shared_state
        if self.engine is not None:
            # Engine was already built by an earlier instance; nothing to do.
            return
        if settings.DATABASE_ENGINE == 'django_sqlalchemy.backend':
            from django_sqlalchemy import backend
            self.engine = backend.engine
        else:
            protocol = self._mappings.get(settings.DATABASE_ENGINE)
            if protocol is None:
                raise ImproperlyConfigured('Unknown database engine: %s' % settings.DATABASE_ENGINE)
            # Only include the ':port' part when a port is configured.
            port = ':%s' % settings.DATABASE_PORT if len(settings.DATABASE_PORT) > 0 else ''
            # Build the URL with plain string formatting. The previous
            # re.sub()-based substitution treated credentials as a regex
            # replacement string, which broke for passwords containing
            # backslashes or group references such as '\1'.
            url = '%s://%s:%s@%s%s/%s' % (
                protocol,
                settings.DATABASE_USER,
                settings.DATABASE_PASSWORD,
                settings.DATABASE_HOST,
                port,
                settings.DATABASE_NAME,
            )
            self.engine = create_engine(url)
        # Reflect the existing schema and prepare the shared session factory.
        self.meta = MetaData(bind=self.engine, reflect=True)
        self.Session = sessionmaker(bind=self.engine, autoflush=True, autocommit=False)
        self.importer = Importer(self.meta)
# Module-level singletons: constructing the cache builds (or reuses) the
# shared engine, metadata and session factory exported via __all__ above.
cache = EngineCache()
engine = cache.engine
meta = cache.meta
Session = cache.Session
| g2p/tranquil | tranquil/__init__.py | Python | bsd-3-clause | 1,717 | 0.041351 |
"""
Tests outgoing calls created with InitialAudio and/or InitialVideo, and
exposing the initial contents of incoming calls as values of InitialAudio and
InitialVideo
"""
import operator
from servicetest import (
assertContains, assertEquals, assertLength,
wrap_channel, EventPattern, call_async, make_channel_proxy)
from jingletest2 import JingleTest2, test_all_dialects
import constants as cs
def outgoing(jp, q, bus, conn, stream):
    """Checks InitialAudio/InitialVideo handling on outgoing calls."""
    remote_jid = '[email protected]/beyond'
    jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)

    jt.prepare()

    self_handle = conn.GetSelfHandle()
    remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]

    # The streamed-media requestable channel class must advertise both
    # InitialAudio and InitialVideo as allowed properties.
    rccs = conn.Properties.Get(cs.CONN_IFACE_REQUESTS, 'RequestableChannelClasses')
    media_classes = [ rcc for rcc in rccs
        if rcc[0][cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_STREAMED_MEDIA ]
    assertLength(1, media_classes)
    fixed, allowed = media_classes[0]
    assertContains(cs.INITIAL_AUDIO, allowed)
    assertContains(cs.INITIAL_VIDEO, allowed)

    # Exercise every combination of InitialAudio/InitialVideo.
    check_neither(q, conn, bus, stream, remote_handle)
    check_iav(jt, q, conn, bus, stream, remote_handle, True, False)
    check_iav(jt, q, conn, bus, stream, remote_handle, False, True)
    check_iav(jt, q, conn, bus, stream, remote_handle, True, True)

def check_neither(q, conn, bus, stream, remote_handle):
    """
    Make a channel without specifying InitialAudio or InitialVideo; check
    that it's announced with both False, and that they're both present and
    false in GetAll().
    """
    path, props = conn.Requests.CreateChannel({
        cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
        cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
        cs.TARGET_HANDLE: remote_handle})

    assertContains((cs.INITIAL_AUDIO, False), props.items())
    assertContains((cs.INITIAL_VIDEO, False), props.items())

    chan = wrap_channel(bus.get_object(conn.bus_name, path),
        cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling'])
    props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA)
    assertContains(('InitialAudio', False), props.items())
    assertContains(('InitialVideo', False), props.items())

    # We shouldn't have started a session yet, so there shouldn't be any
    # session handlers. Strictly speaking, there could be a session handler
    # with no stream handlers, but...
    session_handlers = chan.MediaSignalling.GetSessionHandlers()
    assertLength(0, session_handlers)

def check_iav(jt, q, conn, bus, stream, remote_handle, initial_audio,
              initial_video):
    """
    Make a channel and check that its InitialAudio and InitialVideo properties
    come out correctly.
    """
    call_async(q, conn.Requests, 'CreateChannel', {
        cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
        cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
        cs.TARGET_HANDLE: remote_handle,
        cs.INITIAL_AUDIO: initial_audio,
        cs.INITIAL_VIDEO: initial_video,
        })
    if initial_video and (not jt.jp.can_do_video()
            or (not initial_audio and not jt.jp.can_do_video_only ())):
        # Some protocols can't do video
        event = q.expect('dbus-error', method='CreateChannel')
        assertEquals(cs.NOT_CAPABLE, event.error.get_dbus_name())
    else:
        path, props = q.expect('dbus-return', method='CreateChannel').value
        assertContains((cs.INITIAL_AUDIO, initial_audio), props.items())
        assertContains((cs.INITIAL_VIDEO, initial_video), props.items())

        chan = wrap_channel(bus.get_object(conn.bus_name, path),
            cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling'])

        # The announced values must also be readable back via GetAll().
        props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA)
        assertContains(('InitialAudio', initial_audio), props.items())
        assertContains(('InitialVideo', initial_video), props.items())

        # Requesting with initial contents should have started an RTP session.
        session_handlers = chan.MediaSignalling.GetSessionHandlers()

        assertLength(1, session_handlers)

        path, type = session_handlers[0]

        assertEquals('rtp', type)
        session_handler = make_channel_proxy(conn, path, 'Media.SessionHandler')
        session_handler.Ready()

        stream_handler_paths = []
        stream_handler_types = []

        # One NewStreamHandler signal is expected per requested media type.
        for x in [initial_audio, initial_video]:
            if x:
                e = q.expect('dbus-signal', signal='NewStreamHandler')
                stream_handler_paths.append(e.args[0])
                stream_handler_types.append(e.args[2])

        if initial_audio:
            assertContains(cs.MEDIA_STREAM_TYPE_AUDIO, stream_handler_types)
        if initial_video:
            assertContains(cs.MEDIA_STREAM_TYPE_VIDEO, stream_handler_types)

        # Drive each stream handler to the connected state so the
        # session-initiate stanza goes out.
        for x in xrange (0, len(stream_handler_paths)):
            p = stream_handler_paths[x]
            t = stream_handler_types[x]
            sh = make_channel_proxy(conn, p, 'Media.StreamHandler')
            sh.NewNativeCandidate("fake", jt.get_remote_transports_dbus())
            if t == cs.MEDIA_STREAM_TYPE_AUDIO:
                sh.Ready(jt.get_audio_codecs_dbus())
            else:
                sh.Ready(jt.get_video_codecs_dbus())
            sh.StreamState(cs.MEDIA_STREAM_STATE_CONNECTED)

        e = q.expect('stream-iq',
                predicate=jt.jp.action_predicate('session-initiate'))
        jt.parse_session_initiate (e.query)

        jt.accept()

        # After the peer accepts, every stream handler should get its remote
        # codecs and be set playing.
        events = reduce(operator.concat,
            [ [ EventPattern('dbus-signal', signal='SetRemoteCodecs', path=p),
                EventPattern('dbus-signal', signal='SetStreamPlaying', path=p),
              ] for p in stream_handler_paths
            ], [])
        q.expect_many(*events)

        chan.Close()

def incoming(jp, q, bus, conn, stream):
    """Checks that incoming calls expose their contents via
    InitialAudio/InitialVideo on the announced channel."""
    remote_jid = 'skinny.fists@heaven/antennas'
    jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)

    jt.prepare()

    self_handle = conn.GetSelfHandle()
    remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]

    # Audio-only, video-only, then audio+video incoming calls.
    for a, v in [("audio1", None), (None, "video1"), ("audio1", "video1")]:
        if v!= None and not jp.can_do_video():
            continue
        if a == None and v != None and not jp.can_do_video_only():
            continue

        jt.incoming_call(audio=a, video=v)
        e = q.expect('dbus-signal', signal='NewChannels',
            predicate=lambda e:
                cs.CHANNEL_TYPE_CONTACT_LIST not in e.args[0][0][1].values())
        chans = e.args[0]
        assertLength(1, chans)
        path, props = chans[0]

        assertEquals(cs.CHANNEL_TYPE_STREAMED_MEDIA, props[cs.CHANNEL_TYPE])
        assertEquals(a != None, props[cs.INITIAL_AUDIO])
        assertEquals(v != None, props[cs.INITIAL_VIDEO])
        # FIXME: This doesn't check non-Google contacts that can only do one
        # media type, as such contacts as simulated by JingleTest2 can always
        # do both.
        assertEquals(not jp.can_do_video() or not jp.can_do_video_only(),
            props[cs.IMMUTABLE_STREAMS])

        chan = wrap_channel(bus.get_object(conn.bus_name, path),
            cs.CHANNEL_TYPE_STREAMED_MEDIA)
        chan.Close()

if __name__ == '__main__':
    test_all_dialects(outgoing)
    test_all_dialects(incoming)
| jku/telepathy-gabble | tests/twisted/jingle/initial-audio-video.py | Python | lgpl-2.1 | 7,213 | 0.004298 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for textio module."""
from __future__ import absolute_import
from __future__ import division
import bz2
import datetime
import glob
import gzip
import logging
import os
import shutil
import sys
import tempfile
import unittest
import zlib
from builtins import range
import apache_beam as beam
import apache_beam.io.source_test_utils as source_test_utils
from apache_beam import coders
from apache_beam.io import ReadAllFromText
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.textio import _TextSink as TextSink
from apache_beam.io.textio import _TextSource as TextSource
# Importing following private classes for testing.
from apache_beam.io.textio import ReadFromText
from apache_beam.io.textio import ReadFromTextWithFilename
from apache_beam.io.textio import WriteToText
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import TempDir
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.core import Create
class EOL(object):
  """Symbolic names for the line-ending styles used by the fixtures below."""
  LF, CRLF, MIXED, LF_WITH_NOTHING_AT_LAST_LINE = 1, 2, 3, 4


def write_data(
    num_lines, no_data=False, directory=None, prefix=tempfile.template,
    eol=EOL.LF):
  """Writes test data to a temporary file.

  Args:
    num_lines (int): The number of lines to write.
    no_data (bool): If :data:`True`, empty lines will be written, otherwise
      each line will contain a concatenation of b'line' and the line number.
    directory (str): The name of the directory to create the temporary file in.
    prefix (str): The prefix to use for the temporary file.
    eol (int): The line ending to use when writing.
      :class:`~apache_beam.io.textio_test.EOL` exposes attributes that can be
      used here to define the eol.

  Returns:
    Tuple[str, List[str]]: A tuple of the filename and a list of the
      utf-8 decoded written data.
  """
  written = []
  with tempfile.NamedTemporaryFile(
      delete=False, dir=directory, prefix=prefix) as handle:
    for line_num in range(num_lines):
      payload = b'' if no_data else b'line' + str(line_num).encode()
      written.append(payload)
      if eol == EOL.LF:
        terminator = b'\n'
      elif eol == EOL.CRLF:
        terminator = b'\r\n'
      elif eol == EOL.MIXED:
        # Alternate between LF (even lines) and CRLF (odd lines).
        terminator = b'\n' if line_num % 2 == 0 else b'\r\n'
      elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:
        # All lines LF-terminated except the very last one.
        terminator = b'' if line_num == (num_lines - 1) else b'\n'
      else:
        raise ValueError('Received unknown value %s for eol.' % eol)
      handle.write(payload + terminator)

  return handle.name, [entry.decode('utf-8') for entry in written]
def write_pattern(lines_per_file, no_data=False):
  """Writes a pattern of temporary files.

  Args:
    lines_per_file (List[int]): The number of lines to write per file.
    no_data (bool): If :data:`True`, empty lines will be written, otherwise
      each line will contain a concatenation of b'line' and the line number.

  Returns:
    Tuple[str, List[str]]: A tuple of the filename pattern and a list of the
      utf-8 decoded written data.
  """
  temp_dir = tempfile.mkdtemp()

  all_data = []
  file_name = None
  # Iterate the line counts directly; the previous index-based loop also
  # accumulated an unused 'start_index' counter, which has been removed.
  for num_lines in lines_per_file:
    file_name, data = write_data(num_lines, no_data=no_data,
                                 directory=temp_dir, prefix='mytemp')
    all_data.extend(data)

  assert file_name
  # All files share the 'mytemp' prefix inside the fresh temporary
  # directory, so this glob matches exactly the files written above.
  return (
      os.path.join(os.path.dirname(file_name), 'mytemp*'),
      all_data)
class TextSourceTest(unittest.TestCase):
# Number of records that will be written by most tests.
DEFAULT_NUM_RECORDS = 100
  @classmethod
  def setUpClass(cls):
    # Method has been renamed in Python 3: alias assertItemsEqual to the
    # Python 3 name so the test bodies can use assertCountEqual throughout.
    if sys.version_info[0] < 3:
      cls.assertCountEqual = cls.assertItemsEqual

  def _run_read_test(self, file_or_pattern, expected_data,
                     buffer_size=DEFAULT_NUM_RECORDS,
                     compression=CompressionTypes.UNCOMPRESSED):
    """Reads file_or_pattern with a TextSource and compares to expected_data."""
    # Since each record usually takes more than 1 byte, default buffer size is
    # smaller than the total size of the file. This is done to
    # increase test coverage for cases that hit the buffer boundary.
    source = TextSource(file_or_pattern, 0, compression,
                        True, coders.StrUtf8Coder(), buffer_size)
    range_tracker = source.get_range_tracker(None, None)
    read_data = list(source.read(range_tracker))
    self.assertCountEqual(expected_data, read_data)
  def test_read_single_file(self):
    """Reads a single LF-delimited file with the default small buffer."""
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_smaller_than_default_buffer(self):
    """Reads a file that fits entirely inside TextSource's default buffer."""
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
    self._run_read_test(file_name, expected_data,
                        buffer_size=TextSource.DEFAULT_READ_BUFFER_SIZE)

  def test_read_single_file_larger_than_default_buffer(self):
    """Reads a file with more lines than the default buffer size in bytes."""
    file_name, expected_data = write_data(TextSource.DEFAULT_READ_BUFFER_SIZE)
    self._run_read_test(file_name, expected_data,
                        buffer_size=TextSource.DEFAULT_READ_BUFFER_SIZE)

  def test_read_file_pattern(self):
    """Reads a glob pattern that matches several files of varying sizes."""
    pattern, expected_data = write_pattern(
        [TextSourceTest.DEFAULT_NUM_RECORDS * 5,
         TextSourceTest.DEFAULT_NUM_RECORDS * 3,
         TextSourceTest.DEFAULT_NUM_RECORDS * 12,
         TextSourceTest.DEFAULT_NUM_RECORDS * 8,
         TextSourceTest.DEFAULT_NUM_RECORDS * 8,
         TextSourceTest.DEFAULT_NUM_RECORDS * 4])
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS * 40
    self._run_read_test(pattern, expected_data)

  def test_read_single_file_windows_eol(self):
    """Reads a file that uses CRLF (Windows) line endings."""
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                          eol=EOL.CRLF)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_mixed_eol(self):
    """Reads a file that alternates between LF and CRLF line endings."""
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                          eol=EOL.MIXED)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_last_line_no_eol(self):
    """Reads a file whose final line lacks a trailing newline."""
    file_name, expected_data = write_data(
        TextSourceTest.DEFAULT_NUM_RECORDS,
        eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_single_line_no_eol(self):
    """Reads a one-line file with no trailing newline."""
    file_name, expected_data = write_data(
        1, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    assert len(expected_data) == 1
    self._run_read_test(file_name, expected_data)

  def test_read_empty_single_file(self):
    """Reads a file containing only a single unterminated empty string."""
    file_name, written_data = write_data(
        1, no_data=True, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    assert len(written_data) == 1
    # written data has a single entry with an empty string. Reading the source
    # should not produce anything since we only wrote a single empty string
    # without an end of line character.
    self._run_read_test(file_name, [])

  def test_read_single_file_last_line_no_eol_gzip(self):
    """Same as the no-trailing-newline test, but through gzip compression."""
    file_name, expected_data = write_data(
        TextSourceTest.DEFAULT_NUM_RECORDS,
        eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)

    gzip_file_name = file_name + '.gz'
    with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
      dst.writelines(src)

    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(gzip_file_name, expected_data,
                        compression=CompressionTypes.GZIP)

  def test_read_single_file_single_line_no_eol_gzip(self):
    """Reads a gzipped one-line file with no trailing newline."""
    file_name, expected_data = write_data(
        1, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)

    gzip_file_name = file_name + '.gz'
    with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
      dst.writelines(src)

    assert len(expected_data) == 1
    self._run_read_test(gzip_file_name, expected_data,
                        compression=CompressionTypes.GZIP)

  def test_read_empty_single_file_no_eol_gzip(self):
    """Reads a gzipped file holding one unterminated empty string."""
    file_name, written_data = write_data(
        1, no_data=True, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)

    gzip_file_name = file_name + '.gz'
    with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
      dst.writelines(src)

    assert len(written_data) == 1
    # written data has a single entry with an empty string. Reading the source
    # should not produce anything since we only wrote a single empty string
    # without an end of line character.
    self._run_read_test(gzip_file_name, [], compression=CompressionTypes.GZIP)

  def test_read_single_file_with_empty_lines(self):
    """Reads a file consisting solely of empty LF-terminated lines."""
    file_name, expected_data = write_data(
        TextSourceTest.DEFAULT_NUM_RECORDS, no_data=True, eol=EOL.LF)

    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    assert not expected_data[0]

    self._run_read_test(file_name, expected_data)
  def test_read_single_file_without_striping_eol_lf(self):
    """With strip_trailing_newlines disabled, LF endings are preserved."""
    file_name, written_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                         eol=EOL.LF)
    assert len(written_data) == TextSourceTest.DEFAULT_NUM_RECORDS

    source = TextSource(file_name, 0,
                        CompressionTypes.UNCOMPRESSED,
                        False, coders.StrUtf8Coder())

    range_tracker = source.get_range_tracker(None, None)
    read_data = list(source.read(range_tracker))
    self.assertCountEqual([line + '\n' for line in written_data], read_data)

  def test_read_single_file_without_striping_eol_crlf(self):
    """With strip_trailing_newlines disabled, CRLF endings are preserved."""
    file_name, written_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                         eol=EOL.CRLF)
    assert len(written_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED,
                        False, coders.StrUtf8Coder())

    range_tracker = source.get_range_tracker(None, None)
    read_data = list(source.read(range_tracker))
    self.assertCountEqual([line + '\r\n' for line in written_data], read_data)

  def test_read_file_pattern_with_empty_files(self):
    """Reads a pattern of files that contain only empty lines."""
    pattern, expected_data = write_pattern(
        [5 * TextSourceTest.DEFAULT_NUM_RECORDS,
         3 * TextSourceTest.DEFAULT_NUM_RECORDS,
         12 * TextSourceTest.DEFAULT_NUM_RECORDS,
         8 * TextSourceTest.DEFAULT_NUM_RECORDS,
         8 * TextSourceTest.DEFAULT_NUM_RECORDS,
         4 * TextSourceTest.DEFAULT_NUM_RECORDS],
        no_data=True)
    assert len(expected_data) == 40 * TextSourceTest.DEFAULT_NUM_RECORDS
    assert not expected_data[0]
    self._run_read_test(pattern, expected_data)

  def test_read_after_splitting(self):
    """Splitting the source yields sub-sources that read the same records."""
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=33))

    reference_source_info = (source, None, None)
    sources_info = ([
        (split.source, split.start_position, split.stop_position) for
        split in splits])
    source_test_utils.assert_sources_equal_reference_source(
        reference_source_info, sources_info)

  def test_header_processing(self):
    """Header lines matched by header_matcher go to the callback, not output."""
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10

    def header_matcher(line):
      return line in expected_data[:5]

    header_lines = []

    def store_header(lines):
      for line in lines:
        header_lines.append(line)

    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder(),
                        header_processor_fns=(header_matcher, store_header))
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    range_tracker = splits[0].source.get_range_tracker(
        splits[0].start_position, splits[0].stop_position)
    read_data = list(source.read_records(file_name, range_tracker))

    self.assertCountEqual(expected_data[:5], header_lines)
    self.assertCountEqual(expected_data[5:], read_data)

  def test_progress(self):
    """fraction_consumed and split_points advance as records are read."""
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    fraction_consumed_report = []
    split_points_report = []
    range_tracker = splits[0].source.get_range_tracker(
        splits[0].start_position, splits[0].stop_position)
    for _ in splits[0].source.read(range_tracker):
      fraction_consumed_report.append(range_tracker.fraction_consumed())
      split_points_report.append(range_tracker.split_points())

    self.assertEqual(
        [float(i) / 10 for i in range(0, 10)], fraction_consumed_report)
    expected_split_points_report = [
        ((i - 1), iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
        for i in range(1, 10)]
    # At last split point, the remaining split points callback returns 1 since
    # the expected position of next record becomes equal to the stop position.
    expected_split_points_report.append((9, 1))
    self.assertEqual(
        expected_split_points_report, split_points_report)
  def test_read_reentrant_without_splitting(self):
    """The unsplit source can safely be read more than once."""
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    source_test_utils.assert_reentrant_reads_succeed((source, None, None))

  def test_read_reentrant_after_splitting(self):
    """A split sub-source can safely be read more than once."""
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_reentrant_reads_succeed(
        (splits[0].source, splits[0].start_position, splits[0].stop_position))

  def test_dynamic_work_rebalancing(self):
    """Exhaustively verifies split-at-fraction over an LF-delimited file."""
    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_split_at_fraction_exhaustive(
        splits[0].source, splits[0].start_position, splits[0].stop_position)

  def test_dynamic_work_rebalancing_windows_eol(self):
    """Dynamic splitting also works with CRLF line endings."""
    file_name, expected_data = write_data(15, eol=EOL.CRLF)
    assert len(expected_data) == 15
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_split_at_fraction_exhaustive(
        splits[0].source, splits[0].start_position, splits[0].stop_position,
        perform_multi_threaded_test=False)

  def test_dynamic_work_rebalancing_mixed_eol(self):
    """Dynamic splitting also works with mixed LF/CRLF line endings."""
    file_name, expected_data = write_data(5, eol=EOL.MIXED)
    assert len(expected_data) == 5
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_split_at_fraction_exhaustive(
        splits[0].source, splits[0].start_position, splits[0].stop_position,
        perform_multi_threaded_test=False)
  def test_read_from_text_single_file(self):
    """ReadFromText yields every line of a single file."""
    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromText(file_name)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_from_text_with_file_name_single_file(self):
    """ReadFromTextWithFilename pairs each line with its source file name."""
    file_name, data = write_data(5)
    expected_data = [(file_name, el) for el in data]
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromTextWithFilename(file_name)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_single_file(self):
    """ReadAllFromText reads a PCollection holding one file name."""
    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create(
        [file_name]) |'ReadAll' >> ReadAllFromText()
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_many_single_files(self):
    """ReadAllFromText reads several individually named files."""
    file_name1, expected_data1 = write_data(5)
    assert len(expected_data1) == 5
    file_name2, expected_data2 = write_data(10)
    assert len(expected_data2) == 10
    file_name3, expected_data3 = write_data(15)
    assert len(expected_data3) == 15
    expected_data = []
    expected_data.extend(expected_data1)
    expected_data.extend(expected_data2)
    expected_data.extend(expected_data3)
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create(
        [file_name1, file_name2, file_name3]) |'ReadAll' >> ReadAllFromText()
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_unavailable_files_ignored(self):
    """Nonexistent paths in the input PCollection are silently skipped."""
    file_name1, expected_data1 = write_data(5)
    assert len(expected_data1) == 5
    file_name2, expected_data2 = write_data(10)
    assert len(expected_data2) == 10
    file_name3, expected_data3 = write_data(15)
    assert len(expected_data3) == 15
    file_name4 = "/unavailable_file"
    expected_data = []
    expected_data.extend(expected_data1)
    expected_data.extend(expected_data2)
    expected_data.extend(expected_data3)
    pipeline = TestPipeline()
    pcoll = (pipeline
             | 'Create' >> Create(
                 [file_name1, file_name2, file_name3, file_name4])
             |'ReadAll' >> ReadAllFromText())
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()
  def test_read_from_text_single_file_with_coder(self):
    """A custom coder's decode() is applied to every raw line that is read."""
    class DummyCoder(coders.Coder):
      def encode(self, x):
        # Encoding should never be needed when only reading.
        raise ValueError

      def decode(self, x):
        # Duplicate the raw bytes before decoding so the effect is visible.
        return (x * 2).decode('utf-8')

    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromText(file_name, coder=DummyCoder())
    assert_that(pcoll, equal_to([record * 2 for record in expected_data]))
    pipeline.run()

  def test_read_from_text_file_pattern(self):
    """ReadFromText expands a glob pattern to all matching files."""
    pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
    assert len(expected_data) == 40
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromText(pattern)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_from_text_with_file_name_file_pattern(self):
    """Filename/line pairs are produced for every file matching a pattern."""
    prefix = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    file_name_1, data_1 = write_data(5, prefix=prefix)
    file_name_2, data_2 = write_data(5, prefix=prefix)
    expected_data = []
    expected_data.extend([(file_name_1, el) for el in data_1])
    expected_data.extend([(file_name_2, el) for el in data_2])
    folder = file_name_1[:file_name_1.rfind(os.path.sep)]
    pattern = folder + os.path.sep + prefix + '*'
    assert len(expected_data) == 10
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromTextWithFilename(pattern)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_file_pattern(self):
    """ReadAllFromText expands a glob pattern from the input PCollection."""
    pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
    assert len(expected_data) == 40
    pipeline = TestPipeline()
    pcoll = (pipeline
             | 'Create' >> Create([pattern])
             |'ReadAll' >> ReadAllFromText())
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_many_file_patterns(self):
    """ReadAllFromText handles several glob patterns at once."""
    pattern1, expected_data1 = write_pattern([5, 3, 12, 8, 8, 4])
    assert len(expected_data1) == 40
    pattern2, expected_data2 = write_pattern([3, 7, 9])
    assert len(expected_data2) == 19
    pattern3, expected_data3 = write_pattern([11, 20, 5, 5])
    assert len(expected_data3) == 41
    expected_data = []
    expected_data.extend(expected_data1)
    expected_data.extend(expected_data2)
    expected_data.extend(expected_data3)
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create(
        [pattern1, pattern2, pattern3]) |'ReadAll' >> ReadAllFromText()
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()
  def test_read_auto_bzip2(self):
    """Compression is inferred from the '.bz2' file extension."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.bz2')
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(file_name)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_auto_deflate(self):
    """Compression is inferred from the '.deflate' file extension."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.deflate')
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(file_name)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_auto_gzip(self):
    """Compression is inferred from the '.gz' file extension."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.gz')
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(file_name)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_bzip2(self):
    """Reads a bzip2 file with an explicitly specified compression type."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          compression_type=CompressionTypes.BZIP2)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_corrupted_bzip2_fails(self):
    """A bzip2 file overwritten with garbage must fail the pipeline."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      # Clobber the file with bytes that are not a valid bzip2 stream.
      with open(file_name, 'wb') as f:
        f.write(b'corrupt')
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          compression_type=CompressionTypes.BZIP2)
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()
  def test_read_bzip2_concat(self):
    """Reads a file made of three concatenated bzip2 streams."""
    with TempDir() as tempdir:
      bzip2_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with bz2.BZ2File(bzip2_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))
      bzip2_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with bz2.BZ2File(bzip2_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))
      bzip2_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with bz2.BZ2File(bzip2_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))
      final_bzip2_file = tempdir.create_temp_file()
      # Concatenate the three compressed files byte-for-byte.
      with open(bzip2_file_name1, 'rb') as src, open(
          final_bzip2_file, 'wb') as dst:
        dst.writelines(src.readlines())
      with open(bzip2_file_name2, 'rb') as src, open(
          final_bzip2_file, 'ab') as dst:
        dst.writelines(src.readlines())
      with open(bzip2_file_name3, 'rb') as src, open(
          final_bzip2_file, 'ab') as dst:
        dst.writelines(src.readlines())
      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_bzip2_file,
          compression_type=beam.io.filesystem.CompressionTypes.BZIP2)
      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      pipeline.run()
  def test_read_deflate(self):
    """Reads a deflate file with an explicitly specified compression type."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.DEFLATE,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_corrupted_deflate_fails(self):
    """A deflate file overwritten with garbage must fail the pipeline."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
      # Clobber the file with bytes that are not a valid deflate stream.
      with open(file_name, 'wb') as f:
        f.write(b'corrupt')
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.DEFLATE,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()
def test_read_deflate_concat(self):
with TempDir() as tempdir:
deflate_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with open(deflate_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
deflate_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with open(deflate_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
deflate_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with open(deflate_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
final_deflate_file = tempdir.create_temp_file()
with open(deflate_file_name1, 'rb') as src, \
open(final_deflate_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(deflate_file_name2, 'rb') as src, \
open(final_deflate_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(deflate_file_name3, 'rb') as src, \
open(final_deflate_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_deflate_file,
compression_type=beam.io.filesystem.CompressionTypes.DEFLATE)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
assert_that(lines, equal_to(expected))
  def test_read_gzip(self):
    """Reads a gzip file with an explicitly specified compression type."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_corrupted_gzip_fails(self):
    """A gzip file overwritten with garbage must fail the pipeline."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      # Clobber the file with bytes that are not a valid gzip stream.
      with open(file_name, 'wb') as f:
        f.write(b'corrupt')
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()
def test_read_gzip_concat(self):
with TempDir() as tempdir:
gzip_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with gzip.open(gzip_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
gzip_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with gzip.open(gzip_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
gzip_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with gzip.open(gzip_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
final_gzip_file = tempdir.create_temp_file()
with open(gzip_file_name1, 'rb') as src, \
open(final_gzip_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(gzip_file_name2, 'rb') as src, \
open(final_gzip_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(gzip_file_name3, 'rb') as src, \
open(final_gzip_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_gzip_file,
compression_type=beam.io.filesystem.CompressionTypes.GZIP)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
assert_that(lines, equal_to(expected))
  def test_read_all_gzip(self):
    """ReadAllFromText honors an explicit compression type."""
    _, lines = write_data(100)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = (pipeline
               | Create([file_name])
               | 'ReadAll' >> ReadAllFromText(
                   compression_type=CompressionTypes.GZIP))
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_gzip_large(self):
    """Reads a large (10k line) gzip file end to end."""
    _, lines = write_data(10000)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()
  def test_read_gzip_large_after_splitting(self):
    """A compressed source must produce exactly one initial split."""
    _, lines = write_data(10000)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      source = TextSource(file_name, 0, CompressionTypes.GZIP, True,
                          coders.StrUtf8Coder())
      splits = list(source.split(desired_bundle_size=1000))
      if len(splits) > 1:
        raise ValueError('FileBasedSource generated more than one initial '
                         'split for a compressed file.')
      reference_source_info = (source, None, None)
      sources_info = ([
          (split.source, split.start_position, split.stop_position) for
          split in splits])
      source_test_utils.assert_sources_equal_reference_source(
          reference_source_info, sources_info)
  def test_read_gzip_empty_file(self):
    """An empty file read as gzip yields no records."""
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to([]))
      pipeline.run()
def _remove_lines(self, lines, sublist_lengths, num_to_remove):
"""Utility function to remove num_to_remove lines from each sublist.
Args:
lines: list of items.
sublist_lengths: list of integers representing length of sublist
corresponding to each source file.
num_to_remove: number of lines to remove from each sublist.
Returns:
remaining lines.
"""
curr = 0
result = []
for offset in sublist_lengths:
end = curr + offset
start = min(curr + num_to_remove, end)
result += lines[start:end]
curr += offset
return result
  def _read_skip_header_lines(self, file_or_pattern, skip_header_lines):
    """Reads file_or_pattern directly through TextSource with headers skipped.

    Returns the list of records from a single, unsplit range.
    """
    source = TextSource(
        file_or_pattern,
        0,
        CompressionTypes.UNCOMPRESSED,
        True,
        coders.StrUtf8Coder(),
        skip_header_lines=skip_header_lines)
    range_tracker = source.get_range_tracker(None, None)
    return list(source.read(range_tracker))
  def test_read_skip_header_single(self):
    """Skips one header line when reading a single file."""
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    skip_header_lines = 1
    expected_data = self._remove_lines(expected_data,
                                       [TextSourceTest.DEFAULT_NUM_RECORDS],
                                       skip_header_lines)
    read_data = self._read_skip_header_lines(file_name, skip_header_lines)
    self.assertEqual(len(expected_data), len(read_data))
    self.assertCountEqual(expected_data, read_data)
  def test_read_skip_header_pattern(self):
    """Skips header lines from every file matched by a pattern."""
    line_counts = [
        TextSourceTest.DEFAULT_NUM_RECORDS * 5,
        TextSourceTest.DEFAULT_NUM_RECORDS * 3,
        TextSourceTest.DEFAULT_NUM_RECORDS * 12,
        TextSourceTest.DEFAULT_NUM_RECORDS * 8,
        TextSourceTest.DEFAULT_NUM_RECORDS * 8,
        TextSourceTest.DEFAULT_NUM_RECORDS * 4
    ]
    skip_header_lines = 2
    pattern, data = write_pattern(line_counts)
    expected_data = self._remove_lines(data, line_counts, skip_header_lines)
    read_data = self._read_skip_header_lines(pattern, skip_header_lines)
    self.assertEqual(len(expected_data), len(read_data))
    self.assertCountEqual(expected_data, read_data)
  def test_read_skip_header_pattern_insufficient_lines(self):
    """Files shorter than skip_header_lines contribute no records."""
    line_counts = [
        5, 3, # Fewer lines in file than we want to skip
        12, 8, 8, 4
    ]
    skip_header_lines = 4
    pattern, data = write_pattern(line_counts)
    data = self._remove_lines(data, line_counts, skip_header_lines)
    read_data = self._read_skip_header_lines(pattern, skip_header_lines)
    self.assertEqual(len(data), len(read_data))
    self.assertCountEqual(data, read_data)
  def test_read_gzip_with_skip_lines(self):
    """Header skipping also works on compressed input."""
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, 0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder(), skip_header_lines=2)
      assert_that(pcoll, equal_to(lines[2:]))
      pipeline.run()
  def test_read_after_splitting_skip_header(self):
    """Header skipping must survive splitting into multiple bundles."""
    file_name, expected_data = write_data(100)
    assert len(expected_data) == 100
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder(), skip_header_lines=2)
    splits = list(source.split(desired_bundle_size=33))
    reference_source_info = (source, None, None)
    sources_info = ([
        (split.source, split.start_position, split.stop_position) for
        split in splits])
    self.assertGreater(len(sources_info), 1)
    reference_lines = source_test_utils.read_from_source(*reference_source_info)
    split_lines = []
    for source_info in sources_info:
      split_lines.extend(source_test_utils.read_from_source(*source_info))
    self.assertEqual(expected_data[2:], reference_lines)
    self.assertEqual(reference_lines, split_lines)
class TextSinkTest(unittest.TestCase):
  """Unit tests for TextSink and the WriteToText transform."""
  @classmethod
  def setUpClass(cls):
    # Method has been renamed in Python 3
    if sys.version_info[0] < 3:
      cls.assertCountEqual = cls.assertItemsEqual
  def setUp(self):
    super(TextSinkTest, self).setUp()
    # 100 byte-string records written by most tests below.
    self.lines = [b'Line %d' % d for d in range(100)]
    self.tempdir = tempfile.mkdtemp()
    self.path = self._create_temp_file()
  def tearDown(self):
    if os.path.exists(self.tempdir):
      shutil.rmtree(self.tempdir)
  def _create_temp_file(self, name='', suffix=''):
    """Creates an empty temp file under self.tempdir and returns its path."""
    if not name:
      name = tempfile.template
    file_name = tempfile.NamedTemporaryFile(
        delete=False, prefix=name,
        dir=self.tempdir, suffix=suffix).name
    return file_name
  def _write_lines(self, sink, lines):
    """Writes lines to sink through its open/write_record/close cycle."""
    f = sink.open(self.path)
    for line in lines:
      sink.write_record(f, line)
    sink.close(f)
  def test_write_text_file(self):
    """Writes plain text and reads it back verbatim."""
    sink = TextSink(self.path)
    self._write_lines(sink, self.lines)
    with open(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), self.lines)
  def test_write_text_file_empty(self):
    """Writing no records produces an empty file."""
    sink = TextSink(self.path)
    self._write_lines(sink, [])
    with open(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), [])
  def test_write_bzip2_file(self):
    """Writes bzip2-compressed output when requested explicitly."""
    sink = TextSink(
        self.path, compression_type=CompressionTypes.BZIP2)
    self._write_lines(sink, self.lines)
    with bz2.BZ2File(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), self.lines)
  def test_write_bzip2_file_auto(self):
    """Infers bzip2 compression from the '.bz2' suffix."""
    self.path = self._create_temp_file(suffix='.bz2')
    sink = TextSink(self.path)
    self._write_lines(sink, self.lines)
    with bz2.BZ2File(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), self.lines)
  def test_write_gzip_file(self):
    """Writes gzip-compressed output when requested explicitly."""
    sink = TextSink(
        self.path, compression_type=CompressionTypes.GZIP)
    self._write_lines(sink, self.lines)
    with gzip.GzipFile(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), self.lines)
  def test_write_gzip_file_auto(self):
    """Infers gzip compression from the '.gz' suffix."""
    self.path = self._create_temp_file(suffix='.gz')
    sink = TextSink(self.path)
    self._write_lines(sink, self.lines)
    with gzip.GzipFile(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), self.lines)
  def test_write_gzip_file_empty(self):
    """Writing no records still produces a valid gzip file."""
    sink = TextSink(
        self.path, compression_type=CompressionTypes.GZIP)
    self._write_lines(sink, [])
    with gzip.GzipFile(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), [])
  def test_write_deflate_file(self):
    """Writes deflate-compressed output when requested explicitly."""
    sink = TextSink(
        self.path, compression_type=CompressionTypes.DEFLATE)
    self._write_lines(sink, self.lines)
    with open(self.path, 'rb') as f:
      self.assertEqual(zlib.decompress(f.read()).splitlines(), self.lines)
  def test_write_deflate_file_auto(self):
    """Infers deflate compression from the '.deflate' suffix."""
    self.path = self._create_temp_file(suffix='.deflate')
    sink = TextSink(self.path)
    self._write_lines(sink, self.lines)
    with open(self.path, 'rb') as f:
      self.assertEqual(zlib.decompress(f.read()).splitlines(), self.lines)
  def test_write_deflate_file_empty(self):
    """Writing no records still produces a valid deflate stream."""
    sink = TextSink(
        self.path, compression_type=CompressionTypes.DEFLATE)
    self._write_lines(sink, [])
    with open(self.path, 'rb') as f:
      self.assertEqual(zlib.decompress(f.read()).splitlines(), [])
  def test_write_text_file_with_header(self):
    """The header is written once at the top of the file."""
    header = b'header1\nheader2'
    sink = TextSink(self.path, header=header)
    self._write_lines(sink, self.lines)
    with open(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), header.splitlines() + self.lines)
  def test_write_text_file_empty_with_header(self):
    """With no records the file contains only the header."""
    header = b'header1\nheader2'
    sink = TextSink(self.path, header=header)
    self._write_lines(sink, [])
    with open(self.path, 'rb') as f:
      self.assertEqual(f.read().splitlines(), header.splitlines())
  def test_write_dataflow(self):
    """WriteToText writes all records across however many shards."""
    pipeline = TestPipeline()
    pcoll = pipeline | beam.core.Create(self.lines)
    pcoll | 'Write' >> WriteToText(self.path)  # pylint: disable=expression-not-assigned
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
      with open(file_name, 'rb') as f:
        read_result.extend(f.read().splitlines())
    self.assertEqual(read_result, self.lines)
  def test_write_dataflow_auto_compression(self):
    """WriteToText gzip-compresses shards when the suffix is '.gz'."""
    pipeline = TestPipeline()
    pcoll = pipeline | beam.core.Create(self.lines)
    pcoll | 'Write' >> WriteToText(self.path, file_name_suffix='.gz')  # pylint: disable=expression-not-assigned
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
      with gzip.GzipFile(file_name, 'rb') as f:
        read_result.extend(f.read().splitlines())
    self.assertEqual(read_result, self.lines)
  def test_write_dataflow_auto_compression_unsharded(self):
    """Auto compression also applies when sharding is disabled."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> beam.core.Create(self.lines)
    pcoll | 'Write' >> WriteToText(  # pylint: disable=expression-not-assigned
        self.path + '.gz',
        shard_name_template='')
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
      with gzip.GzipFile(file_name, 'rb') as f:
        read_result.extend(f.read().splitlines())
    self.assertEqual(read_result, self.lines)
  def test_write_dataflow_header(self):
    """WriteToText places the header before the records."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> beam.core.Create(self.lines)
    header_text = 'foo'
    pcoll | 'Write' >> WriteToText(  # pylint: disable=expression-not-assigned
        self.path + '.gz',
        shard_name_template='',
        header=header_text)
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
      with gzip.GzipFile(file_name, 'rb') as f:
        read_result.extend(f.read().splitlines())
    # header_text is automatically encoded in WriteToText
    self.assertEqual(read_result, [header_text.encode('utf-8')] + self.lines)
if __name__ == '__main__':
  # Surface INFO-level pipeline logs while the tests run.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| markflyhigh/incubator-beam | sdks/python/apache_beam/io/textio_test.py | Python | apache-2.0 | 43,198 | 0.006898 |
from django.dispatch import receiver
from pretix.base.signals import register_payment_providers
@receiver(register_payment_providers, dispatch_uid="payment_paypal")
def register_payment_provider(sender, **kwargs):
    """Return the PayPal provider class to pretix's payment registry."""
    # Imported lazily inside the handler to avoid import-time side effects
    # when the plugin module is first loaded.
    from .payment import Paypal
    return Paypal
| awg24/pretix | src/pretix/plugins/paypal/signals.py | Python | apache-2.0 | 267 | 0 |
from django.db import models
class AdjacencyListModel(models.Model):
    """Tree node stored with the adjacency-list pattern (parent pointer)."""
    title = models.CharField(max_length=100)
    # Each node references its parent; a NULL parent marks a root node.
    parent = models.ForeignKey(
        'self', related_name='%(class)s_parent', on_delete=models.CASCADE, db_index=True, null=True, blank=True)
    def __str__(self):
        return 'adjacencylistmodel_%s' % self.title
class NestedSetModel(models.Model):
    """Tree node stored with the nested-set pattern (lft/rgt bounds)."""
    title = models.CharField(max_length=100)
    # Nested-set bounds: descendants have lft/rgt between the node's own
    # lft and rgt values.
    lft = models.IntegerField(db_index=True)
    rgt = models.IntegerField(db_index=True)
    # Depth of the node in the tree -- presumably 0-based; verify against
    # the code that builds these rows.
    level = models.IntegerField(db_index=True)
    def __str__(self):
        return 'nestedsetmodel_%s' % self.title
| idlesign/django-admirarchy | admirarchy/tests/testapp/models.py | Python | bsd-3-clause | 633 | 0.00158 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-04-10 03:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Strips the redundant 'vrf_' prefix from the VRF model's field names."""
    dependencies = [
        ('netdevice', '0006_auto_20190409_0325'),
    ]
    operations = [
        migrations.RenameField(
            model_name='vrf',
            old_name='vrf_name',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='vrf',
            old_name='vrf_target',
            new_name='target',
        ),
    ]
| lkmhaqer/gtools-python | netdevice/migrations/0007_auto_20190410_0358.py | Python | mit | 567 | 0 |
from functools import reduce
class ScopedString (object):
    """A dotted name built from a stack of scope frames.

    Pushing 'a' then 'b' renders as 'a.b'; popping removes and returns
    the innermost frame.
    """
    def __init__(self):
        # Innermost frame is the last element.
        self._stack = []
    def push(self, frame):
        """Enter a new scope named *frame*."""
        self._stack.append(frame)
    def pop(self):
        """Leave the innermost scope and return its name."""
        return self._stack.pop()
    def __str__(self):
        return '.'.join(self._stack)
class ScopedList (object):
    """A list built up across a stack of scopes.

    append() adds to the innermost scope; pop() discards the innermost
    scope wholesale (the outermost, global scope cannot be popped).
    Iteration and str() present the concatenation of every scope,
    outermost first.
    """
    def __init__(self, stack=None):
        if stack:
            # Adopt the caller's frame stack as-is.
            self._stack = stack
        else:
            self._stack = []
        self.push()
    def push(self):
        """Enter a new (innermost) scope."""
        self._stack.append([])
    def pop(self):
        """Discard the innermost scope."""
        if len(self._stack) <= 1:
            raise IndexError("Attempt to pop global scope")
        self._stack.pop()
    def append(self, val):
        """Add *val* to the innermost scope."""
        self._stack[-1].append(val)
    def _normalize(self):
        # Flatten the frames into a single list, outermost first.
        return [item for frame in self._stack for item in frame]
    def __str__(self):
        return str(self._normalize())
    def __repr__(self):
        # BUG FIX: previously printed "ScopedDict(...)" -- a copy-paste
        # error from the ScopedDict class below.
        return "ScopedList(" + repr(self._stack) + ")"
    def __iter__(self):
        return iter(self._normalize())
class ScopedDict (object):
    """A dictionary with nested scopes.

    Lookups search from the innermost scope outwards; assignments always
    land in the innermost scope. pop() removes and returns the innermost
    scope; the outermost (global) scope cannot be popped.
    """
    def __init__(self, stack=None):
        if stack:
            self._stack = stack
        else:
            self._stack = []
        self.push()
    def push(self):
        """Enter a new innermost scope."""
        self._stack.insert(0, {})
    def pop(self):
        """Remove and return the innermost scope."""
        if len(self._stack) <= 1:
            raise IndexError("Attempt to pop global scope")
        return self._stack.pop(0)
    def _normalize(self):
        # Collapse the scopes into one dict; inner scopes shadow outer
        # ones (first write wins while scanning innermost-first).
        merged = {}
        for frame in self._stack:
            for key, value in frame.items():
                merged.setdefault(key, value)
        return merged
    def __getitem__(self, key):
        for frame in self._stack:
            if key in frame:
                return frame[key]
        raise KeyError(key)
    def __setitem__(self, key, value):
        self._stack[0][key] = value
    def __contains__(self, key):
        return any(key in frame for frame in self._stack)
    def __str__(self):
        return str(self._normalize())
    def __repr__(self):
        return "ScopedDict(" + repr(self._stack) + ")"
    def __iter__(self):
        return iter(self._normalize())
    def items(self):
        return self._normalize().items()
    def keys(self):
        return self._normalize().keys()
    def values(self):
        return self._normalize().values()
| doffm/dbuf | src/dbuf/util.py | Python | bsd-3-clause | 3,108 | 0.026705 |
"""A likelihood function representing a Student-t distribution.
Author:
Ilias Bilionis
Date:
1/21/2013
"""
__all__ = ['StudentTLikelihoodFunction']
import numpy as np
import scipy
import math
from . import GaussianLikelihoodFunction
class StudentTLikelihoodFunction(GaussianLikelihoodFunction):
    """An object representing a Student-t likelihood function."""
    # Backing storage for the degrees of freedom (exposed via the ``nu``
    # property below).
    _nu = None
    @property
    def nu(self):
        """Get the degrees of freedom."""
        return self._nu
    @nu.setter
    def nu(self, value):
        """Set the degrees of freedom (must be a float)."""
        if not isinstance(value, float):
            raise TypeError('nu must be a float.')
        self._nu = value
    def __init__(self, nu, num_input=None, data=None, mean_function=None, cov=None,
                 name='Student-t Likelihood Function'):
        """Initialize the object.
        Arguments:
            nu --- The degrees of freedom of the distribution.
        Keyword Arguments
            num_input --- The number of inputs. Optional, if
                          mean_function is a proper Function.
            data --- The observed data. A vector. Optional,
                     if mean_function is a proper Function.
                     It can be set later.
            mean_function --- The mean function. See the super class
                              for the description.
            cov --- The covariance matrix. It can either be
                    a positive definite matrix, or a number.
                    The data or a proper mean_function is
                    preassumed.
            name --- A name for the likelihood function.
        """
        # Validate/set nu before delegating the rest of construction to
        # the Gaussian base class.
        self.nu = nu
        super(StudentTLikelihoodFunction, self).__init__(num_input=num_input,
                                                         data=data,
                                                         mean_function=mean_function,
                                                         cov=cov,
                                                         name=name)
    def __call__(self, x):
        """Evaluate the function at x."""
        mu = self.mean_function(x)
        # Whitened residual: solves L_cov y = data - mu. L_cov and
        # num_data come from the base class -- presumably the Cholesky
        # factor of the covariance; verify in GaussianLikelihoodFunction.
        y = scipy.linalg.solve_triangular(self.L_cov, self.data - mu)
        # Student-t log-density kernel; x-independent normalization
        # constants appear to be omitted.
        return (
            - 0.5 * (self.nu + self.num_data) * math.log(1. + np.dot(y, y) / self.nu))
| ebilionis/py-best | best/random/_student_t_likelihood_function.py | Python | lgpl-3.0 | 2,586 | 0.001933 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_index_crud_operations_async.py
DESCRIPTION:
This sample demonstrates how to get, create, update, or delete an index.
USAGE:
python sample_index_crud_operations_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_SEARCH_SERVICE_ENDPOINT - the endpoint of your Azure Cognitive Search service
2) AZURE_SEARCH_API_KEY - your search API key
"""
import os
import asyncio
# Connection settings come from the environment so the sample runs anywhere.
service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
key = os.getenv("AZURE_SEARCH_API_KEY")
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexClient
from azure.search.documents.indexes.models import (
ComplexField,
CorsOptions,
SearchIndex,
ScoringProfile,
SearchFieldDataType,
SimpleField,
SearchableField
)
# Single shared client used by every sample operation below.
client = SearchIndexClient(service_endpoint, AzureKeyCredential(key))
async def create_index():
    """Create the 'hotels' index with simple/complex fields and CORS options."""
    # [START create_index_async]
    name = "hotels"
    fields = [
        SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
        SimpleField(name="baseRate", type=SearchFieldDataType.Double),
        SearchableField(name="description", type=SearchFieldDataType.String, collection=True),
        ComplexField(name="address", fields=[
            SimpleField(name="streetAddress", type=SearchFieldDataType.String),
            SimpleField(name="city", type=SearchFieldDataType.String),
        ], collection=True)
    ]
    cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60)
    scoring_profiles = []
    index = SearchIndex(
        name=name,
        fields=fields,
        scoring_profiles=scoring_profiles,
        cors_options=cors_options)
    result = await client.create_index(index)
    # [END create_index_async]
async def get_index():
    """Fetch the 'hotels' index definition from the service."""
    # [START get_index_async]
    name = "hotels"
    result = await client.get_index(name)
    # [END get_index_async]
async def update_index():
    """Update the 'hotels' index, adding fields and a scoring profile."""
    # [START update_index_async]
    name = "hotels"
    fields = [
        SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
        SimpleField(name="baseRate", type=SearchFieldDataType.Double),
        SearchableField(name="description", type=SearchFieldDataType.String, collection=True),
        SearchableField(name="hotelName", type=SearchFieldDataType.String),
        ComplexField(name="address", fields=[
            SimpleField(name="streetAddress", type=SearchFieldDataType.String),
            SimpleField(name="city", type=SearchFieldDataType.String),
            SimpleField(name="state", type=SearchFieldDataType.String),
        ], collection=True)
    ]
    cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60)
    scoring_profile = ScoringProfile(
        name="MyProfile"
    )
    scoring_profiles = []
    scoring_profiles.append(scoring_profile)
    index = SearchIndex(
        name=name,
        fields=fields,
        scoring_profiles=scoring_profiles,
        cors_options=cors_options)
    result = await client.create_or_update_index(index=index)
    # [END update_index_async]
async def delete_index():
    """Delete the 'hotels' index."""
    # [START delete_index_async]
    name = "hotels"
    await client.delete_index(name)
    # [END delete_index_async]
async def main():
    """Run each CRUD operation in sequence, then close the client."""
    await create_index()
    await get_index()
    await update_index()
    await delete_index()
    await client.close()
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
| Azure/azure-sdk-for-python | sdk/search/azure-search-documents/samples/async_samples/sample_index_crud_operations_async.py | Python | mit | 3,884 | 0.00309 |
def RGB01ToHex(rgb):
    """
    Return an RGB color value as a hex color string.
    """
    # Scale each 0..1 channel to an 8-bit integer (truncating), then format.
    channels = tuple(int(channel * 255) for channel in rgb)
    return '#%02x%02x%02x' % channels
def hexToRGB01(hexColor):
    """
    Return a hex color string as an RGB tuple of floats in the range 0..1
    """
    digits = hexColor.lstrip('#')
    # Parse each two-digit channel, then scale from 0..255 down to 0..1.
    channels = (int(digits[i:i + 2], 16) for i in (0, 2, 4))
    return tuple(value / 255.0 for value in channels)
| bohdon/maya-pulse | src/pulse/scripts/pulse/colors.py | Python | mit | 381 | 0 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
class ECSServiceCheck(Script):
  """Ambari service-check script for the ECS client."""
  def service_check(self, env):
    """Verify the ECS client can reach its backend by listing the FS root."""
    import params
    env.set_params(params)
    # run fs list command to make sure ECS client can talk to ECS backend
    list_command = format("fs -ls /")
    if params.security_enabled:
      # Authenticate with Kerberos first when security is enabled.
      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
              user=params.hdfs_user
              )
    # Retry up to 20 times (3s apart) to tolerate slow service startup.
    ExecuteHadoop(list_command,
                  user=params.hdfs_user,
                  logoutput=True,
                  conf_dir=params.hadoop_conf_dir,
                  try_sleep=3,
                  tries=20,
                  bin_dir=params.hadoop_bin_dir
                  )
if __name__ == "__main__":
  ECSServiceCheck().execute()
| arenadata/ambari | ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py | Python | apache-2.0 | 1,514 | 0.004624 |
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribuetd under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dealer.git import git
from django.template import RequestContext
# Module-level holder for the most recent request's RequestContext; set on
# every request by MakoMiddleware -- presumably read elsewhere by the mako
# template integration (verify consumers before changing).
requestcontext = None
class MakoMiddleware(object):
    """Django middleware capturing per-request context for mako templates."""
    def process_request(self, request):
        """Store a RequestContext enriched with host, SSL and VCS revision."""
        global requestcontext
        requestcontext = RequestContext(request)
        requestcontext['is_secure'] = request.is_secure()
        requestcontext['site'] = request.get_host()
        # Current source revision, provided by the dealer.git helper.
        requestcontext['REVISION'] = git.revision
| abo-abo/edx-platform | common/djangoapps/mitxmako/middleware.py | Python | agpl-3.0 | 1,006 | 0 |
"""
Module with functionality for splitting and shuffling datasets.
"""
import numpy as np
from sklearn.utils import murmurhash3_32
from spotlight.interactions import Interactions
def _index_or_none(array, shuffle_index):
if array is None:
return None
else:
return array[shuffle_index]
def shuffle_interactions(interactions,
                         random_state=None):
    """
    Shuffle interactions.
    Parameters
    ----------
    interactions: :class:`spotlight.interactions.Interactions`
        The interactions to shuffle.
    random_state: np.random.RandomState, optional
        The random state used for the shuffle.
    Returns
    -------
    interactions: :class:`spotlight.interactions.Interactions`
        The shuffled interactions.
    """
    # Fall back to a fresh, unseeded RNG when none is supplied.
    if random_state is None:
        random_state = np.random.RandomState()
    # A single permutation applied consistently to every aligned array so
    # (user, item, rating, timestamp, weight) tuples stay together.
    shuffle_indices = np.arange(len(interactions.user_ids))
    random_state.shuffle(shuffle_indices)
    return Interactions(interactions.user_ids[shuffle_indices],
                        interactions.item_ids[shuffle_indices],
                        ratings=_index_or_none(interactions.ratings,
                                               shuffle_indices),
                        timestamps=_index_or_none(interactions.timestamps,
                                                  shuffle_indices),
                        weights=_index_or_none(interactions.weights,
                                               shuffle_indices),
                        num_users=interactions.num_users,
                        num_items=interactions.num_items)
def random_train_test_split(interactions,
                            test_percentage=0.2,
                            random_state=None):
    """
    Randomly split interactions between training and testing.

    Parameters
    ----------

    interactions: :class:`spotlight.interactions.Interactions`
        The interactions to split.
    test_percentage: float, optional
        The fraction of interactions to place in the test set.
    random_state: np.random.RandomState, optional
        The random state used for the shuffle.

    Returns
    -------

    (train, test): (:class:`spotlight.interactions.Interactions`,
                    :class:`spotlight.interactions.Interactions`)
         A tuple of (train data, test data)
    """

    interactions = shuffle_interactions(interactions,
                                        random_state=random_state)

    # The first `cutoff` shuffled interactions form the training set,
    # the remainder the test set.
    cutoff = int((1.0 - test_percentage) * len(interactions))

    def _subset(idx):
        # Build an Interactions object restricted to idx, carrying the
        # optional arrays through only when present on the source dataset.
        # (Shared helper replaces the previously duplicated constructor calls.)
        return Interactions(interactions.user_ids[idx],
                            interactions.item_ids[idx],
                            ratings=_index_or_none(interactions.ratings, idx),
                            timestamps=_index_or_none(interactions.timestamps,
                                                      idx),
                            weights=_index_or_none(interactions.weights, idx),
                            num_users=interactions.num_users,
                            num_items=interactions.num_items)

    return _subset(slice(None, cutoff)), _subset(slice(cutoff, None))
def user_based_train_test_split(interactions,
                                test_percentage=0.2,
                                random_state=None):
    """
    Split interactions between a train and a test set based on
    user ids, so that a given user's entire interaction history
    is either in the train, or the test set.

    Parameters
    ----------

    interactions: :class:`spotlight.interactions.Interactions`
        The interactions to shuffle.
    test_percentage: float, optional
        The fraction of users to place in the test set.
    random_state: np.random.RandomState, optional
        The random state used for the shuffle.

    Returns
    -------

    (train, test): (:class:`spotlight.interactions.Interactions`,
                    :class:`spotlight.interactions.Interactions`)
         A tuple of (train data, test data)
    """

    if random_state is None:
        random_state = np.random.RandomState()

    # Draw a hash seed spanning the full uint32 range.
    minint = np.iinfo(np.uint32).min
    maxint = np.iinfo(np.uint32).max
    seed = random_state.randint(minint, maxint, dtype=np.int64)

    # Hash each user id into [0, 1); users whose fraction falls below the
    # threshold go to the test set, keeping a user's history on one side.
    user_fraction = murmurhash3_32(interactions.user_ids,
                                   seed=seed,
                                   positive=True) % 100 / 100.0
    in_test = user_fraction < test_percentage
    in_train = np.logical_not(in_test)

    def _subset(mask):
        # Construct an Interactions object restricted to the boolean mask.
        return Interactions(interactions.user_ids[mask],
                            interactions.item_ids[mask],
                            ratings=_index_or_none(interactions.ratings, mask),
                            timestamps=_index_or_none(interactions.timestamps,
                                                      mask),
                            weights=_index_or_none(interactions.weights, mask),
                            num_users=interactions.num_users,
                            num_items=interactions.num_items)

    return _subset(in_train), _subset(in_test)
| maciejkula/spotlight | spotlight/cross_validation.py | Python | mit | 6,519 | 0 |
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration: drop the obsolete ``MEP.stg_office`` column."""
    def forwards(self, orm):
        # Deleting field 'MEP.stg_office'
        db.delete_column('meps_mep', 'stg_office')
    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'MEP.stg_office'
        raise RuntimeError("Cannot reverse this migration. 'MEP.stg_office' and its values cannot be restored.")
    # Frozen ORM snapshot auto-generated by South: describes the models as of
    # this migration so forwards()/backwards() run against a stable schema.
    # Do not edit by hand.
    models = {
        'meps.building': {
            'Meta': {'object_name': 'Building'},
            'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'meps.committee': {
            'Meta': {'object_name': 'Committee'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'meps.committeerole': {
            'Meta': {'object_name': 'CommitteeRole'},
            'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Committee']"}),
            'end': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'meps.country': {
            'Meta': {'object_name': 'Country'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'meps.countrymep': {
            'Meta': {'object_name': 'CountryMEP'},
            'begin': ('django.db.models.fields.DateField', [], {}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Country']"}),
            'end': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Party']"})
        },
        'meps.delegation': {
            'Meta': {'object_name': 'Delegation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'meps.delegationrole': {
            'Meta': {'object_name': 'DelegationRole'},
            'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'delegation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Delegation']"}),
            'end': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'meps.group': {
            'Meta': {'object_name': 'Group'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'meps.groupmep': {
            'Meta': {'object_name': 'GroupMEP'},
            'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'end': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'meps.mep': {
            'Meta': {'ordering': "['last_name']", 'object_name': 'MEP', '_ormbases': ['reps.Representative']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'bxl_building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bxl_building'", 'to': "orm['meps.Building']"}),
            'bxl_fax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'bxl_floor': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'bxl_office_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'bxl_phone1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'bxl_phone2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'committees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Committee']", 'through': "orm['meps.CommitteeRole']", 'symmetrical': 'False'}),
            'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Country']", 'through': "orm['meps.CountryMEP']", 'symmetrical': 'False'}),
            'delegations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Delegation']", 'through': "orm['meps.DelegationRole']", 'symmetrical': 'False'}),
            'ep_debates': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'ep_declarations': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'ep_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
            'ep_motions': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'ep_opinions': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'ep_questions': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'ep_reports': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'ep_webpage': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Group']", 'through': "orm['meps.GroupMEP']", 'symmetrical': 'False'}),
            'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Organization']", 'through': "orm['meps.OrganizationMEP']", 'symmetrical': 'False'}),
            'position': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
            'representative_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['reps.Representative']", 'unique': 'True', 'primary_key': 'True'}),
            'stg_building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stg_building'", 'to': "orm['meps.Building']"}),
            'stg_fax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'stg_floor': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'stg_office_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'stg_phone1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'stg_phone2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'total_score': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True'})
        },
        'meps.organization': {
            'Meta': {'object_name': 'Organization'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'meps.organizationmep': {
            'Meta': {'object_name': 'OrganizationMEP'},
            'begin': ('django.db.models.fields.DateField', [], {}),
            'end': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Organization']"}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'meps.postaladdress': {
            'Meta': {'object_name': 'PostalAddress'},
            'addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"})
        },
        'reps.opinion': {
            'Meta': {'object_name': 'Opinion'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1023'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        'reps.opinionrep': {
            'Meta': {'object_name': 'OpinionREP'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Opinion']"}),
            'representative': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Representative']"})
        },
        'reps.party': {
            'Meta': {'object_name': 'Party'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'reps.partyrepresentative': {
            'Meta': {'object_name': 'PartyRepresentative'},
            'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Party']"}),
            'representative': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Representative']"}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
        },
        'reps.representative': {
            'Meta': {'ordering': "['last_name']", 'object_name': 'Representative'},
            'birth_date': ('django.db.models.fields.DateField', [], {}),
            'birth_place': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'local_party': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['reps.Party']", 'through': "orm['reps.PartyRepresentative']", 'symmetrical': 'False'}),
            'opinions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['reps.Opinion']", 'through': "orm['reps.OpinionREP']", 'symmetrical': 'False'}),
            'picture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }
    complete_apps = ['meps']
| alirizakeles/memopol-core | memopol/meps/migrations/0017_auto__del_field_mep_stg_office.py | Python | gpl-3.0 | 12,677 | 0.008046 |
# importing libraries:
import maya.cmds as cmds
import maya.mel as mel
# global variables to this module:
CLASS_NAME = "Arm"
# TITLE and DESCRIPTION are keys into dpAutoRigInst's language dictionary
# (resolved via langDic[langName] inside Arm() below).
TITLE = "m028_arm"
DESCRIPTION = "m029_armDesc"
ICON = "/Icons/dp_arm.png"
def Arm(dpAutoRigInst):
    """ This function will create all guides needed to compose an arm.
    """
    # check modules integrity:
    guideDir = 'Modules'
    checkModuleList = ['dpLimb', 'dpFinger']
    checkResultList = dpAutoRigInst.startGuideModules(guideDir, "check", None, checkModuleList=checkModuleList)
    if len(checkResultList) == 0:
        # creating module instances:
        armLimbInstance = dpAutoRigInst.initGuide('dpLimb', guideDir)
        # change name to arm:
        dpAutoRigInst.guide.Limb.editUserName(armLimbInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m028_arm'].capitalize())
        # create finger instances:
        indexFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
        dpAutoRigInst.guide.Finger.editUserName(indexFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m032_index'])
        middleFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
        dpAutoRigInst.guide.Finger.editUserName(middleFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m033_middle'])
        ringFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
        dpAutoRigInst.guide.Finger.editUserName(ringFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m034_ring'])
        pinkFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
        dpAutoRigInst.guide.Finger.editUserName(pinkFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m035_pink'])
        thumbFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
        dpAutoRigInst.guide.Finger.editUserName(thumbFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m036_thumb'])
        # edit arm limb guide:
        # (hard-coded translate values position the guide in the default
        # T-pose scene; presumably tuned for the standard biped template)
        armBaseGuide = armLimbInstance.moduleGrp
        cmds.setAttr(armBaseGuide+".translateX", 2.5)
        cmds.setAttr(armBaseGuide+".translateY", 16)
        cmds.setAttr(armBaseGuide+".displayAnnotation", 0)
        cmds.setAttr(armLimbInstance.cvExtremLoc+".translateZ", 7)
        cmds.setAttr(armLimbInstance.radiusCtrl+".translateX", 1.5)
        # edit finger guides:
        # fingerTZList spreads the fingers along Z (thumb offset last).
        fingerInstanceList = [indexFingerInstance, middleFingerInstance, ringFingerInstance, pinkFingerInstance, thumbFingerInstance]
        fingerTZList = [0.6, 0.2, -0.2, -0.6, 0.72]
        for n, fingerInstance in enumerate(fingerInstanceList):
            cmds.setAttr(fingerInstance.moduleGrp+".translateX", 11)
            cmds.setAttr(fingerInstance.moduleGrp+".translateY", 16)
            cmds.setAttr(fingerInstance.moduleGrp+".translateZ", fingerTZList[n])
            cmds.setAttr(fingerInstance.moduleGrp+".displayAnnotation", 0)
            cmds.setAttr(fingerInstance.radiusCtrl+".translateX", 0.3)
            cmds.setAttr(fingerInstance.annotation+".visibility", 0)
            if n == len(fingerInstanceList)-1:
                # correct not commun values for thumb guide:
                cmds.setAttr(thumbFingerInstance.moduleGrp+".translateX", 10.1)
                cmds.setAttr(thumbFingerInstance.moduleGrp+".rotateX", 60)
                dpAutoRigInst.guide.Finger.changeJointNumber(thumbFingerInstance, 2)
                cmds.setAttr(thumbFingerInstance.moduleGrp+".nJoints", 2)
            # parent finger guide to the arm wrist guide:
            cmds.parent(fingerInstance.moduleGrp, armLimbInstance.cvExtremLoc, absolute=True)
            # select the armGuide_Base:
            # NOTE(review): this select runs on every loop iteration though
            # only the final selection matters -- harmless, but verify it was
            # not meant to sit outside the loop.
            cmds.select(armBaseGuide)
    else:
        # error checking modules in the folder:
        mel.eval('error \"'+ dpAutoRigInst.langDic[dpAutoRigInst.langName]['e001_GuideNotChecked'] +' - '+ (", ").join(checkResultList) +'\";')
| SqueezeStudioAnimation/dpAutoRigSystem | dpAutoRigSystem/Scripts/dpArm.py | Python | gpl-2.0 | 3,972 | 0.007301 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-28 15:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add OpenHumansUser; rework GennotesEditor id fields."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('genevieve_client', '0003_variant_myvariant_dbsnp'),
    ]
    operations = [
        migrations.CreateModel(
            name='OpenHumansUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('access_token', models.CharField(blank=True, max_length=30)),
                ('refresh_token', models.CharField(blank=True, max_length=30)),
                ('token_expiration', models.DateTimeField(null=True)),
                ('connected_id', models.CharField(max_length=30, unique=True)),
                ('openhumans_username', models.CharField(blank=True, max_length=30)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='gennoteseditor',
            name='gennotes_id',
        ),
        migrations.RemoveField(
            model_name='gennoteseditor',
            name='genome_storage_enabled',
        ),
        # preserve_default=False: default=0 was the one-off value supplied
        # interactively during makemigrations for existing rows.
        migrations.AddField(
            model_name='gennoteseditor',
            name='connected_id',
            field=models.CharField(default=0, max_length=30, unique=True),
            preserve_default=False,
        ),
    ]
| madprime/genevieve | genevieve_client/migrations/0004_auto_20160328_1526.py | Python | mit | 1,731 | 0.001733 |
from __future__ import print_function, division
import sys
import os
# Make the package root importable when modules are run directly,
# and suppress .pyc generation.
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from distribution import *
import operator as o
from utils.lib import gt, lt, gte, lte, neq, eq
__author__ = "bigfatnoob"
def sample(values, size=100):
  """Draw *size* elements from *values* uniformly at random, with replacement."""
  drawn = np.random.choice(values, size=size)
  return drawn
def expected_value(values, size=1000, repetitions=1000):
  """Bootstrap estimate of the expected value of *values*.

  Draws *repetitions* samples of *size* elements each (with replacement)
  and returns the mean of the per-sample means.  *repetitions* replaces a
  previously hard-coded loop count of 1000; its default preserves the old
  behaviour, so existing callers are unaffected.
  """
  means = [np.mean(sample(values, int(size))) for _ in range(repetitions)]
  return np.mean(means)
def standard_deviation(values):
  """Population standard deviation of *values* (numpy default, ddof=0)."""
  return np.asarray(values).std()
def percentile(values, percent):
  """Return the *percent*-th percentile of *values* (linear interpolation)."""
  return np.percentile(values, q=percent)
def probability(values):
  """Fraction of entries in *values* that are >= 1."""
  hits = sum(1 for v in values if v >= 1)
  return hits / len(values)
def lambda_ev(*args):
  """Return a one-argument callable computing expected_value(x, *args)."""
  def _metric(x):
    return expected_value(x, *args)
  return _metric
def lambda_std():
  """Return a one-argument callable computing standard_deviation(x)."""
  def _metric(x):
    return standard_deviation(x)
  return _metric
def lambda_percentile(*args):
  """Return a one-argument callable computing percentile(x, *args)."""
  def _metric(x):
    return percentile(x, *args)
  return _metric
def lambda_probability():
  """Return a one-argument callable computing probability(x)."""
  def _metric(x):
    return probability(x)
  return _metric
def to_int(func):
  """Wrap binary predicate *func* so it yields 1/0 instead of True/False."""
  def _as_int(a, b):
    return int(bool(func(a, b)))
  return _as_int
# Maps quirk evaluation keywords to the factory building the metric callable.
evaluations = {
  "EV": lambda_ev,
  "STD": lambda_std,
  "PERCENTILE": lambda_percentile,
  "PROBABILITY": lambda_probability
}
# Maps distribution names used in quirk models to their classes
# (imported from the local ``distribution`` module via the star import).
distributions = {
  "constant": Constant,
  "normal": Normal,
  "normalCI": NormalCI,
  "uniform": Uniform,
  "random": Random,
  "exp": Exponential,
  "binomial": Binomial,
  "geometric": Geometric,
  "triangular": Triangular
}
# Maps operator tokens to binary callables.  Comparisons are wrapped with
# to_int so they yield 1/0; '|' maps to max and '&' to multiplication.
# NOTE: operator.div exists on Python 2 only -- this module targets py2.
operations = {
  "+": o.add,
  "-": o.sub,
  "*": o.mul,
  "/": o.div,
  "|": max,
  "&": o.mul,
  ">": to_int(gt),
  "<": to_int(lt),
  ">=": to_int(gte),
  "<=": to_int(lte),
  "==": to_int(eq),
  "!=": to_int(neq)
}
| dr-bigfatnoob/quirk | language/functions.py | Python | unlicense | 1,698 | 0.009423 |
#!/usr/bin/env python
"""Distutils setup file, used to install or test 'setuptools'"""
import textwrap
import sys
# This legacy wrapper can only run where setuptools is importable (the
# Distribute 0.6 line was merged back into setuptools 0.7); bail out early.
try:
    import setuptools
except ImportError:
    sys.stderr.write("Distribute 0.7 may only upgrade an existing "
                     "Distribute 0.6 installation")
    raise SystemExit(1)
long_description = textwrap.dedent("""
Distribute - legacy package
This package is a simple compatibility layer that installs Setuptools 0.7+.
""").lstrip()
# Packaging metadata; the only real payload is the install_requires pin
# pulling in setuptools>=0.7.
setup_params = dict(
    name="distribute",
    version='0.7.3',
    description="distribute legacy wrapper",
    author="The fellowship of the packaging",
    author_email="[email protected]",
    license="PSF or ZPL",
    long_description=long_description,
    keywords="CPAN PyPI distutils eggs package management",
    url="http://packages.python.org/distribute",
    zip_safe=True,
    classifiers=textwrap.dedent("""
        Development Status :: 5 - Production/Stable
        Intended Audience :: Developers
        License :: OSI Approved :: Python Software Foundation License
        License :: OSI Approved :: Zope Public License
        Operating System :: OS Independent
        Programming Language :: Python :: 2.4
        Programming Language :: Python :: 2.5
        Programming Language :: Python :: 2.6
        Programming Language :: Python :: 2.7
        Programming Language :: Python :: 3
        Programming Language :: Python :: 3.1
        Programming Language :: Python :: 3.2
        Programming Language :: Python :: 3.3
        Topic :: Software Development :: Libraries :: Python Modules
        Topic :: System :: Archiving :: Packaging
        Topic :: System :: Systems Administration
        Topic :: Utilities
        """).strip().splitlines(),
    install_requires=[
        'setuptools>=0.7',
    ],
)
if __name__ == '__main__':
    setuptools.setup(**setup_params)
| timohtey/mediadrop_copy | mediacore_env/Lib/site-packages/distribute-0.7.3/setup.py | Python | gpl-3.0 | 1,879 | 0.000532 |
from django.core.management.base import BaseCommand, CommandError
from tweets.tasks import stream
#The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
    """``manage.py stream``: start the Twitter stream consumer."""
    # Show this when the user types help
    help = "My twitter stream command"
    # A command must define handle()
    def handle(self, *args, **options):
        # Delegate all work to the stream() task imported from tweets.tasks.
        stream()
| shazadan/mood-map | tweets/management/commands/stream.py | Python | gpl-2.0 | 361 | 0.00554 |
#!/usr/bin/env python
# coding=utf-8
"""
Created on April 15 2017
@author: yytang
"""
from scrapy import Selector
from libs.misc import get_spider_name_from_domain
from libs.polish import polish_title, polish_subtitle, polish_content
from novelsCrawler.spiders.novelSpider import NovelSpider
class PiaotianSpider(NovelSpider):
    """
    classdocs
    example: https://www.piaotian.com/html/9/9459/index.html
    """
    allowed_domains = ['www.piaotian.com']
    name = get_spider_name_from_domain(allowed_domains[0])
    # custom_settings = {
    #     'DOWNLOAD_DELAY': 0.3,
    # }
    def parse_title(self, response):
        # The page's first <h1> carries the novel title; polish_title
        # normalises it for this spider.
        sel = Selector(response)
        title = sel.xpath('//h1/text()').extract()[0]
        title = polish_title(title, self.name)
        return title
    def parse_episodes(self, response):
        # Each <li><a> under div.centent is one chapter link; returns
        # (page_id, chapter_name, absolute_url) tuples in page order.
        sel = Selector(response)
        episodes = []
        subtitle_selectors = sel.xpath('//div[@class="centent"]/ul/li/a')
        for page_id, subtitle_selector in enumerate(subtitle_selectors):
            subtitle_url = subtitle_selector.xpath('@href').extract()[0]
            subtitle_url = response.urljoin(subtitle_url.strip())
            subtitle_name = subtitle_selector.xpath('text()').extract()[0]
            subtitle_name = polish_subtitle(subtitle_name)
            episodes.append((page_id, subtitle_name, subtitle_url))
        return episodes
    def parse_content(self, response):
        # sel = Selector(response)
        # content = sel.xpath('//div[@id="content"]/p/text()').extract()
        # content = polish_content(content)
        # Chapter pages are GBK-encoded; the body is scraped with a regex
        # instead of the (commented-out) XPath approach above.
        html = str(response.body.decode('GBK'))
        # NOTE(review): this single-space anchor looks fragile (possibly a
        # garbled '&nbsp;' marker) -- verify against the live chapter markup.
        pattern = r' (.*)'
        import re
        m = re.search(pattern, html)
        if m:
            content = m.group(1)
        else:
            content = ''
        content = content.replace('<br /><br /> ', '\n\n')
        return content
| yytang2012/novels-crawler | novelsCrawler/spiders/piaotian.py | Python | mit | 1,931 | 0.000518 |
a, b = <warning descr="Need more values to unpack">None</warning> | signed/intellij-community | python/testData/inspections/PyTupleAssignmentBalanceInspectionTest/unpackNonePy3.py | Python | apache-2.0 | 65 | 0.076923 |
"""
@author: dhoomakethu
"""
from __future__ import absolute_import, unicode_literals | dhoomakethu/apocalypse | apocalypse/server/__init__.py | Python | mit | 86 | 0.011628 |
#!/usr/env/bin/ python3
from setuptools import setup, Extension
#
#CXX_FLAGS = "-O3 -std=gnu++11 -Wall -Wno-comment"
#
## List of C/C++ sources that will conform the library
#sources = [
#
# "andrnx/clib/android.c",
#
#]
setup(name="andrnx",
version="0.1",
description="Package to convert from GNSS logger to Rinex files",
author='Miquel Garcia',
author_email='[email protected]',
url='https://www.rokubun.cat',
packages=['andrnx'],
test_suite="andrnx.test",
scripts=['bin/gnsslogger_to_rnx'])
| rokubun/android_rinex | setup.py | Python | bsd-2-clause | 547 | 0.007313 |
class DrawingDimensioningWorkbench (Workbench):
# Icon generated using by converting linearDimension.svg to xpm format using Gimp
Icon = '''
/* XPM */
static char * linearDimension_xpm[] = {
"32 32 10 1",
" c None",
". c #000000",
"+ c #0008FF",
"@ c #0009FF",
"# c #000AFF",
"$ c #00023D",
"% c #0008F7",
"& c #0008EE",
"* c #000587",
"= c #000001",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". +@@ + .",
". @+@@+ +@@+@ .",
". +@+@@@@@@ @@@@@@@# .",
"$%@@@@@@@@@+@@@@@@@@@@@@@@@@@@&$",
". #@@@@@@@@ #+@@@@@@@@*=",
". @+@@+ +@@@@@ .",
". +@ #@++ .",
". # .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". ."};
'''
MenuText = 'Drawing Dimensioning'
    def Initialize(self):
        # Workbench activation hook (FreeCAD calls this once).  Importing the
        # command modules presumably registers their dd_* GUI commands with
        # FreeCADGui -- the names listed below must match; verify on changes.
        import importlib, os
        from dimensioning import __dir__, debugPrint, iconPath
        import linearDimension
        import linearDimension_stack
        import deleteDimension
        import circularDimension
        import grabPointAdd
        import textAdd
        import textEdit
        import textMove
        import escapeDimensioning
        import angularDimension
        import radiusDimension
        import centerLines
        import noteCircle
        import toleranceAdd
        commandslist = [
            'dd_linearDimension', #where dd is short-hand for drawing dimensioning
            'dd_linearDimensionStack',
            'dd_circularDimension',
            'dd_radiusDimension',
            'dd_angularDimension',
            'dd_centerLines',
            'dd_centerLine',
            'dd_noteCircle',
            'dd_grabPoint',
            'dd_addText',
            'dd_editText',
            'dd_moveText',
            'dd_addTolerance',
            'dd_deleteDimension',
            'dd_escapeDimensioning',
            ]
        self.appendToolbar('Drawing Dimensioning', commandslist)
        import unfold
        import unfold_bending_note
        import unfold_export_to_dxf
        unfold_cmds = [
            'dd_unfold',
            'dd_bendingNote',
            ]
        if hasattr(os,'uname') and os.uname()[0] == 'Linux' : #this command only works on Linux systems
            unfold_cmds.append('dd_exportToDxf')
        self.appendToolbar( 'Drawing Dimensioning Folding', unfold_cmds )
        import weldingSymbols
        # NOTE(review): FreeCAD.Version() returns strings; comparing
        # Version()[1] to the int 15 relies on Python 2 mixed-type
        # comparison rules -- verify this gate on newer FreeCAD builds.
        if int( FreeCAD.Version()[1] > 15 ) and int( FreeCAD.Version()[2].split()[0] ) > 5165:
            weldingCommandList = ['dd_weldingGroupCommand']
        else:
            weldingCommandList = weldingSymbols.weldingCmds
        self.appendToolbar('Drawing Dimensioning Welding Symbols', weldingCommandList)
        self.appendToolbar('Drawing Dimensioning Help', [ 'dd_help' ])
        FreeCADGui.addIconPath(iconPath)
        FreeCADGui.addPreferencePage( os.path.join( __dir__, 'Resources', 'ui', 'drawing_dimensioing_prefs-base.ui'),'Drawing Dimensioning' )
Gui.addWorkbench(DrawingDimensioningWorkbench())
| ulikoehler/FreeCAD_drawing_dimensioning | InitGui.py | Python | gpl-3.0 | 3,832 | 0.008351 |
# Copyright (c) 2009-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from tortoisehg.util import hglib, patchctx
from tortoisehg.hgqt.qtlib import geticon, getoverlaidicon
from PyQt4.QtCore import *
from PyQt4.QtGui import *
nullvariant = QVariant()
def getSubrepoIcoDict():
    """Map each subrepo type name to its icon, decorated with the
    subrepo overlay badge."""
    iconnames = {
        'hg': 'hg',
        'git': 'thg-git-subrepo',
        'svn': 'thg-svn-subrepo',
        'hgsubversion': 'thg-svn-subrepo',
        'empty': 'hg'
    }
    overlay = geticon('thg-subrepo')
    return dict((stype, getoverlaidicon(geticon(name), overlay))
                for stype, name in iconnames.items())
class HgFileListModel(QAbstractTableModel):
    """
    Model used for listing (modified) files of a given Hg revision
    """
    showMessage = pyqtSignal(QString)
    def __init__(self, parent):
        QAbstractTableModel.__init__(self, parent)
        self._boldfont = parent.font()
        self._boldfont.setBold(True)
        self._ctx = None          # current changectx (or patchctx); None until setContext
        self._files = []          # list of per-file description dicts (see _buildDesc)
        self._filesdict = {}      # path -> description dict, kept in sync by loadFiles
        self._fulllist = False    # when True and merging, list files from both parents
        self._subrepoIcoDict = getSubrepoIcoDict()
    @pyqtSlot(bool)
    def toggleFullFileList(self, value):
        # Switch between "changed files only" and "full merge file list".
        self._fulllist = value
        self.loadFiles()
        self.layoutChanged.emit()
    def __len__(self):
        return len(self._files)
    def rowCount(self, parent=None):
        return len(self)
    def columnCount(self, parent=None):
        # Single column: the file path.
        return 1
    def file(self, row):
        return self._files[row]['path']
    def setContext(self, ctx):
        # Reload when no context is set yet, when showing the working
        # directory (rev None), or when the context identity changed.
        reload = False
        if not self._ctx:
            reload = True
        elif self._ctx.rev() is None:
            reload = True
        elif ctx.thgid() != self._ctx.thgid():
            reload = True
        if reload:
            self._ctx = ctx
            self.loadFiles()
            self.layoutChanged.emit()
    def fileFromIndex(self, index):
        if not index.isValid() or index.row()>=len(self) or not self._ctx:
            return None
        row = index.row()
        return self._files[row]['path']
    def dataFromIndex(self, index):
        if not index.isValid() or index.row()>=len(self) or not self._ctx:
            return None
        row = index.row()
        return self._files[row]
    def indexFromFile(self, filename):
        if filename in self._filesdict:
            row = self._files.index(self._filesdict[filename])
            return self.index(row, 0)
        return QModelIndex()
    def _buildDesc(self, parent):
        # Build the per-file description dicts for changes against parent
        # number `parent` (0 or 1).  Each dict has keys: path, status
        # ('A'/'M'/'R'/'S'), parent, wasmerged, and stype for subrepos.
        files = []
        ctxfiles = self._ctx.files()
        modified, added, removed = self._ctx.changesToParent(parent)
        ismerge = bool(self._ctx.p2())
        # Add the list of modified subrepos to the top of the list
        if not isinstance(self._ctx, patchctx.patchctx):
            if ".hgsubstate" in ctxfiles or ".hgsub" in ctxfiles:
                from mercurial import subrepo
                # Add the list of modified subrepos
                for s, sd in self._ctx.substate.items():
                    srev = self._ctx.substate.get(s, subrepo.nullstate)[1]
                    stype = self._ctx.substate.get(s, subrepo.nullstate)[2]
                    sp1rev = self._ctx.p1().substate.get(s, subrepo.nullstate)[1]
                    sp2rev = ''
                    if ismerge:
                        sp2rev = self._ctx.p2().substate.get(s, subrepo.nullstate)[1]
                    if srev != sp1rev or (sp2rev != '' and srev != sp2rev):
                        wasmerged = ismerge and s in ctxfiles
                        files.append({'path': s, 'status': 'S', 'parent': parent,
                                      'wasmerged': wasmerged, 'stype': stype})
                # Add the list of missing subrepos
                subreposet = set(self._ctx.substate.keys())
                subrepoparent1set = set(self._ctx.p1().substate.keys())
                missingsubreposet = subrepoparent1set.difference(subreposet)
                for s in missingsubreposet:
                    wasmerged = ismerge and s in ctxfiles
                    stype = self._ctx.p1().substate.get(s, subrepo.nullstate)[2]
                    files.append({'path': s, 'status': 'S', 'parent': parent,
                                  'wasmerged': wasmerged, 'stype': stype})
        # In full-list merge mode keep every changed file; otherwise keep
        # only files recorded in the changeset itself.
        if self._fulllist and ismerge:
            func = lambda x: True
        else:
            func = lambda x: x in ctxfiles
        for lst, flag in ((added, 'A'), (modified, 'M'), (removed, 'R')):
            for f in filter(func, lst):
                wasmerged = ismerge and f in ctxfiles
                f = self._ctx.removeStandin(f)
                files.append({'path': f, 'status': flag, 'parent': parent,
                              'wasmerged': wasmerged})
        return files
    def loadFiles(self):
        # Rebuild _files/_filesdict: first-parent changes, then (for merges)
        # any second-parent changes not already listed.
        self._files = []
        try:
            self._files = self._buildDesc(0)
            if bool(self._ctx.p2()):
                _paths = [x['path'] for x in self._files]
                _files = self._buildDesc(1)
                self._files += [x for x in _files if x['path'] not in _paths]
        except EnvironmentError, e:
            self.showMessage.emit(hglib.tounicode(str(e)))
        self._filesdict = dict([(f['path'], f) for f in self._files])
    def data(self, index, role):
        # NOTE(review): this guard uses row() > len(self) while
        # fileFromIndex/dataFromIndex use >= -- row == len(self) would
        # slip through to an IndexError below; likely an off-by-one, verify.
        if not index.isValid() or index.row()>len(self) or not self._ctx:
            return nullvariant
        if index.column() != 0:
            return nullvariant
        row = index.row()
        column = index.column()
        current_file_desc = self._files[row]
        current_file = current_file_desc['path']
        if role in (Qt.DisplayRole, Qt.ToolTipRole):
            return QVariant(hglib.tounicode(current_file))
        elif role == Qt.DecorationRole:
            if self._fulllist and bool(self._ctx.p2()):
                # Merge view: icon reflects merge provenance (merged / p0 / p1).
                if current_file_desc['wasmerged']:
                    icn = geticon('thg-file-merged')
                elif current_file_desc['parent'] == 0:
                    icn = geticon('thg-file-p0')
                elif current_file_desc['parent'] == 1:
                    icn = geticon('thg-file-p1')
                return QVariant(icn.pixmap(20,20))
            elif current_file_desc['status'] == 'A':
                return QVariant(geticon('fileadd'))
            elif current_file_desc['status'] == 'R':
                return QVariant(geticon('filedelete'))
            elif current_file_desc['status'] == 'S':
                stype = current_file_desc.get('stype', 'hg')
                return QVariant(self._subrepoIcoDict[stype])
            #else:
            #    return QVariant(geticon('filemodify'))
        elif role == Qt.FontRole:
            # Merged files are rendered in bold.
            if current_file_desc['wasmerged']:
                return QVariant(self._boldfont)
            else:
                return nullvariant
| gilshwartz/tortoisehg-caja | tortoisehg/hgqt/filelistmodel.py | Python | gpl-2.0 | 7,732 | 0.002069 |
"""Maya initialisation for Mindbender pipeline"""
from maya import cmds
def setup():
    """Register the Pyblish GUI and install the Mindbender Maya host."""
    assert __import__("pyblish_maya").is_setup(), (
        "pyblish-mindbender depends on pyblish_maya which has not "
        "yet been setup. Run pyblish_maya.setup()")
    from pyblish import api as pyblish_api
    pyblish_api.register_gui("pyblish_lite")
    from mindbender import api as mindbender_api, maya
    mindbender_api.install(maya)
# Defer setup() until Maya is idle so that dependencies
# (e.g. pyblish-maya) have a chance to be installed first.
cmds.evalDeferred(setup)
| pyblish/pyblish-mindbender | mindbender/maya/pythonpath/userSetup.py | Python | mit | 488 | 0 |
""":mod:`kinsumer.checkpointer` --- Persisting positions for Kinesis shards
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import abc
import json
import os.path
from typing import Optional, Dict
class Checkpointer(abc.ABC):
    """Interface for persisting per-shard positions of a Kinesis stream.

    Implementations map shard ids to the sequence number of the last
    successfully processed record so that consumption can resume where
    it left off.  (The redundant ``object`` base was dropped: ``abc.ABC``
    already derives from ``object`` on Python 3.)
    """

    @abc.abstractmethod
    def get_checkpoints(self) -> Dict[str, str]:
        """Get a dictionary whose keys are all the shard ids we are aware of,
        and whose values are the sequence id of the last record processed for
        its shard
        """

    @abc.abstractmethod
    def checkpoint(self, shard_id: str, sequence: str) -> None:
        """Persist the sequence number for a given shard"""

    @abc.abstractmethod
    def get_checkpoint(self, shard_id: str) -> Optional[str]:
        """Get the sequence number of the last successfully processed record"""
class InMemoryCheckpointer(Checkpointer):
    """Checkpointer that keeps shard positions in a plain dict (volatile)."""

    def __init__(self) -> None:
        self._checkpoints: Dict[str, str] = {}

    def get_checkpoints(self) -> Dict[str, str]:
        # Hand back a snapshot so callers cannot mutate internal state.
        return dict(self._checkpoints)

    def checkpoint(self, shard_id: str, sequence: str) -> None:
        self._checkpoints[shard_id] = sequence

    def get_checkpoint(self, shard_id: str) -> Optional[str]:
        return self._checkpoints.get(shard_id)
class FileCheckpointer(InMemoryCheckpointer):
    """Checkpointer that mirrors the in-memory state to a local JSON file."""

    def __init__(self, file: str) -> None:
        super().__init__()
        self.file = os.path.expanduser(file)
        # Seed the in-memory map from an existing checkpoint file, if any.
        if os.path.exists(self.file):
            with open(self.file, 'rb') as handle:
                self._checkpoints = json.load(handle)

    def checkpoint(self, shard_id: str, sequence: str) -> None:
        super().checkpoint(shard_id, sequence)
        # Rewrite the whole file on every checkpoint.
        payload = json.dumps(self._checkpoints, ensure_ascii=False).encode()
        with open(self.file, 'wb') as handle:
            handle.write(payload)
| balancehero/kinsumer | kinsumer/checkpointer.py | Python | mit | 1,882 | 0.000531 |
class ParametrizedError(Exception):
    """Base error carrying a problem description and the offending value."""

    def __init__(self, problem, invalid):
        self.problem = str(problem)
        self.invalid = str(invalid)

    def __str__(self):
        # __str__ must *return* the message.  The original printed it and
        # implicitly returned None, which makes str(err) raise TypeError.
        return '--- Error: {0}\n--- Caused by: {1}'.format(self.problem, self.invalid)
# Specialised error types raised by different stages; each inherits the
# (problem, invalid) payload and formatting from ParametrizedError.
class InvalidToken(ParametrizedError):
    pass
class ToneError(ParametrizedError):
    pass
class IntervalError(ParametrizedError):
    pass
class TriolaError(ParametrizedError):
    pass
class ConfigError(ParametrizedError):
    pass
class ComposingError(ParametrizedError):
    pass
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class TestFailoverStatus(object):
    def test_get_status(self, request, mgmt_root):
        """The failover-status resource refreshes and reports ACTIVE."""
        failover_status = mgmt_root.tm.cm.failover_status
        assert failover_status._meta_data['uri'].endswith(
            "/mgmt/tm/cm/failover-status/")
        failover_status.refresh()
        stats = failover_status.entries[
            'https://localhost/mgmt/tm/cm/failover-status/0']['nestedStats']
        description = stats['entries']['status']['description']
        assert description == "ACTIVE"
| F5Networks/f5-common-python | f5/bigip/tm/cm/test/functional/test_failover_status.py | Python | apache-2.0 | 1,104 | 0.000906 |
# third party
import numpy as np
import pytest
# syft absolute
from syft.core.tensor.smpc.share_tensor import ShareTensor
@pytest.mark.smpc
def test_bit_extraction() -> None:
    """bit_extraction returns the requested bit of each int32 element."""
    share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
    share.child = np.array([[21, 32], [-54, 89]], dtype=np.int32)

    # Bit 31 is the int32 sign bit, so only the negative entry is True.
    expected_sign_bits = np.array([[False, False], [True, False]], dtype=np.bool_)
    assert (share.bit_extraction(31).child == expected_sign_bits).all()

    expected_bit_two = np.array([[True, False], [False, False]], dtype=np.bool_)
    assert (share.bit_extraction(2).child == expected_bit_two).all()
@pytest.mark.smpc
def test_bit_extraction_exception() -> None:
    """Out-of-range bit positions are rejected."""
    share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
    share.child = np.array([[21, 32], [-54, 89]], dtype=np.int32)

    # 33 is past the last bit of the 2**32 ring; negatives are invalid too.
    with pytest.raises(Exception):
        share >> 33
    with pytest.raises(Exception):
        share >> -1
| OpenMined/PySyft | tests/integration/smpc/tensor/share_tensor_test.py | Python | apache-2.0 | 975 | 0 |
### hierarchical_clustering.py
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" Batch script for extracting many junction.bed and building exon.bed files from
an input set of BAM files in a directory. Requires a reference text file containing
exon regions (currently provided from AltAnalyze - see ReferenceExonCoordinates
folder). Can produce only junction.bed files, only a combined exon reference or only
exon.bed files optionally. Can run using a single processor or multiple simultaneous
processes (--m flag)."""
import export
import string
import time
import sys, os
import shutil
import unique
import subprocess
import BAMtoJunctionBED
import BAMtoExonBED
import getopt
import traceback
################# General data import methods #################
def filepath(filename):
    """Resolve *filename* through the project's path helper."""
    return unique.filepath(filename)
def cleanUpLine(line):
    """Strip newline/carriage-return artifacts, literal '\\c' markers and
    double quotes from a text line."""
    # str methods replace the deprecated string-module functions and work
    # on both Python 2 and Python 3; '\\c' spells the backslash explicitly.
    data = line.replace('\n', '')
    data = data.replace('\\c', '')
    data = data.replace('\r', '')
    data = data.replace('"', '')
    return data
def getFolders(sub_dir):
    """Return only folder names in *sub_dir* (entries without a '.')."""
    folders = []
    for entry in unique.read_directory(sub_dir):
        # Heuristic: names containing '.' are treated as files.
        if '.' not in entry:
            folders.append(entry)
    return folders
def getFiles(sub_dir):
    """Return only file names in *sub_dir* (entries containing a '.')."""
    found = []
    for entry in unique.read_directory(sub_dir):
        if '.' in entry:
            found.append(entry)
    return found
def getFolders(sub_dir):
    # NOTE(review): exact duplicate of the getFolders defined above; this
    # second definition shadows the first and could be deleted.
    dir_list = unique.read_directory(sub_dir); dir_list2 = []
    ###Only get folder names
    for entry in dir_list:
        if '.' not in entry: dir_list2.append(entry)
    return dir_list2
def parallelBAMProcessing(directory,refExonCoordinateFile,bed_reference_dir,analysisType=[],useMultiProcessing=False,MLP=None,root=None):
    """Drive junction/exon BED extraction for the BAM files in *directory*.

    analysisType selects the stages to run ('junction', 'reference',
    'exon'; empty list means all three).  MLP is the multiprocessing
    module when useMultiProcessing is enabled; root is an optional GUI
    root used only for warning dialogs.
    NOTE(review): the mutable default analysisType=[] is never mutated
    here, but a None default would be safer practice.
    """
    paths_to_run=[]
    errors=[]
    if '.bam' in directory:
        ### Allow a single BAM file to be specifically analyzed (e.g., bsub operation)
        bam_file = directory
        bam_file = string.replace(directory,'\\','/')
        directory = string.join(string.split(directory,'/')[:-1],'/')
    else:
        bam_file = None
    outputExonCoordinateRefBEDfile = str(bed_reference_dir)
    bed_reference_dir = string.replace(bed_reference_dir,'\\','/')
    ### Check if the BAM files are located in the target folder (not in subdirectories)
    files = getFiles(directory)
    for file in files:
        if '.bam' in file and '.bai' not in file:
            source_file = directory+'/'+file
            source_file = filepath(source_file)
            output_filename = string.replace(file,'.bam','')
            output_filename = string.replace(output_filename,'=','_')
            destination_file = directory+'/'+output_filename+'__exon.bed'
            destination_file = filepath(destination_file)
            paths_to_run.append((source_file,refExonCoordinateFile,bed_reference_dir,destination_file))
    ### Otherwise, check subdirectories for BAM files
    folders = getFolders(directory)
    if len(paths_to_run)==0:
        for top_level in folders:
            try:
                files = getFiles(directory+'/'+top_level)
                for file in files:
                    if '.bam' in file and '.bai' not in file:
                        # NOTE(review): the file was listed under
                        # directory/top_level but the source path omits
                        # top_level - looks like a bug; confirm intent.
                        source_file = directory+'/'+file
                        source_file = filepath(source_file)
                        destination_file = directory+'/'+top_level+'__exon.bed'
                        destination_file = filepath(destination_file)
                        paths_to_run.append((source_file,refExonCoordinateFile,bed_reference_dir,destination_file))
            except Exception: pass
    ### If a single BAM file is indicated
    if bam_file != None:
        output_filename = string.replace(bam_file,'.bam','')
        output_filename = string.replace(output_filename,'=','_')
        destination_file = output_filename+'__exon.bed'
        paths_to_run = [(bam_file,refExonCoordinateFile,bed_reference_dir,destination_file)]
    if 'reference' in analysisType and len(analysisType)==1:
        # Reference-only mode: build the exon reference and quit.
        augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
        sys.exit()
    if useMultiProcessing:
        pool_size = MLP.cpu_count()
        if len(paths_to_run)<pool_size:
            pool_size = len(paths_to_run)
        print 'Using %d processes' % pool_size
        # NOTE(review): this re-expands the pool to one worker per BAM,
        # contradicting the clamp above - confirm which is intended.
        if len(paths_to_run) > pool_size:
            pool_size = len(paths_to_run)
        if len(analysisType) == 0 or 'junction' in analysisType:
            print 'Extracting junction alignments from BAM files...',
            pool = MLP.Pool(processes=pool_size)
            try: results = pool.map(runBAMtoJunctionBED, paths_to_run) ### worker jobs initiated in tandem
            except ValueError:
                print_out = '\WARNING!!! No Index found for the BAM files (.bam.bai). Sort and Index using Samtools prior to loading in AltAnalyze'
                print traceback.format_exc()
                if root!=None:
                    import UI
                    UI.WarningWindow(print_out,'Exit');sys.exit()
            try:pool.close(); pool.join(); pool = None
            except Exception: pass
            print_out=None
            # Report any chromosomes each worker failed to find.
            for sample,missing in results:
                if len(missing)>1:
                    print_out = '\nWarning!!! %s chromosomes not found in: %s (PySam platform-specific error)' % (string.join(missing,', '),sample)
            if root!=None and print_out!=None:
                try:
                    import UI
                    UI.WarningWindow(print_out,'Continue')
                except Exception: pass
            print len(paths_to_run), 'BAM files','processed'
        if len(analysisType) == 0 or 'reference' in analysisType:
            #print 'Building exon reference coordinates from Ensembl/UCSC and all junctions...',
            augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
            #print 'completed'
        print 'Extracting exon alignments from BAM files...',
        if len(analysisType) == 0 or 'exon' in analysisType:
            pool = MLP.Pool(processes=pool_size)
            results = pool.map(runBAMtoExonBED, paths_to_run) ### worker jobs initiated in tandem
            try:pool.close(); pool.join(); pool = None
            except Exception: pass
            print len(paths_to_run), 'BAM files','processed'
    else:
        # Single-process fallback: run each stage sequentially.
        if len(analysisType) == 0 or 'junction' in analysisType:
            for i in paths_to_run:
                runBAMtoJunctionBED(i)
        if len(analysisType) == 0 or 'reference' in analysisType:
            augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
        if len(analysisType) == 0 or 'exon' in analysisType:
            for i in paths_to_run:
                runBAMtoExonBED(i)
def runBAMtoJunctionBED(paths_to_run):
    """Worker: parse junction entries from a single BAM file.

    *paths_to_run* is the (bam, reference, bed_dir, destination) tuple
    produced by parallelBAMProcessing.
    """
    bamfile_dir, refExonCoordinateFile, bed_reference_dir, output_bedfile_path = paths_to_run
    # The junction bed path is derived from the BAM name; the exon-bed
    # destination supplied in the tuple is not used here.
    output_bedfile_path = bamfile_dir.replace('.bam', '__junction.bed')
    return BAMtoJunctionBED.parseJunctionEntries(
        bamfile_dir, multi=True, ReferenceDir=refExonCoordinateFile)
def runBAMtoExonBED(paths_to_run):
    """Worker: count exon-level alignments for one BAM against the BED
    reference.  NOTE(review): both branches perform the identical call;
    the existence check only changes the message printed."""
    bamfile_dir,refExonCoordinateFile,bed_reference_dir,output_bedfile_path = paths_to_run
    if os.path.exists(output_bedfile_path) == False: ### Only run if the file doesn't exist
        BAMtoExonBED.parseExonReferences(bamfile_dir,bed_reference_dir,multi=True)
    else:
        print output_bedfile_path, 'already exists... re-writing'
        BAMtoExonBED.parseExonReferences(bamfile_dir,bed_reference_dir,multi=True)
def getChrFormat(directory):
    """Return True if junction.bed files in *directory* use 'chr'-prefixed
    chromosome names (inspects the first data line of the first junction
    bed found; defaults to True when none is present)."""
    ### Determine if the chromosomes have 'chr' or nothing
    files = getFiles(directory)
    chr_status=True
    for file in files:
        firstLine=True
        if 'junction' in file and '.bed' in file:
            for line in open(directory+'/'+file,'rU').xreadlines():
                if firstLine: firstLine=False
                else:
                    t = string.split(line)
                    chr = t[0]
                    if 'chr' not in chr:
                        chr_status = False
                    # Only the first data line is examined.
                    break
            break
    return chr_status
def augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile):
    """Write an exon-coordinate BED reference augmented with novel splice
    sites discovered in the junction.bed files under *directory*.

    Known exon coordinates are copied from refExonCoordinateFile; a
    junction splice site is considered novel when exactly one of its two
    ends matches a known site.  Nearby novel 3'/5' pairs falling within a
    known intron are paired into a single record.
    """
    print 'Building reference bed file from all junction.bed files'
    splicesite_db={} ### reference splice-site database (we only want to add novel splice-sites to our reference)
    real_splicesites={}
    introns={}
    novel_db={}
    reference_toplevel = string.join(string.split(outputExonCoordinateRefBEDfile,'/')[:-1],'/')
    try: os.mkdir(reference_toplevel) ### If the bed folder doesn't exist
    except Exception: pass
    chr_status = getChrFormat(directory)
    o = open (outputExonCoordinateRefBEDfile,"w")
    #refExonCoordinateFile = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Mm/Mm_Ensembl_exon.txt'
    reference_rows=0
    if '.gtf' in refExonCoordinateFile: firstLine = False
    else: firstLine = True
    # Pass 1: copy the known exon regions and index their splice sites.
    for line in open(refExonCoordinateFile,'rU').xreadlines():
        if firstLine: firstLine=False
        else:
            line = line.rstrip('\n')
            reference_rows+=1
            t = string.split(line,'\t'); # gene, exon-id, chromosome, strand, start, stop, ...
            geneID, exon, chr, strand, start, stop = t[:6]
            if chr_status == False:
                chr = string.replace(chr,'chr','')
            o.write(string.join([chr,start,stop,geneID+':'+exon,'',strand],'\t')+'\n')
            start = int(start); stop = int(stop)
            #geneID = string.split(exon,':')[0]
            splicesite_db[chr,start]=geneID
            splicesite_db[chr,stop]=geneID
            # Exon ids containing 'I' denote intron regions.
            if 'I' in exon:
                try: introns[geneID].append([start,stop])
                except Exception: introns[geneID] = [[start,stop]]
    files = getFiles(directory)
    # Pass 2: scan every junction.bed for splice sites not in the reference.
    for file in files:
        firstLine=True
        if 'junction' in file and '.bed' in file:
            for line in open(directory+'/'+file,'rU').xreadlines():
                if firstLine: firstLine=False
                else:
                    line = line.rstrip('\n')
                    t = string.split(line,'\t'); # chrom, start, stop, junction-id, reads, strand, ..., block lengths, ...
                    chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
                    exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
                    exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
                    if strand == '-':
                        exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
                        ### Exons have the opposite order
                        a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
                        exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
                    else:
                        exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
                    seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
                    key = chr,exon1_stop,exon2_start
                    if (chr,exon1_stop) not in splicesite_db: ### record the splice site and position of the max read
                        if (chr,exon2_start) in splicesite_db: ### only include splice sites where one site is known
                            geneID = splicesite_db[(chr,exon2_start)]
                            novel_db[chr,exon1_stop,strand] = exon1_start,geneID,5
                            real_splicesites[chr,exon2_start]=None
                    elif (chr,exon2_start) not in splicesite_db: ### record the splice site and position of the max read
                        if (chr,exon1_stop) in splicesite_db: ### only include splice sites where one site is known
                            #if 121652702 ==exon2_start:
                            #print chr, exon1_start,exon1_stop,exon2_start,exon2_stop, strand;sys.exit()
                            geneID = splicesite_db[(chr,exon1_stop)]
                            novel_db[chr,exon2_start,strand] = exon2_stop,geneID,3
                            real_splicesites[chr,exon1_stop]=None
                    else:
                        real_splicesites[chr,exon1_stop]=None
                        real_splicesites[chr,exon2_start]=None
    print len(novel_db), 'novel splice sites and', len(real_splicesites), 'known splice sites.'
    # Group novel sites by (chromosome, gene, strand) for pairing below.
    gene_organized={}
    for (chr,pos1,strand) in novel_db:
        pos2,geneID,type = novel_db[(chr,pos1,strand)]
        try: gene_organized[chr,geneID,strand].append([pos1,pos2,type])
        except Exception: gene_organized[chr,geneID,strand] = [[pos1,pos2,type]]
    def intronCheck(geneID,coords):
        ### see if the coordinates are within a known intron of the gene
        try:
            for ic in introns[geneID]:
                if withinQuery(ic,coords):
                    return True
        except Exception:
            pass
    def withinQuery(ls1,ls2):
        # True when the ls2 range is fully contained in the ls1 range.
        imax = max(ls1)
        imin = min(ls1)
        qmax = max(ls2)
        qmin = min(ls2)
        if qmin >= imin and qmax <= imax:
            return True
        else:
            return False
    ### Compare the novel splice site locations in each gene
    added=[]
    for (chr,geneID,strand) in gene_organized:
        gene_organized[(chr,geneID,strand)].sort()
        if strand == '-':
            gene_organized[(chr,geneID,strand)].reverse()
        i=0
        set = gene_organized[(chr,geneID,strand)]
        for (pos1,pos2,type) in set:
            k = [pos1,pos2]
            annotation='novel'
            # Pair a 3' site with an adjacent 5' site (< 300bp apart,
            # inside a known intron) into a single 'novel-paired' record.
            if i==0 and type == 3:
                if len(set)>1:
                    if set[i+1][-1]==5:
                        l = [set[i+1][0],pos1]
                        if (max(l)-min(l))<300 and intronCheck(geneID,l):
                            k=l
                            #print chr,k
                            annotation='novel-paired'
            elif type == 5:
                if set[i-1][-1]==3:
                    l = [set[i-1][0],pos1]
                    if (max(l)-min(l))<300 and intronCheck(geneID,l):
                        k=l
                        #print chr,k
                        annotation='novel-paired'
            k.sort(); i+=1
            if k not in added:
                values = string.join([chr,str(k[0]),str(k[1]),geneID+':'+annotation,'',strand],'\t')+'\n'
                added.append(k)
                o.write(values)
    o.close()
if __name__ == '__main__':
    # Command-line entry point: parse options and hand off to
    # parallelBAMProcessing.
    import multiprocessing as mlp
    refExonCoordinateFile = ''
    outputExonCoordinateRefBEDfile = ''
    #bam_dir = "H9.102.2.6.bam"
    #outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed'
    ################ Comand-line arguments ################
    if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
        print "Warning! Please designate a directory containing BAM files as input in the command-line"
        print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --g /Users/me/ReferenceExonCoordinates/Hs_Ensembl_exon_hg19.txt --r /Users/me/ExonBEDRef/Hs_Ensembl_exon-cancer_hg19.bed --a exon --a junction --a reference"
        print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --a junction"
        sys.exit()
    else:
        analysisType = []
        useMultiProcessing=False
        options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','r=','a=','m='])
        for opt, arg in options:
            if opt == '--i': bam_dir=arg
            elif opt == '--g': refExonCoordinateFile=arg
            elif opt == '--r': outputExonCoordinateRefBEDfile=arg
            elif opt == '--a': analysisType.append(arg) ### options are: all, junction, exon, reference
            elif opt == '--m': ### Run each BAM file on a different processor
                if arg == 'yes': useMultiProcessing=True
                elif arg == 'True': useMultiProcessing=True
                else: useMultiProcessing=False
            else:
                print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
        if len(analysisType) == 0:
            analysisType = ['exon','junction','reference']
        # NOTE(review): the self-assignments below are no-op guards; the
        # try/except around them can only trip when a name is unbound
        # (i.e. --i was never supplied for bam_dir).
        try:
            refExonCoordinateFile = refExonCoordinateFile
            outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile
        except Exception:
            print 'Please provide a exon coordinate text file using the option --g and a output coordinate file path (--r) to generate exon.bed files'
            analysisType = ['junction']
            refExonCoordinateFile = ''
            outputExonCoordinateRefBEDfile = ''
        try: bam_dir = bam_dir
        except Exception: print 'You must specify a directory of BAM files or a single bam file with --i';sys.exit()
        try: refExonCoordinateFile = refExonCoordinateFile
        except Exception: print 'You must specify a AltAnalyze exon coordinate text file with --g';sys.exit()
        try: outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile
        except Exception: print 'You must specify an output path for the exon.bed reference file location with --r (e.g., --r /users/Hs_exon.bed)';sys.exit()
        parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=useMultiProcessing,MLP=mlp)
#!/usr/bin/env python3
from django.shortcuts import render
# Create your views here.
from CnbetaApis.datas.Models import *
from CnbetaApis.datas.get_letv_json import get_letv_json
from CnbetaApis.datas.get_youku_json import get_youku_json
from django.views.decorators.csrf import csrf_exempt
from django.http import *
from datetime import timezone, timedelta
import json
def getrelate(ids, session):
    """Build a list of {id, title, url} dicts for the articles in *ids*."""
    related_articles = session.query(Article).filter(Article.id.in_(ids))
    return [
        {'id': item.id, 'title': item.title, 'url': item.url}
        for item in related_articles
    ]
def get_home_data(request):
    """Return a JSON page of articles ordered by descending id.

    GET params: lastid (optional pagination cursor), limit (default 20).
    """
    if not request.method == 'GET':
        # A response object must be *returned*, not raised: HttpResponse
        # subclasses are not exceptions, so the original raise failed at
        # runtime.  The permitted methods are passed as a list.
        return HttpResponseNotAllowed(['GET'])
    lastID = request.GET.get('lastid')
    limit = request.GET.get('limit') or 20
    session = DBSession()
    try:
        if lastID:
            datas = session.query(Article).order_by(desc(Article.id)).filter(
                and_(Article.introduction != None, Article.id < lastID)).limit(limit).all()
        else:
            datas = session.query(Article).order_by(desc(Article.id)).limit(limit).all()
        values = []
        for data in datas:
            values.append({
                'id': data.id,
                'title': data.title,
                'url': data.url,
                'source': data.source,
                'imgUrl': data.imgUrl,
                'introduction': data.introduction,
                # Stored times are UTC+8; convert to a UTC epoch timestamp.
                'createTime': data.createTime.replace(tzinfo=timezone(timedelta(hours=8))).astimezone(timezone.utc).timestamp(),
                'related': getrelate(data.related.split(','), session),
                'readCount': data.readCount,
                'opinionCount': data.opinionCount,
            })
    finally:
        # Release the session even if the query raises.
        session.close()
    return JsonResponse({"result": values})
def get_article_content(request):
    """Return one article's full content as JSON (GET param: id)."""
    if not request.method == 'GET':
        # Return the 405 response instead of raising it (HttpResponse
        # subclasses are not exceptions); permitted methods are a list.
        return HttpResponseNotAllowed(['GET'])
    article_id = request.GET.get('id')
    session = DBSession()
    try:
        datas = session.query(Article).filter(Article.id == article_id).all()
        if not len(datas):
            raise Http404('Article not exist')
        data = datas[0]
        result = {'result': {
            'id': data.id,
            'title': data.title,
            'url': data.url,
            'imgUrl': data.imgUrl,
            'source': data.source,
            'introduction': data.introduction,
            # Stored times are UTC+8; convert to a UTC epoch timestamp.
            'createTime': data.createTime.replace(tzinfo=timezone(timedelta(hours=8))).astimezone(timezone.utc).timestamp(),
            'related': getrelate(data.related.split(','), session),
            'readCount': data.readCount,
            'opinionCount': data.opinionCount,
            'content': json.loads(data.content),
        }}
    finally:
        # Close the session even when Http404 is raised (the original
        # leaked it on that path).
        session.close()
    return JsonResponse(result)
@csrf_exempt
def get_video_realUrl(req):
    """Resolve a video page URL to its playable URL (POST: url, type)."""
    if not req.method == 'POST':
        # Return the 405 response instead of raising it (HttpResponse
        # subclasses are not exceptions); permitted methods are a list.
        return HttpResponseNotAllowed(['POST'])
    source_url = req.POST.get('url')
    source_type = req.POST.get('type')
    if source_type == "youku":
        source_url = get_youku_json(source_url)
    elif source_type == "letv":
        source_url = get_letv_json(source_url)
    else:
        raise Http404('Article not exist')
    return JsonResponse({"result": source_url})
| kagenZhao/cnBeta | CnbetaApi/CnbetaApis/views.py | Python | mit | 3,183 | 0.002513 |
"""The tests for the google calendar platform."""
import copy
from unittest.mock import Mock, patch
import httplib2
import pytest
from homeassistant.components.google import (
CONF_CAL_ID,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_NAME,
CONF_TRACK,
DEVICE_SCHEMA,
SERVICE_SCAN_CALENDARS,
do_setup,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.setup import async_setup_component
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from tests.common import async_mock_service
# Credentials config handed to async_setup_component(hass, "google", ...).
GOOGLE_CONFIG = {CONF_CLIENT_ID: "client_id", CONF_CLIENT_SECRET: "client_secret"}
TEST_ENTITY = "calendar.we_are_we_are_a_test_calendar"
TEST_ENTITY_NAME = "We are, we are, a... Test Calendar"
# Template Google Calendar API event; tests deep-copy it and fill in
# the empty "start"/"end" dicts.
TEST_EVENT = {
    "summary": "Test All Day Event",
    "start": {},
    "end": {},
    "location": "Test Cases",
    "description": "test event",
    "kind": "calendar#event",
    "created": "2016-06-23T16:37:57.000Z",
    "transparency": "transparent",
    "updated": "2016-06-24T01:57:21.045Z",
    "reminders": {"useDefault": True},
    "organizer": {
        "email": "[email protected]",
        "displayName": "Organizer Name",
        "self": True,
    },
    "sequence": 0,
    "creator": {
        "email": "[email protected]",
        "displayName": "Organizer Name",
        "self": True,
    },
    "id": "_c8rinwq863h45qnucyoi43ny8",
    "etag": '"2933466882090000"',
    "htmlLink": "https://www.google.com/calendar/event?eid=*******",
    "iCalUID": "[email protected]",
    "status": "confirmed",
}
def get_calendar_info(calendar):
    """Build a DEVICE_SCHEMA-validated dict from Google calendar data."""
    return DEVICE_SCHEMA(
        {
            CONF_CAL_ID: calendar["id"],
            CONF_ENTITIES: [
                {
                    CONF_TRACK: calendar["track"],
                    CONF_NAME: calendar["summary"],
                    CONF_DEVICE_ID: slugify(calendar["summary"]),
                }
            ],
        }
    )
@pytest.fixture(autouse=True)
def mock_google_setup(hass, test_calendar):
    """Set up the google integration with auth/config/services mocked."""
    hass.loop.run_until_complete(async_setup_component(hass, "group", {"group": {}}))
    calendar = get_calendar_info(test_calendar)
    known_calendars = {calendar[CONF_CAL_ID]: calendar}

    auth_patch = patch(
        "homeassistant.components.google.do_authentication", side_effect=do_setup
    )
    load_patch = patch(
        "homeassistant.components.google.load_config", return_value=known_calendars
    )
    services_patch = patch("homeassistant.components.google.setup_services")
    async_mock_service(hass, "google", SERVICE_SCAN_CALENDARS)
    with auth_patch, load_patch, services_patch:
        yield
@pytest.fixture(autouse=True)
def mock_http(hass):
    """Mock the http component."""
    # NOTE(review): a bare Mock stands in for the http integration; these
    # tests never exercise hass.http directly.
    hass.http = Mock()
@pytest.fixture(autouse=True)
def set_time_zone():
    """Set the time zone for the tests."""
    # Set our timezone to CST/Regina so we can check calculations
    # This keeps UTC-6 all year round
    dt_util.set_default_time_zone(dt_util.get_time_zone("America/Regina"))
    yield
    # Restore UTC so later test modules are unaffected.
    dt_util.set_default_time_zone(dt_util.get_time_zone("UTC"))
@pytest.fixture(name="google_service")
def mock_google_service():
    """Mock google service."""
    # Patch the calendar platform's GoogleCalendarService so no real
    # Google API client is constructed during setup.
    patch_google_service = patch(
        "homeassistant.components.google.calendar.GoogleCalendarService"
    )
    with patch_google_service as mock_service:
        yield mock_service
async def test_all_day_event(hass, mock_next_event):
    """An upcoming all-day event is reported as off with its attributes."""
    start_date = dt_util.dt.date.today() + dt_util.dt.timedelta(days=7)
    end_date = start_date + dt_util.dt.timedelta(days=1)
    event = copy.deepcopy(TEST_EVENT)
    event["start"]["date"] = start_date.isoformat()
    event["end"]["date"] = end_date.isoformat()
    mock_next_event.return_value.event = event

    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()

    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": TEST_ENTITY_NAME,
        "message": event["summary"],
        "all_day": True,
        "offset_reached": False,
        "start_time": start_date.strftime(DATE_STR_FORMAT),
        "end_time": end_date.strftime(DATE_STR_FORMAT),
        "location": event["location"],
        "description": event["description"],
    }
async def test_future_event(hass, mock_next_event):
    """An event starting in 30 minutes shows as off with its attributes."""
    start_dt = dt_util.now() + dt_util.dt.timedelta(minutes=30)
    end_dt = start_dt + dt_util.dt.timedelta(minutes=60)
    event = copy.deepcopy(TEST_EVENT)
    event["start"]["dateTime"] = start_dt.isoformat()
    event["end"]["dateTime"] = end_dt.isoformat()
    mock_next_event.return_value.event = event

    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()

    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": TEST_ENTITY_NAME,
        "message": event["summary"],
        "all_day": False,
        "offset_reached": False,
        "start_time": start_dt.strftime(DATE_STR_FORMAT),
        "end_time": end_dt.strftime(DATE_STR_FORMAT),
        "location": event["location"],
        "description": event["description"],
    }
async def test_in_progress_event(hass, mock_next_event):
    """A currently-running event switches the calendar state to on."""
    start_dt = dt_util.now() - dt_util.dt.timedelta(minutes=30)
    end_dt = start_dt + dt_util.dt.timedelta(minutes=60)
    event = copy.deepcopy(TEST_EVENT)
    event["start"]["dateTime"] = start_dt.isoformat()
    event["end"]["dateTime"] = end_dt.isoformat()
    mock_next_event.return_value.event = event

    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()

    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": TEST_ENTITY_NAME,
        "message": event["summary"],
        "all_day": False,
        "offset_reached": False,
        "start_time": start_dt.strftime(DATE_STR_FORMAT),
        "end_time": end_dt.strftime(DATE_STR_FORMAT),
        "location": event["location"],
        "description": event["description"],
    }
async def test_offset_in_progress_event(hass, mock_next_event):
    """An event 14 minutes out with a !!-15 offset reports offset_reached."""
    start_dt = dt_util.now() + dt_util.dt.timedelta(minutes=14)
    end_dt = start_dt + dt_util.dt.timedelta(minutes=60)
    event_summary = "Test Event in Progress"
    event = copy.deepcopy(TEST_EVENT)
    event["start"]["dateTime"] = start_dt.isoformat()
    event["end"]["dateTime"] = end_dt.isoformat()
    event["summary"] = f"{event_summary} !!-15"
    mock_next_event.return_value.event = event

    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()

    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": TEST_ENTITY_NAME,
        "message": event_summary,
        "all_day": False,
        "offset_reached": True,
        "start_time": start_dt.strftime(DATE_STR_FORMAT),
        "end_time": end_dt.strftime(DATE_STR_FORMAT),
        "location": event["location"],
        "description": event["description"],
    }
@pytest.mark.skip
async def test_all_day_offset_in_progress_event(hass, mock_next_event):
    """Test an all-day event whose "!!-25:0" offset window is reached."""
    # All-day event for tomorrow; a 25-hour lead time reaches back past
    # "now", so offset_reached should be True while the event is still off.
    tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=1)
    end_event = tomorrow + dt_util.dt.timedelta(days=1)
    start = tomorrow.isoformat()
    end = end_event.isoformat()
    event_summary = "Test All Day Event Offset In Progress"
    event = copy.deepcopy(TEST_EVENT)
    # all-day events use "date" (not "dateTime") in the Google payload
    event["start"]["date"] = start
    event["end"]["date"] = end
    event["summary"] = "{} !!-25:0".format(event_summary)
    mock_next_event.return_value.event = event
    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    # Calendar is off (event starts tomorrow) but the offset is reached.
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": TEST_ENTITY_NAME,
        "message": event_summary,
        "all_day": True,
        "offset_reached": True,
        "start_time": tomorrow.strftime(DATE_STR_FORMAT),
        "end_time": end_event.strftime(DATE_STR_FORMAT),
        "location": event["location"],
        "description": event["description"],
    }
async def test_all_day_offset_event(hass, mock_next_event):
    """Test an all-day event whose offset window has NOT been reached."""
    # Event is two days out; a lead time of (current hour + 1) hours is not
    # enough to reach "now", so offset_reached should stay False.
    tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=2)
    end_event = tomorrow + dt_util.dt.timedelta(days=1)
    start = tomorrow.isoformat()
    end = end_event.isoformat()
    offset_hours = 1 + dt_util.now().hour
    event_summary = "Test All Day Event Offset"
    event = copy.deepcopy(TEST_EVENT)
    event["start"]["date"] = start
    event["end"]["date"] = end
    # "!!-<hours>:0" requests a lead time in hours:minutes before the event
    event["summary"] = "{} !!-{}:0".format(event_summary, offset_hours)
    mock_next_event.return_value.event = event
    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": TEST_ENTITY_NAME,
        "message": event_summary,
        "all_day": True,
        # offset window not yet entered for an event two days away
        "offset_reached": False,
        "start_time": tomorrow.strftime(DATE_STR_FORMAT),
        "end_time": end_event.strftime(DATE_STR_FORMAT),
        "location": event["location"],
        "description": event["description"],
    }
async def test_update_error(hass, google_service):
    """Test that the calendar entity survives a server error during update."""
    # Simulate the Google API endpoint being unreachable on every request.
    google_service.return_value.get = Mock(
        side_effect=httplib2.ServerNotFoundError("unit test")
    )
    assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get(TEST_ENTITY)
    assert state.name == TEST_ENTITY_NAME
    # The entity falls back to "off" instead of raising or going unavailable.
    assert state.state == "off"
| fbradyirl/home-assistant | tests/components/google/test_calendar.py | Python | apache-2.0 | 11,260 | 0.000977 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************
**espressopp.Int3D**
********************
.. function:: espressopp.__Int3D(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.__Int3D.x(v, [0)
:param v:
:param [0:
:type v:
:type [0:
:rtype:
.. function:: espressopp.__Int3D.y(v, [1)
:param v:
:param [1:
:type v:
:type [1:
:rtype:
.. function:: espressopp.__Int3D.z(v, [2)
:param v:
:param [2:
:type v:
:type [2:
:rtype:
.. function:: espressopp.toInt3DFromVector(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toInt3D(\*args)
:param \*args:
:type \*args:
"""
from _espressopp import Int3D
from espressopp import esutil
# This injects additional methods into the Int3D class and pulls it
# into this module
class __Int3D(Int3D) :
    # esutil.ExtendBaseClass re-injects the members defined here into the
    # C++-exported Int3D class itself (Python 2 metaclass hook), instead of
    # creating a separate subclass.
    __metaclass__ = esutil.ExtendBaseClass

    # keep a handle on the original constructor before replacing __init__
    __originit = Int3D.__init__
    def __init__(self, *args):
        """Accept Int3D(), Int3D(other), Int3D(iter3), Int3D(i) or Int3D(x, y, z)."""
        if len(args) == 0:
            # NOTE(review): defaults are floats (0.0) although this is an
            # integer vector -- presumably coerced by the C++ side; confirm.
            x = y = z = 0.0
        elif len(args) == 1:
            arg0 = args[0]
            if isinstance(arg0, Int3D):
                # copy-construct from another Int3D
                x = arg0.x
                y = arg0.y
                z = arg0.z
            # test whether the argument is iterable and has 3 elements
            elif hasattr(arg0, '__iter__') and len(arg0) == 3:
                x, y, z = arg0
            elif isinstance(arg0, int):
                # a single integer fills all three components
                x = y = z = arg0
            else :
                raise TypeError("Cannot initialize Int3D from %s" % (args))
        elif len(args) == 3 :
            x, y, z = args
        else :
            raise TypeError("Cannot initialize Int3D from %s" % (args))
        # delegate to the original (wrapped) constructor
        return self.__originit(x, y, z)
    # create setters and getters
    @property
    def x(self): return self[0]
    @x.setter
    def x(self, v): self[0] = v
    @property
    def y(self) : return self[1]
    @y.setter
    def y(self, v) : self[1] = v
    @property
    def z(self) : return self[2]
    @z.setter
    def z(self, v) : self[2] = v
    # string conversion
    def __str__(self) :
        return str((self[0], self[1], self[2]))
    def __repr__(self) :
        return 'Int3D' + str(self)
def toInt3DFromVector(*args):
    """Try to convert the arguments to a Int3D.

    Only converts when x, y and z are all specified, either as three
    separate arguments or as a single Int3D / 3-element iterable."""
    if len(args) == 3:
        return Int3D(*args)
    if len(args) == 1:
        single = args[0]
        if isinstance(single, Int3D):
            # already the right type: hand it back unchanged
            return single
        if hasattr(single, '__iter__') and len(single) == 3:
            return Int3D(*args)
    raise TypeError("Specify x, y and z.")
def toInt3D(*args):
    """Try to convert the arguments to a Int3D, returning the argument
    itself when it already is a Int3D."""
    is_already_int3d = len(args) == 1 and isinstance(args[0], Int3D)
    return args[0] if is_already_int3d else Int3D(*args)
| capoe/espressopp.soap | src/Int3D.py | Python | gpl-3.0 | 3,721 | 0.017468 |
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.segments`
"""
import pytest
import numpy
from matplotlib import rcParams
from matplotlib.colors import ColorConverter
from matplotlib.collections import PatchCollection
from ...segments import (Segment, SegmentList, SegmentListDict,
DataQualityFlag, DataQualityDict)
from ...time import to_gps
from .. import SegmentAxes
from ..segments import SegmentRectangle
from .test_axes import TestAxes as _TestAxes
# extract color cycle
COLOR_CONVERTER = ColorConverter()
COLOR_CYCLE = rcParams['axes.prop_cycle'].by_key()['color']
COLOR0 = COLOR_CONVERTER.to_rgba(COLOR_CYCLE[0])
class TestSegmentAxes(_TestAxes):
    """Tests for `gwpy.plot.SegmentAxes`; inherits the generic Axes tests."""
    AXES_CLASS = SegmentAxes
    @staticmethod
    @pytest.fixture()
    def segments():
        # simple two-segment list used by most tests below
        return SegmentList([Segment(0, 3), Segment(6, 7)])
    @staticmethod
    @pytest.fixture()
    def flag():
        # flag whose 'active' spans partly fall outside the 'known' spans
        known = SegmentList([Segment(0, 3), Segment(6, 7)])
        active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)])
        return DataQualityFlag(name='Test segments', known=known,
                               active=active)
    def test_plot_flag(self, ax, flag):
        c = ax.plot_flag(flag)
        assert c.get_label() == flag.texname
        # one collection each for the 'known' and 'active' segment lists
        assert len(ax.collections) == 2
        assert ax.collections[0] is c
        # a "bad" flag should be drawn in solid red
        flag.isgood = False
        c = ax.plot_flag(flag)
        assert tuple(c.get_facecolors()[0]) == (1., 0., 0., 1.)
        # styling of the 'known' band via a dict or a named style
        c = ax.plot_flag(flag, known={'facecolor': 'black'})
        c = ax.plot_flag(flag, known='fancy')
    def test_plot_dqflag(self, ax, flag):
        # plot_dqflag is a deprecated alias of plot_flag
        with pytest.deprecated_call():
            ax.plot_dqflag(flag)
        assert ax.collections  # make sure it plotted something
    def test_plot_dict(self, ax, flag):
        dqd = DataQualityDict()
        dqd['a'] = flag
        dqd['b'] = flag
        colls = ax.plot_dict(dqd)
        assert len(colls) == len(dqd)
        assert all(isinstance(c, PatchCollection) for c in colls)
        # default labels are the dict keys
        assert colls[0].get_label() == 'a'
        assert colls[1].get_label() == 'b'
        # label='name' uses each flag's own name; any other string is literal
        colls = ax.plot_dict(dqd, label='name')
        assert colls[0].get_label() == 'Test segments'
        colls = ax.plot_dict(dqd, label='anything')
        assert colls[0].get_label() == 'anything'
    def test_plot_dqdict(self, ax, flag):
        # plot_dqdict is a deprecated alias of plot_dict
        with pytest.deprecated_call():
            ax.plot_dqdict(DataQualityDict(a=flag))
    def test_plot_segmentlist(self, ax, segments):
        c = ax.plot_segmentlist(segments)
        assert isinstance(c, PatchCollection)
        # data limits should span the full extent of the segments
        assert numpy.isclose(ax.dataLim.x0, 0.)
        assert numpy.isclose(ax.dataLim.x1, 7.)
        assert len(c.get_paths()) == len(segments)
        # epoch snaps to the start of the first segment
        assert ax.get_epoch() == segments[0][0]
        # test y
        p = ax.plot_segmentlist(segments).get_paths()[0].get_extents()
        assert p.y0 + p.height/2. == 1.
        p = ax.plot_segmentlist(segments, y=8).get_paths()[0].get_extents()
        assert p.y0 + p.height/2. == 8.
        # test kwargs
        c = ax.plot_segmentlist(segments, label='My segments',
                                rasterized=True)
        assert c.get_label() == 'My segments'
        assert c.get_rasterized() is True
        # test collection=False
        c = ax.plot_segmentlist(segments, collection=False, label='test')
        assert isinstance(c, list)
        assert not isinstance(c, PatchCollection)
        # only the first patch carries the label
        assert c[0].get_label() == 'test'
        assert c[1].get_label() == ''
        assert len(ax.patches) == len(segments)
        # test empty
        c = ax.plot_segmentlist(type(segments)())
    def test_plot_segmentlistdict(self, ax, segments):
        sld = SegmentListDict()
        sld['TEST'] = segments
        ax.plot(sld)
    def test_plot(self, ax, segments, flag):
        # plot() should dispatch on argument type, singly and mixed
        dqd = DataQualityDict(a=flag)
        ax.plot(segments)
        ax.plot(flag)
        ax.plot(dqd)
        ax.plot(flag, segments, dqd)
    def test_insetlabels(self, ax, segments):
        ax.plot(segments)
        ax.set_insetlabels(True)
    def test_fmt_data(self, ax):
        # just check that the LIGOTimeGPS repr is in place
        value = 1234567890.123
        assert ax.format_xdata(value) == str(to_gps(value))
    # -- disable tests from upstream
    def test_imshow(self):
        # imshow is not meaningful on segment axes
        return NotImplemented
def test_segmentrectangle():
    """Test `gwpy.plot.segments.SegmentRectangle` geometry and styling."""
    patch = SegmentRectangle((1.1, 2.4), 10)
    # bug fix: the original read ``assert patch.get_xy(), (1.1, 9.6)`` --
    # the comma made it a truthiness assert with a message tuple, so the
    # position was never actually compared; use a real (float-safe) compare.
    assert numpy.allclose(patch.get_xy(), (1.1, 9.6))
    assert numpy.isclose(patch.get_height(), 0.8)
    assert numpy.isclose(patch.get_width(), 1.3)
    assert patch.get_facecolor() == COLOR0
    # check kwarg passing
    patch = SegmentRectangle((1.1, 2.4), 10, facecolor='red')
    assert patch.get_facecolor() == COLOR_CONVERTER.to_rgba('red')
    # check valign
    patch = SegmentRectangle((1.1, 2.4), 10, valign='top')
    assert patch.get_xy() == (1.1, 9.2)
    patch = SegmentRectangle((1.1, 2.4), 10, valign='bottom')
    assert patch.get_xy() == (1.1, 10.0)
    # an unrecognised valign should raise
    with pytest.raises(ValueError):
        patch = SegmentRectangle((0, 1), 0, valign='blah')
| areeda/gwpy | gwpy/plot/tests/test_segments.py | Python | gpl-3.0 | 5,755 | 0 |
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Qualifications:
    """Ordered collection of qualification requirements for a HIT.

    Mechanical Turk accepts at most ten QualificationRequirement entries
    per HIT, which :meth:`get_as_params` enforces.
    """

    def __init__(self, requirements=None):
        # idiom fix: compare to None with 'is', not '=='; None (rather than
        # a mutable []) default avoids sharing one list between instances.
        if requirements is None:
            requirements = []
        self.requirements = requirements

    def add(self, req):
        """Append a single requirement object to the collection."""
        self.requirements.append(req)

    def get_as_params(self):
        """Flatten all requirements into MTurk's 1-based request parameters.

        Each requirement's own parameters are namespaced as
        ``QualificationRequirement.<n>.<name>`` with ``n`` starting at 1.
        """
        assert len(self.requirements) <= 10
        params = {}
        for n, req in enumerate(self.requirements):
            for name, value in req.get_as_params().items():
                params['QualificationRequirement.%s.%s' % ((n + 1), name)] = value
        return params
class Requirement(object):
    """
    Representation of a single qualification requirement on a HIT.
    """
    def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False):
        self.qualification_type_id = qualification_type_id
        self.comparator = comparator
        self.integer_value = integer_value
        self.required_to_preview = required_to_preview

    def get_as_params(self):
        """Serialize this requirement into MTurk request parameters."""
        params = {}
        params["QualificationTypeId"] = self.qualification_type_id
        params["Comparator"] = self.comparator
        # "Exists" comparisons carry no value; any other comparator may.
        has_value = self.integer_value is not None and self.comparator != 'Exists'
        if has_value:
            params['IntegerValue'] = self.integer_value
        if self.required_to_preview:
            params['RequiredToPreview'] = "true"
        return params
class PercentAssignmentsSubmittedRequirement(Requirement):
    """
    The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100.
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "00000000000000000000" is MTurk's built-in percent-submitted statistic
        super(PercentAssignmentsSubmittedRequirement, self).__init__(
            qualification_type_id="00000000000000000000",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
class PercentAssignmentsAbandonedRequirement(Requirement):
    """
    The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100.
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "00000000000000000070" is MTurk's built-in abandonment statistic
        super(PercentAssignmentsAbandonedRequirement, self).__init__(
            qualification_type_id="00000000000000000070",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
class PercentAssignmentsReturnedRequirement(Requirement):
    """
    The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100.
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "000000000000000000E0" is MTurk's built-in returned-work statistic
        super(PercentAssignmentsReturnedRequirement, self).__init__(
            qualification_type_id="000000000000000000E0",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
class PercentAssignmentsApprovedRequirement(Requirement):
    """
    The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100.
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "000000000000000000L0" is MTurk's built-in approval-rate statistic
        super(PercentAssignmentsApprovedRequirement, self).__init__(
            qualification_type_id="000000000000000000L0",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
class PercentAssignmentsRejectedRequirement(Requirement):
    """
    The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100.
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "000000000000000000S0" is MTurk's built-in rejection-rate statistic
        super(PercentAssignmentsRejectedRequirement, self).__init__(
            qualification_type_id="000000000000000000S0",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
class NumberHitsApprovedRequirement(Requirement):
    """
    Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0.
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "00000000000000000040" is MTurk's built-in approved-HITs counter
        super(NumberHitsApprovedRequirement, self).__init__(
            qualification_type_id="00000000000000000040",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
class LocaleRequirement(Requirement):
    """
    A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account.
    """
    def __init__(self, comparator, locale, required_to_preview=False):
        # "00000000000000000071" is MTurk's built-in locale qualification
        super(LocaleRequirement, self).__init__(
            qualification_type_id="00000000000000000071",
            comparator=comparator,
            integer_value=None,
            required_to_preview=required_to_preview)
        self.locale = locale

    def get_as_params(self):
        """Serialize with a LocaleValue.Country field instead of IntegerValue."""
        params = {}
        params["QualificationTypeId"] = self.qualification_type_id
        params["Comparator"] = self.comparator
        params['LocaleValue.Country'] = self.locale
        if self.required_to_preview:
            params['RequiredToPreview'] = "true"
        return params
class AdultRequirement(Requirement):
    """
    Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default).
    """
    def __init__(self, comparator, integer_value, required_to_preview=False):
        # "00000000000000000060" is MTurk's built-in adult-content flag
        super(AdultRequirement, self).__init__(
            qualification_type_id="00000000000000000060",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
| kumar303/rockit | vendor-local/boto/mturk/qualification.py | Python | bsd-3-clause | 6,761 | 0.005177 |
# Determine whether an integer is a palindrome. Do this without extra space.
class Solution:
    # @return a boolean
    def isPalindrome1(self, x):
        """Reverse only the lower half of the digits and compare halves.

        Negative numbers and non-zero multiples of 10 can never be
        palindromes, so they are rejected up front.
        """
        if x < 0 or x % 10 == 0 and x:
            return False
        xhalf = 0
        # build the reversed lower half until it meets/passes the upper half
        while x > xhalf:
            xhalf = xhalf * 10 + x % 10
            x //= 10  # bug fix: '/' is float division on Python 3
        # even digit count: halves match; odd: drop the middle digit
        return (x == xhalf or x == xhalf // 10
                )

    def isPalindrome(self, x):
        """Reverse all the digits and compare with the original number."""
        if x < 0:
            return False
        size, xreverse = x, 0
        while size:
            xreverse = xreverse * 10 + size % 10
            # bug fix: floor division keeps this an int on Python 3
            size //= 10
        return True if xreverse == x else False
if __name__ == '__main__':
    s = Solution()
    # bug fix: print() call works on both Python 2 (as a parenthesized
    # expression) and Python 3; the bare print statement is a SyntaxError
    # under Python 3.
    print(s.isPalindrome1(0))
'''
Created on Aug 27, 2013
@author: Devon
Define gui events
'''
from pyHopeEngine import BaseEvent
class Event_ButtonPressed(BaseEvent):
    '''Sent when a button is pressed'''
    # class-level type tag used by the event manager to route listeners
    eventType = "ButtonPressed"

    def __init__(self, value):
        '''Contains a value identifying the button'''
        self.value = value
class Event_ScreenResize(BaseEvent):
    '''Sent when a screen resize is requested'''
    # class-level type tag used by the event manager to route listeners
    eventType = "ScreenResize"

    def __init__(self, width, height):
        '''Store the requested screen width and height.'''
        self.width = width
        self.height = height
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
import time
import datetime
# One-shot sync script:
#  1) copy every Drupal user's e-mail into the probe-edit table, and
#  2) backfill each probe's "firsttime" from its earliest per-month table.
# NOTE(review): credentials are hard-coded here; move to config/env vars.
conn = mysql.connector.connect(host="localhost",user="spike",password="valentine", database="drupal")
cann = mysql.connector.connect(host="localhost",user="spike",password="valentine", database="content_delivery_weather")
cursor = conn.cursor()
cursar = cann.cursor()
cursor.execute("""SELECT uid, mail FROM users""")
rows = cursor.fetchall()
for row in rows:
    # uid 0 is Drupal's anonymous user -- skip it
    if row[0] != 0:
        print('{0} : {1} '.format(row[0], row[1]))
        #print('UPDATE new_v4_users_probes_edit SET email = {0} WHERE uid = {1}'.format(row[1], row[0]))
        cursar.execute("""UPDATE new_v4_users_probes_edit SET email = %s WHERE userid = %s""",(row[1], row[0]))
cursar.execute("""SELECT probename, probeid FROM new_v4_sonde""")
rows = cursar.fetchall()
for row in rows:
    # find this probe's per-month data tables by name pattern
    cursar.execute("""SHOW TABLES LIKE %s""",("%" + row[0] + "%",))
    rowsbis = cursar.fetchall()
    for rowbis in rowsbis:
        # table names look like <...>_<probename>_<year>_<month>
        result = rowbis[0].split("_")
        # NOTE(review): assumes the month field is 0-based (0-11); if it is
        # already 1-12 this yields month 13 and strptime raises -- confirm.
        month = 1 + int(result[4])
        s = "01/" + str(month) + "/" + result[3]
        # timestamp of the first day of that month (local time)
        timestamp = time.mktime(datetime.datetime.strptime(s, "%d/%m/%Y").timetuple())
        print('{0} : {1} year: {2} month: {3} timestamp: {4}'.format(row[0], rowbis[0], result[3], result[4], round(timestamp,0)))
        cursar.execute("""SELECT firsttime FROM new_v4_sonde WHERE probeid = %s""",(row[1],))
        rowsbisbis = cursar.fetchall()
        for rowbisbis in rowsbisbis:
            # only set firsttime when it has never been populated
            if rowbisbis[0] == None:
                cursar.execute("""UPDATE new_v4_sonde SET firsttime = %s WHERE probeid = %s""",(timestamp,row[1]))
            print('firsttime: {0}'.format(rowbisbis[0],))
# NOTE(review): no conn.commit()/cann.commit() before closing; with
# autocommit disabled (the connector default) the UPDATEs above may never
# be persisted -- confirm the server/connection autocommit setting.
conn.close()
cann.close()
| gandalf-the-white/foundation | amaryl/scripts/initdatabase.py | Python | mit | 1,705 | 0.021114 |
# I made some modifications to termcolor so you can pass HEX colors to
# the colored function. It then chooses the nearest xterm 256 color to
# that HEX color. This requires some color functions that I have added
# in my python path.
#
# 2015/02/16
#
#
# coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <[email protected]>
"""ANSII Color formatting for output in terminal."""
from __future__ import print_function
import os
import re
from hexrgb_conversion import rgb
from x256 import from_rgb
__ALL__ = ["colored", "cprint"]  # NOTE(review): conventional name is __all__
VERSION = (1, 1, 0)

# ANSI SGR attribute codes 1-8; the two empty names (codes 3 and 6, which
# have no attribute mapping here) are removed immediately below.
ATTRIBUTES = dict(
    list(
        zip(
            ["bold", "dark", "", "underline", "blink", "", "reverse", "concealed"],
            list(range(1, 9)),
        )
    )
)
del ATTRIBUTES[""]

# ANSI background color codes 40-47
HIGHLIGHTS = dict(
    list(
        zip(
            [
                "on_grey",
                "on_red",
                "on_green",
                "on_yellow",
                "on_blue",
                "on_magenta",
                "on_cyan",
                "on_white",
            ],
            list(range(40, 48)),
        )
    )
)

# ANSI foreground color codes 30-37
COLORS = dict(
    list(
        zip(
            ["grey", "red", "green", "yellow", "blue", "magenta", "cyan", "white"],
            list(range(30, 38)),
        )
    )
)

# SGR reset sequence appended after any styled text
RESET = "\033[0m"
def colored(text, color=None, on_color=None, attrs=None):
    """Colorize text.

    Both *color* and *on_color* accept either one of the eight named ANSI
    colors or a HEX string (with or without a leading ``#``); HEX values
    are mapped to the nearest xterm-256 color.

    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.
    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
    Available attributes:
        bold, dark, underline, blink, reverse, concealed.

    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', '#EE2E2F')
    """
    if os.getenv("ANSI_COLORS_DISABLED") is not None:
        # coloring globally disabled: hand the text back untouched
        return text
    fmt_str = "\033[%dm%s"
    if color is not None:
        if "#" in color:
            # HEX value: map to the nearest xterm-256 foreground color
            channels = rgb(re.sub("[#]", "", color))
            index = from_rgb(channels[0], channels[1], channels[2])
            text = "\033[38;5;%dm%s" % (index, text)
        else:
            text = fmt_str % (COLORS[color], text)
    if on_color is not None:
        if "#" in on_color:
            # HEX value: map to the nearest xterm-256 background color
            channels = rgb(re.sub("[#]", "", on_color))
            index = from_rgb(channels[0], channels[1], channels[2])
            text = "\033[48;5;%dm%s" % (index, text)
        else:
            text = fmt_str % (HIGHLIGHTS[on_color], text)
    if attrs is not None:
        for attr in attrs:
            text = fmt_str % (ATTRIBUTES[attr], text)
    return text + RESET
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
    """Print colorized text.

    Any extra keyword arguments are forwarded to the built-in print().
    """
    print(colored(text, color, on_color, attrs), **kwargs)
if __name__ == "__main__":
    # Self-demo: print every supported color/highlight/attribute combo.
    print("Current terminal type: %s" % os.getenv("TERM"))
    print("Test basic colors:")
    cprint("Grey color", "grey")
    cprint("Red color", "red")
    cprint("Green color", "green")
    cprint("Yellow color", "yellow")
    cprint("Blue color", "blue")
    cprint("Magenta color", "magenta")
    cprint("Cyan color", "cyan")
    cprint("White color", "white")
    print(("-" * 78))
    print("Test highlights:")
    cprint("On grey color", on_color="on_grey")
    cprint("On red color", on_color="on_red")
    cprint("On green color", on_color="on_green")
    cprint("On yellow color", on_color="on_yellow")
    cprint("On blue color", on_color="on_blue")
    cprint("On magenta color", on_color="on_magenta")
    cprint("On cyan color", on_color="on_cyan")
    cprint("On white color", color="grey", on_color="on_white")
    print("-" * 78)
    print("Test attributes:")
    cprint("Bold grey color", "grey", attrs=["bold"])
    cprint("Dark red color", "red", attrs=["dark"])
    cprint("Underline green color", "green", attrs=["underline"])
    cprint("Blink yellow color", "yellow", attrs=["blink"])
    cprint("Reversed blue color", "blue", attrs=["reverse"])
    cprint("Concealed Magenta color", "magenta", attrs=["concealed"])
    cprint(
        "Bold underline reverse cyan color",
        "cyan",
        attrs=["bold", "underline", "reverse"],
    )
    cprint(
        "Dark blink concealed white color",
        "white",
        attrs=["dark", "blink", "concealed"],
    )
    print(("-" * 78))
    print("Test mixing:")
    cprint("Underline red on grey color", "red", "on_grey", ["underline"])
    cprint("Reversed green on red color", "green", "on_red", ["reverse"])
    # HEX colors exercise the x256 nearest-color mapping added to colored()
    print("Using HEX colors:")
    cprint("Use HEX color EE2E2F", "#EE2E2F")
| marchdf/dotfiles | mypython/mypython/mytermcolor.py | Python | mit | 5,928 | 0.000337 |
#! /usr/bin/python
'''
Suppose a sorted array is rotated at some pivot unknown to you beforehand. (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Find the minimum element.
'''
class Solution:
    # @param num, a list of integer
    # @return an integer
    # You may assume no duplicate exists in the array.
    def findMinNoDuplicate(self, num):
        """Binary-search the minimum of a rotated sorted array (no duplicates).

        Returns -(2**32) as a sentinel for an empty list. O(log n).
        """
        INT_MIN_VALUE = -(2**32)
        size = len(num)
        if size == 0:
            return INT_MIN_VALUE
        elif size == 1:
            return num[0]
        low_index = 0
        high_index = size - 1
        while (low_index < high_index - 1):
            # bug fix: floor division keeps the index an int on Python 3
            mid_index = low_index + (high_index - low_index) // 2
            if (num[mid_index] > num[high_index]):
                # rotation point (and minimum) lies in the right half
                low_index = mid_index
            else:
                high_index = mid_index
        return min(num[low_index], num[high_index])

    # @param num, a list of integer
    # @return an integer
    # You may assume duplicate exists in the array.
    def findMinDuplicate(self, num):
        """Binary-search the minimum of a rotated sorted array with duplicates.

        Returns -(2**32) as a sentinel for an empty list. O(log n) average,
        O(n) worst case when many elements are equal.
        """
        INT_MIN_VALUE = -(2**32)
        size = len(num)
        if size == 0:
            return INT_MIN_VALUE
        elif size == 1:
            return num[0]
        low_index = 0
        high_index = size - 1
        while (low_index < high_index - 1):
            # bug fix: floor division keeps the index an int on Python 3
            mid_index = low_index + (high_index - low_index) // 2
            if (num[mid_index] > num[high_index]):
                low_index = mid_index
            elif (num[mid_index] < num[high_index]):
                high_index = mid_index
            else:
                # equal values: cannot decide which side holds the minimum,
                # shrink the window by one
                high_index -= 1
        return min(num[low_index], num[high_index])
if __name__ == '__main__':
    solution = Solution()
    # bug fix: print() call works on both Python 2 and 3; the bare print
    # statement is a SyntaxError under Python 3.
    print(solution.findMinDuplicate([3, 3, 1, 2, 2]))
| shub0/algorithm-data-structure | python/find_minimum.py | Python | bsd-3-clause | 1,730 | 0.004046 |
import pytest
import re
import capybara
class TestHasSelector:
    """Tests for ``session.has_selector`` (positive matcher)."""
    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # every test in this class runs against the /with_html fixture page
        session.visit("/with_html")
    def test_is_true_if_the_given_selector_is_on_the_page(self, session):
        assert session.has_selector("xpath", "//p")
        assert session.has_selector("css", "p a#foo")
        assert session.has_selector("//p[contains(.,'est')]")
    def test_is_false_if_the_given_selector_is_not_on_the_page(self, session):
        assert not session.has_selector("xpath", "//abbr")
        assert not session.has_selector("css", "p a#doesnotexist")
        assert not session.has_selector("//p[contains(.,'thisstringisnotonpage')]")
    def test_uses_default_selector(self, session):
        # a bare expression is interpreted per capybara.default_selector
        capybara.default_selector = "css"
        assert not session.has_selector("p a#doesnotexist")
        assert session.has_selector("p a#foo")
    def test_respects_scopes(self, session):
        # matching is restricted to the scoped element
        with session.scope("//p[@id='first']"):
            assert session.has_selector(".//a[@id='foo']")
            assert not session.has_selector(".//a[@id='red']")
    def test_is_true_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
        assert session.has_selector("//p", count=3)
        assert session.has_selector("//p//a[@id='foo']", count=1)
        assert session.has_selector("//p[contains(.,'est')]", count=1)
    def test_is_false_if_the_content_is_not_on_the_page_the_given_number_of_times(self, session):
        assert not session.has_selector("//p", count=6)
        assert not session.has_selector("//p//a[@id='foo']", count=2)
        assert not session.has_selector("//p[contains(.,'est')]", count=5)
    def test_is_false_if_the_content_is_not_on_the_page_at_all(self, session):
        assert not session.has_selector("//abbr", count=2)
        assert not session.has_selector("//p//a[@id='doesnotexist']", count=1)
    def test_discards_all_matches_where_the_given_string_is_not_contained(self, session):
        assert session.has_selector("//p//a", text="Redirect", count=1)
        assert not session.has_selector("//p", text="Doesnotexist")
    def test_respects_visibility_setting(self, session):
        # visible=False finds hidden elements; visible=True must not
        assert session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=False)
        assert not session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=True)
        capybara.ignore_hidden_elements = False
        assert session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=False)
        capybara.visible_text_only = True
        assert not session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=True)
    def test_discards_all_matches_where_the_given_regex_is_not_matched(self, session):
        assert session.has_selector("//p//a", text=re.compile("re[dab]i", re.IGNORECASE), count=1)
        assert not session.has_selector("//p//a", text=re.compile("Red$"))
    def test_only_matches_elements_that_match_exact_text_exactly(self, session):
        assert session.has_selector("id", "h2one", exact_text="Header Class Test One")
        assert not session.has_selector("id", "h2one", exact_text="Header Class Test")
    def test_only_matches_elements_that_match_exactly_when_exact_text_true(self, session):
        assert session.has_selector("id", "h2one", text="Header Class Test One", exact_text=True)
        assert not session.has_selector("id", "h2one", text="Header Class Test", exact_text=True)
    def test_matches_substrings_when_exact_text_false(self, session):
        assert session.has_selector("id", "h2one", text="Header Class Test One", exact_text=False)
        assert session.has_selector("id", "h2one", text="Header Class Test", exact_text=False)
class TestHasNoSelector:
    """Tests for ``session.has_no_selector`` (negative matcher)."""
    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # every test in this class runs against the /with_html fixture page
        session.visit("/with_html")
    def test_is_false_if_the_given_selector_is_on_the_page(self, session):
        assert not session.has_no_selector("xpath", "//p")
        assert not session.has_no_selector("css", "p a#foo")
        assert not session.has_no_selector("//p[contains(.,'est')]")
    def test_is_true_if_the_given_selector_is_not_on_the_page(self, session):
        assert session.has_no_selector("xpath", "//abbr")
        assert session.has_no_selector("css", "p a#doesnotexist")
        assert session.has_no_selector("//p[contains(.,'thisstringisnotonpage')]")
    def test_uses_default_selector(self, session):
        capybara.default_selector = "css"
        assert session.has_no_selector("p a#doesnotexist")
        assert not session.has_no_selector("p a#foo")
    def test_respects_scopes(self, session):
        with session.scope("//p[@id='first']"):
            assert not session.has_no_selector(".//a[@id='foo']")
            assert session.has_no_selector("../a[@id='red']")
    def test_is_false_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
        assert not session.has_no_selector("//p", count=3)
        assert not session.has_no_selector("//p//a[@id='foo']", count=1)
        assert not session.has_no_selector("//p[contains(.,'est')]", count=1)
    def test_is_true_if_the_content_is_on_the_page_the_wrong_number_of_times(self, session):
        assert session.has_no_selector("//p", count=6)
        assert session.has_no_selector("//p//a[@id='foo']", count=2)
        assert session.has_no_selector("//p[contains(.,'est')]", count=5)
    def test_is_true_if_the_content_is_not_on_the_page_at_all(self, session):
        assert session.has_no_selector("//abbr", count=2)
        assert session.has_no_selector("//p//a[@id='doesnotexist']", count=1)
    def test_discards_all_matches_where_the_given_string_is_contained(self, session):
        assert not session.has_no_selector("//p//a", text="Redirect", count=1)
        assert session.has_no_selector("//p", text="Doesnotexist")
    def test_discards_all_matches_where_the_given_regex_is_matched(self, session):
        assert not session.has_no_selector("//p//a", text=re.compile(r"re[dab]i", re.IGNORECASE), count=1)
        assert session.has_no_selector("//p//a", text=re.compile(r"Red$"))
    def test_only_matches_elements_that_do_not_match_exact_text_exactly(self, session):
        assert not session.has_no_selector("id", "h2one", exact_text="Header Class Test One")
        assert session.has_no_selector("id", "h2one", exact_text="Header Class Test")
    def test_only_matches_elements_that_do_not_match_exactly_when_exact_text_true(self, session):
        assert not session.has_no_selector("id", "h2one", text="Header Class Test One",
                                           exact_text=True)
        assert session.has_no_selector("id", "h2one", text="Header Class Test", exact_text=True)
    def test_does_not_match_substrings_when_exact_text_false(self, session):
        assert not session.has_no_selector("id", "h2one", text="Header Class Test One",
                                           exact_text=False)
        assert not session.has_no_selector("id", "h2one", text="Header Class Test", exact_text=False)
| elliterate/capybara.py | capybara/tests/session/test_has_selector.py | Python | mit | 7,185 | 0.004454 |
#
# Copyright (C) 2017 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This is based on sherpa/sim/tests_sim_unit.py.
"""
from sherpa.astro import sim
# This is part of #397
#
def test_list_samplers():
    """list_samplers() must return a non-empty list."""
    sampler_names = sim.MCMC().list_samplers()
    assert isinstance(sampler_names, list)
    assert len(sampler_names) > 0
def test_list_samplers_contents():
    """The astro sampler list must include the documented names."""
    # Only check for inclusion, not an exact list: the astro version adds
    # 'pragbayes' and 'fullbayes' on top of the non-astro samplers.
    sampler_names = sim.MCMC().list_samplers()
    for name in ('mh', 'metropolismh', 'pragbayes', 'fullbayes'):
        assert name in sampler_names
| anetasie/sherpa | sherpa/astro/sim/tests/test_astro_sim_unit.py | Python | gpl-3.0 | 1,513 | 0 |
from impl import FixedClientDeauthAttack,\
SniffedClientDeauthAttack,\
GlobalDisassociationAttack
class WiFiDeauthAttackBuilder(object):
    '''This object finds the appropriate attack for the options supplied by the
    user.'''

    @classmethod
    def build_from(cls, options):
        """Instantiate the first wrapper subclass whose handles() accepts
        *options*.  Raises IndexError if no subclass matches."""
        subclasses = WiFiDeauthAttackWrapper.__subclasses__()
        # A list comprehension instead of filter(): on Python 3, filter()
        # returns an iterator, which made the original candidates[0] fail
        # with a TypeError.
        candidates = [subclass for subclass in subclasses
                      if subclass.handles(options)]
        return candidates[0](options)
class WiFiDeauthAttackWrapper(object):
    """Base class for attack wrappers.

    Subclasses declare which option combinations they accept (handles) and
    which concrete attack implementation they build (_get_attack_implementor).
    """

    @classmethod
    def handles(cls, options):
        # Subclass responsibility.
        raise NotImplementedError

    def __init__(self, options):
        self.options = options

    def _get_attack_implementor(self):
        # Subclass responsibility.
        raise NotImplementedError

    def run(self):
        """Build the concrete attack and run it with the configured options."""
        return self._get_attack_implementor().run(self.options.executions,
                                                  self.options.persistence_times)
class FixedClientDeauthAttackWrapper(WiFiDeauthAttackWrapper):
    """Wrapper chosen when the user supplied a specific client address."""

    @classmethod
    def handles(cls, options):
        # A non-empty client MAC means a fixed-target attack was requested.
        return len(options.client) > 0

    def _get_attack_implementor(self):
        opts = self.options
        return FixedClientDeauthAttack(opts.interface, opts.bssid, [opts.client])
class GlobalDisassociationAttackWrapper(WiFiDeauthAttackWrapper):
    """Wrapper chosen when no client is given and sniffing is disabled."""

    @classmethod
    def handles(cls, options):
        no_client = len(options.client) == 0
        return no_client and not options.should_sniff

    def _get_attack_implementor(self):
        opts = self.options
        return GlobalDisassociationAttack(opts.interface, opts.bssid)
class SniffedClientDeauthAttackWrapper(WiFiDeauthAttackWrapper):
    """Wrapper chosen when no client is given but sniffing is enabled."""

    @classmethod
    def handles(cls, options):
        no_client = len(options.client) == 0
        return no_client and options.should_sniff

    def _get_attack_implementor(self):
        opts = self.options
        return SniffedClientDeauthAttack(opts.interface, opts.bssid, opts.timeout)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Config, GPT2Tokenizer, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow
from .test_generation_flax_utils import FlaxGenerationTesterMixin
from .test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gpt2.modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model
if is_torch_available():
import torch
class FlaxGPT2ModelTester:
    """Builds a tiny GPT-2 config plus random inputs for the Flax tests below."""
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        # `parent` is the owning unittest.TestCase; assertions are routed
        # through it in the check_* helpers.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # These tests reuse the last vocabulary id for BOS/EOS/PAD.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self, gradient_checkpointing=False):
        """Return (config, input_ids, attention_mask) for a tiny GPT-2."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPT2Config(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            n_ctx=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            gradient_checkpointing=gradient_checkpointing,
        )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Check incremental decoding with init_cache against a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        # The caller-supplied mask is replaced by an all-ones mask sized for
        # the full decoding length.
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        # First pass: everything except the last token primes the cache.
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        # Second pass: only the last token, reusing the cached keys/values.
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        # Compare the final-position logits (first 5 features) of both paths.
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same as check_use_cache_forward, but keeps the caller's attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        # Pad the provided mask with zeros up to the maximum decoding length.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPT2ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    """Flax GPT-2 suite: cached decoding, pinned generation output, and
    PyTorch<->Flax weight/output equivalence in both directions."""
    all_model_classes = (FlaxGPT2Model, FlaxGPT2LMHeadModel) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPT2LMHeadModel,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPT2ModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @slow
    def test_batch_generation(self):
        # Greedy generation on the pretrained "gpt2" checkpoint must
        # reproduce the pinned strings below exactly.
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="</s>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="jax", padding=True, truncation=True)
        model = FlaxGPT2LMHeadModel.from_pretrained("gpt2")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of words. I'm going to try to explain what I mean.",
            "Hey, I'm not sure if I'm going to be able to do",
        ]
        self.assertListEqual(output_string, expected_string)
    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        """Convert PT weights to Flax and check outputs agree at the last position."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                # Randomize the amount of left-padding per row so that masked
                # positions are actually exercised by the comparison.
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                # Round-trip through save/load (PT checkpoint -> Flax model).
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        """Load Flax weights into PT and check outputs agree at the last position."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                # Round-trip through save/load (Flax checkpoint -> PT model).
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @slow
    def test_model_from_pretrained(self):
        # Smoke test: the pretrained checkpoint loads and a forward pass runs.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("gpt2", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| huggingface/pytorch-transformers | tests/test_modeling_flax_gpt2.py | Python | apache-2.0 | 14,464 | 0.00401 |
from xyz.location import build_location
def test_build_location_simple():
    """Constructing a Location should preserve country and city."""
    location_cls = build_location()
    charlottetown = location_cls("Canada", "Charlottetown")
    assert charlottetown.country == "Canada"
    assert charlottetown.city == "Charlottetown"
| codetojoy/gists | python/pipenv_jun_2020/tests/test_location.py | Python | apache-2.0 | 257 | 0.007782 |
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""SHA-224 cryptographic hash algorithm.
SHA-224 belongs to the SHA-2_ family of cryptographic hashes.
It produces the 224 bit digest of a message.
>>> from Cryptodome.Hash import SHA224
>>>
>>> h = SHA224.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
*SHA* stands for Secure Hash Algorithm.
.. _SHA-2: http://csrc.nist.gov/publications/fips/fips180-2/fips180-4.pdf
"""
from Cryptodome.Util.py3compat import *
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
expect_byte_string)
_raw_sha224_lib = load_pycryptodome_raw_lib("Cryptodome.Hash._SHA224",
"""
int SHA224_init(void **shaState);
int SHA224_destroy(void *shaState);
int SHA224_update(void *hs,
const uint8_t *buf,
size_t len);
int SHA224_digest(const void *shaState,
uint8_t digest[16]);
int SHA224_copy(const void *src, void *dst);
""")
class SHA224Hash(object):
    """Class that implements a SHA-224 hash
    """
    #: The size of the resulting hash in bytes.
    digest_size = 28
    #: The internal block size of the hash algorithm in bytes.
    block_size = 64
    #: ASN.1 Object ID
    oid = '2.16.840.1.101.3.4.2.4'
    def __init__(self, data=None):
        # Allocate the C-level hash state; SmartPointer ties its lifetime to
        # this object so SHA224_destroy runs when the instance is collected.
        state = VoidPointer()
        result = _raw_sha224_lib.SHA224_init(state.address_of())
        if result:
            raise ValueError("Error %d while instantiating SHA224"
                             % result)
        self._state = SmartPointer(state.get(),
                                   _raw_sha224_lib.SHA224_destroy)
        if data:
            self.update(data)
    def update(self, data):
        """Continue hashing of a message by consuming the next chunk of data.
        Repeated calls are equivalent to a single call with the concatenation
        of all the arguments. In other words:
        >>> m.update(a); m.update(b)
        is equivalent to:
        >>> m.update(a+b)
        :Parameters:
          data : byte string
            The next chunk of the message being hashed.
        """
        expect_byte_string(data)
        result = _raw_sha224_lib.SHA224_update(self._state.get(),
                                               data,
                                               c_size_t(len(data)))
        if result:
            raise ValueError("Error %d while instantiating SHA224"
                             % result)
    def digest(self):
        """Return the **binary** (non-printable) digest of the message that has been hashed so far.
        This method does not change the state of the hash object.
        You can continue updating the object after calling this function.
        :Return: A byte string of `digest_size` bytes. It may contain non-ASCII
         characters, including null bytes.
        """
        # The C call reads the state without mutating it, so hashing can
        # continue after this returns.
        bfr = create_string_buffer(self.digest_size)
        result = _raw_sha224_lib.SHA224_digest(self._state.get(),
                                               bfr)
        if result:
            raise ValueError("Error %d while instantiating SHA224"
                             % result)
        return get_raw_buffer(bfr)
    def hexdigest(self):
        """Return the **printable** digest of the message that has been hashed so far.
        This method does not change the state of the hash object.
        :Return: A string of 2* `digest_size` characters. It contains only
         hexadecimal ASCII digits.
        """
        return "".join(["%02x" % bord(x) for x in self.digest()])
    def copy(self):
        """Return a copy ("clone") of the hash object.
        The copy will have the same internal state as the original hash
        object.
        This can be used to efficiently compute the digests of strings that
        share a common initial substring.
        :Return: A hash object of the same type
        """
        # Clone by copying the raw C state into a fresh object.
        clone = SHA224Hash()
        result = _raw_sha224_lib.SHA224_copy(self._state.get(),
                                             clone._state.get())
        if result:
            raise ValueError("Error %d while copying SHA224" % result)
        return clone
    def new(self, data=None):
        # Instance-level factory mirroring the module-level new().
        return SHA224Hash(data)
def new(data=None):
    """Return a fresh instance of the hash object.

    :Parameters:
      data : byte string
        The very first chunk of the message to hash.
        It is equivalent to an early call to `SHA224Hash.update()`.
        Optional.

    :Return: A `SHA224Hash` object
    """
    # Construct the hash directly.  The previous SHA224Hash().new(data)
    # allocated (and immediately discarded) an extra intermediate hash state.
    return SHA224Hash(data)
#: The size of the resulting hash in bytes (SHA-224 produces 28 bytes).
digest_size = SHA224Hash.digest_size
#: The internal block size of the hash algorithm in bytes (64 for SHA-224).
block_size = SHA224Hash.block_size
| Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/Cryptodome/Hash/SHA224.py | Python | gpl-2.0 | 6,132 | 0.001794 |
"""
Braces for type definition(class / struct / union / enum) should be located in the seperate line.
== Violation ==
class K() { <== ERROR
}
struct K { <== ERROR
}
== Good ==
struct A()
{ <== CORRECT
}
class K()
{ <== CORRECT
public :
void Hello() { <== Don't care. It's a function definition.
}
}
"""
from nsiqunittest.nsiqcppstyle_unittestbase import *
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext):
    """Report type-definition braces that are not placed on their own line,
    or whose closing brace is not aligned with the opening brace.

    Declarations (decl is true), namespaces, and constructs without a
    recorded type context are skipped, as are single-line definitions.
    """
    if decl or currentType == "NAMESPACE" or typeContext is None:
        return
    t = lexer.GetNextTokenInType("LBRACE", False, True)
    if t is None:
        return
    t2 = typeContext.endToken
    if t2 is None or t.lineno == t2.lineno:
        # Single-line definitions (e.g. "struct A { };") are acceptable.
        return
    prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess()
    if prevToken is not None and prevToken.lineno == t.lineno:
        nsiqcppstyle_reporter.Error(
            t, __name__, "The brace for type definition should be located in start of line")
    # The redundant re-check of "t2.lineno != t.lineno" was removed: the
    # guard above already guarantees the braces are on different lines.
    if GetRealColumn(t2) != GetRealColumn(t):
        nsiqcppstyle_reporter.Error(
            t2, __name__, "The brace for type definition should be located in same column")
# Register RunRule so it fires for every type name the analyzer visits.
ruleManager.AddTypeNameRule(RunRule)
##########################################################################
# Unit Test
##########################################################################
class testRule(nct):
    """Unit tests for the type-definition brace rule.

    Each test feeds a C/C++ snippet to the analyzer and checks whether the
    rule reports an error.  The snippet strings must be kept verbatim.
    """
    def setUpRule(self):
        ruleManager.AddTypeNameRule(RunRule)
    def test1(self):
        # Opening brace on the class-definition line -> violation.
        self.Analyze("thisfile.c", """
public class A {
}
""")
        self.ExpectError(__name__)
    def test2(self):
        self.Analyze("thisfile.c", """
class C : public AA {
}
""")
        self.ExpectError(__name__)
    def test3(self):
        # Braces on their own lines; function-body braces are out of scope.
        self.Analyze("thisfile.c", """
class K
{
    void function() const {
    }
    class T
    {
    }
}
""")
        self.ExpectSuccess(__name__)
    def test4(self):
        # Inner class T opens its brace on the definition line -> violation.
        self.Analyze("thisfile.c", """
class K
{
    void function() const {
    }
    class T {
    }
}
""")
        self.ExpectError(__name__)
    def test5(self):
        self.Analyze("thisfile.c", """
class C : public AA
{
    class T {
    }
}
""")
        self.ExpectError(__name__)
    def test6(self):
        # NOTE(review): braces here look conforming, yet an error is
        # expected; presumably the closing-brace column check fires for the
        # indented inner class -- confirm against the rule's column logic.
        self.Analyze("thisfile.c", """
class C : public AA
{
    class T
    {
    }
}
""")
        self.ExpectError(__name__)
    def test7(self):
        # Single-line brace pair "{ }" is tolerated.
        self.Analyze("thisfile.c", """
class C : public AA
{
    class T
    { }
}
""")
        self.ExpectSuccess(__name__)
    def test8(self):
        # Namespaces are explicitly exempt from the rule.
        self.Analyze("thisfile.c", """
namespace C {
}
""")
        self.ExpectSuccess(__name__)
    def test9(self):
        # Plain statement blocks are not type definitions.
        self.Analyze("thisfile.c", """
if (hello) {
   // {kr} m_btn5 {/kr}
}
""")
        self.ExpectSuccess(__name__)
#########################################################################
# Uniprot XML parser to parse phosphorylation info of proteins
#
# eg 29/07/2009
#########################################################################
#from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import iterparse
import TsvReader
def main():
file_name = "../data/disease/uniprot/humdisease.txt"
mim_to_mesh_values = get_mim_to_mesh(file_name)
print len(mim_to_mesh)
print mim_to_mesh["600807"]
return
from time import clock
parser = UniprotXMLParser("../data/Q12888.xml")
#parser = UniprotXMLParser("../../data/phosphorylation/uniprot/uniprot-phosphorylation-large-scale-analysis.xml")
#ids = parser.parse_ids()
#print map(len, ids)
#print ids[-1]
t1 = clock()
elements = parser.parse()
t2 = clock()
print len(elements), elements[-1]
print t2-t1
return
def get_uniprot_to_geneid(file_name, uniprot_ids=None, only_min=True, key_function=int):
    """
    To parse HUMAN_9606_idmapping.dat file (trimmed to two columns) from Uniprot
    only_min: Chooses the "min" defined by key_function used in min()
    key_function: int (geneids) | len (gene symbols)
    Creating the file
    wget ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz
    zgrep Gene_Name HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > uniprot_to_symbol.txt
    zgrep GeneID HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > idmapping.tab
    OR zcat HUMAN_9606_idmapping_selected.dat.gz | cut -f 1,3 > idmapping.tab
    """
    # Maps uniprot accession -> set of gene ids (or a single id if only_min).
    uniprot_to_geneids = {}
    #geneid_to_uniprots = {}
    f = open(file_name)
    f.readline()  # skip the header line
    for line in f:
        uniprot, geneid = line.split("\t")
        geneid = geneid.strip()
        uniprot = uniprot.strip()
        if geneid == "" or uniprot == "":
            continue
        # Optional filter: keep only the requested accessions.
        if uniprot_ids is not None and uniprot not in uniprot_ids:
            continue
        #if only_min:
        #    geneid = min(geneid.split("; "), key=key_function)
        #uniprot_to_geneids[uniprot] = geneid
        uniprot_to_geneids.setdefault(uniprot, set()).add(geneid)
    f.close()
    if only_min:
        # Collapse each id set to the single "smallest" entry, where
        # key_function defines the ordering (int for gene ids, len for symbols).
        uniprot_to_geneid = {}
        for uniprot, geneids in uniprot_to_geneids.iteritems():  # Python 2
            uniprot_to_geneid[uniprot] = min(geneids, key=key_function)
        uniprot_to_geneids = uniprot_to_geneid
    return uniprot_to_geneids
def get_uniprot_to_geneid_from_idmapping_file(file_name, uniprot_ids=None):
    """
    To parse idmapping.tab from Uniprot
    Useful for id mapping of non-human species
    """
    parser = TsvReader.TsvReader(file_name, delim="\t", inner_delim=";")
    column_to_index, id_to_values = parser.read(fields_to_include=["UniProtKB-AC", "GeneID (EntrezGene)"], keys_to_include=uniprot_ids, merge_inner_values=True)
    uniprot_to_geneid = {}
    for uniprot, values in id_to_values.iteritems():  # Python 2
        for val in values:
            geneid = val[column_to_index["geneid (entrezgene)"]]
            #if uniprot in uniprot_to_geneid:
            #        print "multiple gene id", uniprot
            #uniprot_to_geneid.setdefault(uniprot, set()).add(geneid)
            # NOTE(review): when an accession maps to several gene ids, only
            # the last one seen is kept (earlier values are overwritten).
            uniprot_to_geneid[uniprot] = geneid
    return uniprot_to_geneid
def get_mim_to_mesh(file_name):
    """
    To parse humdisease.txt from Uniprot.
    Returns {mim_id: [(disease_name, mesh_id), ...]}.
    """
    mim_to_mesh_values = {}
    f = open(file_name)
    line = f.readline()
    # Skip the file preamble until the first disease record ("ID" line).
    while not line.startswith("ID"):
        line = f.readline()
    words = line.strip().split()
    disease = " ".join(words[1:]).rstrip(".")
    # NOTE(review): assumes every subsequent line is non-empty and that each
    # record carries both a MIM and a MeSH cross-reference before its "//"
    # terminator; otherwise `mim`/`mesh` carry over from the previous record
    # (or are unbound on the first) -- confirm against the file format.
    for line in f:
        words = line.strip().split()
        if words[0] == "ID":
            disease = " ".join(words[1:]).rstrip(".")
        if words[0] == "DR":
            id_type = words[1].lower().rstrip(";")
            if id_type == "mesh":
                mesh = words[2].rstrip(".")
            elif id_type == "mim":
                mim = words[2].rstrip(";")
        if line.startswith("//"):
            # End of record: store the (disease, mesh) pair under its MIM id.
            #if mim in mim_to_mesh_values and mim_to_mesh_values[mim][1] == mesh:
            #continue
            #if mim in mim_to_mesh_values: print mim, mim_to_mesh_values[mim], disease, mesh
            mim_to_mesh_values.setdefault(mim, []).append((disease, mesh))
    f.close()
    return mim_to_mesh_values
class UniprotXMLParser(object):
    """Streaming parser for Uniprot XML: accessions, taxonomy and phosphosites."""
    NS="{http://uniprot.org/uniprot}"
    # Maps Uniprot modified-residue descriptions to one-letter residue codes.
    psiteDesc_to_psiteChar = { "Phosphoserine": "S",
                               "Phosphothreonine": "T",
                               "Phosphotyrosine": "Y",
                               "Phosphohistidine": "H" }
    def __init__(self, filename):
        self.file_name = filename
        #self.etree = ElementTree()
        return
    def parse_ids_high_mem(self):
        # NOTE(review): ElementTree is not imported (the import at the top of
        # the file is commented out), so calling this method raises NameError.
        # It also loads the whole document into memory; prefer parse_ids().
        self.etree = ElementTree()
        tree = self.etree.parse(self.file_name)
        #ids = tree.findall(self.NS+"accession")
        ids = []
        sub_ids = None
        for e in tree.getiterator():
            if e.tag == self.NS+"entry":
                # A new <entry> starts: flush the accessions of the previous one.
                if sub_ids is not None:
                    ids.append(sub_ids)
                sub_ids = []
            if e.tag == self.NS+"accession":
                sub_ids.append(e.text)
        ids.append(sub_ids)
        return ids
    def parse_ids(self):
        """Return a list of accession-id lists, one per <entry> (memory-lean)."""
        ids = []
        sub_ids = []
        # get an iterable
        context = iterparse(self.file_name, ["start", "end"])
        # turn it into an iterator
        context = iter(context)
        # get the root element (Python 2 iterator protocol: .next())
        event, root = context.next()
        for (event, elem) in context:
            if event == "end":
                if elem.tag == self.NS+"accession":
                    sub_ids.append(elem.text)
                if elem.tag == self.NS+"entry":
                    ids.append(sub_ids)
                    sub_ids = []
                # Free the element to keep memory usage flat.
                elem.clear()
        root.clear()
        return ids
    def parse(self):
        """Stream the file and return one UniprotXMLElement per <entry>."""
        ignored_modification_types = set()
        context = iterparse(self.file_name, ["start", "end"])
        context = iter(context)
        event, root = context.next()
        elements = []
        current_element = None
        current_position = None
        for (event, elem) in context:
            if event == "start":
                if elem.tag == self.NS+"entry":
                    current_element = UniprotXMLElement()
            elif event == "end":
                if elem.tag == self.NS+"accession":
                    current_element.add_id(elem.text)
                elif elem.tag == self.NS+"organism":
                    db_elm = elem.find(self.NS+"dbReference") #only looks at sublevel - alternative: keep tag stack
                    if db_elm.get("type") == "NCBI Taxonomy":
                        current_element.set_tax(db_elm.get("id"))
                elif elem.tag == self.NS+"feature" and elem.get("type") == "modified residue":
                    #print elem.getchildren()
                    #pos_elm = elem.find(self.NS+"position")
                    #if elem.get("status") == "probable":
                    #    continue
                    for sub_elm in elem.getiterator():
                        if sub_elm.tag == self.NS+"position":
                            pos_elm = sub_elm
                    pos = pos_elm.get("position")
                    # Description looks like "Phosphoserine; by CDK1":
                    # first part is the modification type, the optional
                    # second part names the kinase.
                    desc = elem.get("description")
                    vals = desc.split(";")
                    type = vals[0]
                    kinase = vals[1][vals[1].find("by")+2:].strip() if (len(vals) > 1) else None
                    if self.psiteDesc_to_psiteChar.has_key(type):  # Python 2
                        type = self.psiteDesc_to_psiteChar[type]
                        current_element.add_psite(pos, type, kinase)
                    else:
                        # Non-phospho modifications are collected and reported.
                        ignored_modification_types.add(type)
                elif elem.tag == self.NS+"entry":
                    seq_elm = elem.find(self.NS+"sequence")
                    current_element.set_sequence(seq_elm.text)
                    elements.append(current_element)
                    elem.clear()
        root.clear()
        print "Ignored mofications: ", ignored_modification_types
        return elements
class UniprotXMLElement(object):
    """Container for the data parsed from one Uniprot <entry> element:
    accession ids, NCBI taxonomy id, phosphosites and the raw sequence."""

    def __init__(self):
        self.ids = []
        self.taxid = None
        self.phosphosites = []
        self.sequence = None

    def add_id(self, id):
        """Record one accession id."""
        self.ids.append(id)

    def set_tax(self, taxid):
        """Record the NCBI Taxonomy id."""
        self.taxid = taxid

    def add_psite(self, pos, type=None, kinase=None):
        """Record one phosphosite as a (position, type, kinase) triple."""
        self.phosphosites.append((pos, type, kinase))

    def set_sequence(self, seq):
        """Store the sequence with embedded newlines stripped."""
        self.sequence = seq.replace("\n", "")

    def get_ids(self):
        return self.ids

    def get_tax(self):
        return self.taxid

    def get_psites(self):
        return self.phosphosites

    def get_sequence(self):
        return self.sequence

    def __repr__(self):
        # Tab-separated rendering of all four fields.
        fields = (self.ids, self.taxid, self.phosphosites, self.sequence)
        return "\t".join(str(field) for field in fields)
if __name__ == "__main__":
main()
| quimaguirre/diana | diana/toolbox/parse_uniprot.py | Python | mit | 9,091 | 0.0055 |
"""Add autoincrement
Revision ID: 73b63ad41d3
Revises: 331f2c45f5a
Create Date: 2017-07-25 17:09:55.204538
"""
# revision identifiers, used by Alembic.
revision = '73b63ad41d3'
down_revision = '331f2c45f5a'
from alembic import op
from sqlalchemy import Integer
import sqlalchemy as sa
def upgrade():
    # Make RepositoryApp2languages.id an auto-incrementing, NOT NULL
    # integer column.
    op.alter_column("RepositoryApp2languages", "id", existing_type=Integer, autoincrement=True, nullable=False)
def downgrade():
    # Intentionally a no-op: the autoincrement change is not reverted.
    pass
| porduna/appcomposer | alembic/versions/73b63ad41d3_add_autoincrement.py | Python | bsd-2-clause | 446 | 0.008969 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateDnsAuthorization
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-certificate-manager
# [START certificatemanager_v1_generated_CertificateManager_UpdateDnsAuthorization_async]
from google.cloud import certificate_manager_v1
async def sample_update_dns_authorization():
    """Issue an UpdateDnsAuthorization long-running operation and print the result."""
    # Create a client
    client = certificate_manager_v1.CertificateManagerAsyncClient()
    # Initialize request argument(s)
    dns_authorization = certificate_manager_v1.DnsAuthorization()
    dns_authorization.domain = "domain_value"
    request = certificate_manager_v1.UpdateDnsAuthorizationRequest(
        dns_authorization=dns_authorization,
    )
    # Make the request
    # NOTE(review): with the async client this call returns a coroutine; newer
    # generated samples await it before calling .result() — confirm against the
    # current generator output (this file is generated, DO NOT EDIT by hand).
    operation = client.update_dns_authorization(request=request)
    print("Waiting for operation to complete...")
    response = await operation.result()
    # Handle the response
    print(response)
| googleapis/python-certificate-manager | samples/generated_samples/certificatemanager_v1_generated_certificate_manager_update_dns_authorization_async.py | Python | apache-2.0 | 1,818 | 0.00165 |
#!/usr/bin/env python
#---coding=utf8---
from HomeHandler import HomeHandler
from LoginHandler import LoginHandler
from LogoutHandler import LogoutHandler
from ArchivesHandler import ArchivesHandler
from CategoryHandler import CategoryHandler
from TagHandler import TagHandler
from PageHandler import PageHandler
from SearchHandler import SearchHandler
from AdminHome import AdminHome
from ListPost import ListPost
from EditPost import EditPost
from ListComment import ListComment
from ListTag import ListTag
from ListCategory import ListCategory
from ListHtml import ListHtml
# URL routing table consumed by the web framework: (url_regex, handler_class)
# pairs; capture groups in a pattern (e.g. /archives/([\d]*)) are passed to
# the handler as positional arguments.
handlers = [
    (r"/", HomeHandler),
    (r"/login", LoginHandler),
    (r"/logout",LogoutHandler),
    (r"/archives/([\d]*)",ArchivesHandler),
    (r"/category",CategoryHandler),
    (r"/tag",TagHandler),
    (r"/page",PageHandler),
    (r"/search",SearchHandler),
    (r"/admin/",AdminHome),
    (r"/list/post",ListPost),
    (r"/edit/post",EditPost),
    (r"/list/comment",ListComment),
    (r"/list/tag",ListTag),
    (r"/list/category",ListCategory),
    (r"/list/html",ListHtml),
    ]
| grepcook/blog | blog/handlers.py | Python | mit | 1,071 | 0.013072 |
from __future__ import absolute_import
from collections import defaultdict
from django.db import transaction
from django.utils.translation import ugettext as _
from zerver.lib.exceptions import JsonableError
from zerver.models import UserProfile, Realm, UserGroupMembership, UserGroup
from typing import Dict, Iterable, List, Tuple, Any
def access_user_group_by_id(user_group_id: int, user_profile: UserProfile) -> UserGroup:
    """Fetch a user group, enforcing that it belongs to the user's realm and
    that the user is either a member of the group or a realm administrator.

    Raises JsonableError for a nonexistent/foreign-realm group and for a
    permission failure.
    """
    try:
        user_group = UserGroup.objects.get(id=user_group_id, realm=user_profile.realm)
        group_member_ids = get_user_group_members(user_group)
        msg = _("Only group members and organization administrators can administer this group.")
        # The permission error raised here is NOT caught by the except clause
        # below, which only handles UserGroup.DoesNotExist.
        if (not user_profile.is_realm_admin and user_profile.id not in group_member_ids):
            raise JsonableError(msg)
    except UserGroup.DoesNotExist:
        raise JsonableError(_("Invalid user group"))
    return user_group
def user_groups_in_realm(realm: Realm) -> List[UserGroup]:
    """Return all user groups belonging to the given realm as a list."""
    return list(UserGroup.objects.filter(realm=realm))
def user_groups_in_realm_serialized(realm: Realm) -> List[Dict[str, Any]]:
    """This function is used in do_events_register code path so this code
    should be performant.  We need to do 2 database queries because
    Django's ORM doesn't properly support the left join between
    UserGroup and UserGroupMembership that we need.
    """
    realm_groups = UserGroup.objects.filter(realm=realm)
    # Maps group id -> serialized group dict (corrected type comment: the
    # keys are the integer UserGroup ids assigned below).
    group_dicts = {}  # type: Dict[int, Dict[str, Any]]
    for user_group in realm_groups:
        group_dicts[user_group.id] = dict(
            id=user_group.id,
            name=user_group.name,
            description=user_group.description,
            members=[],
        )
    # Single bulk query for all memberships in the realm; fan the user ids
    # out into the per-group 'members' lists.
    membership = UserGroupMembership.objects.filter(user_group__realm=realm).values_list(
        'user_group_id', 'user_profile_id')
    for (user_group_id, user_profile_id) in membership:
        group_dicts[user_group_id]['members'].append(user_profile_id)
    # Deterministic ordering: member ids sorted within each group, groups
    # sorted by id.
    for group_dict in group_dicts.values():
        group_dict['members'] = sorted(group_dict['members'])
    return sorted(group_dicts.values(), key=lambda group_dict: group_dict['id'])
def get_user_groups(user_profile: UserProfile) -> List[UserGroup]:
    """Return every user group the given user belongs to."""
    return [group for group in user_profile.usergroup_set.all()]
def check_add_user_to_user_group(user_profile: UserProfile, user_group: UserGroup) -> bool:
    """Idempotently add the user to the group.

    Returns True iff a new membership row was actually created.
    """
    _membership, created = UserGroupMembership.objects.get_or_create(
        user_profile=user_profile, user_group=user_group)
    return created
def remove_user_from_user_group(user_profile: UserProfile, user_group: UserGroup) -> int:
    """Delete the membership row linking user_profile to user_group.

    Returns the number of rows deleted (0 if the user was not a member).
    QuerySet.delete() returns (total_deleted, per_model_counts); we only
    need the total.  The discard variable is named explicitly so it does
    not shadow the module-level ugettext alias `_`.
    """
    num_deleted, _rows_by_model = UserGroupMembership.objects.filter(
        user_profile=user_profile, user_group=user_group).delete()
    return num_deleted
def check_remove_user_from_user_group(user_profile: UserProfile, user_group: UserGroup) -> bool:
    """Best-effort removal: True iff at least one membership row was deleted,
    False on no-op or on any error."""
    try:
        return remove_user_from_user_group(user_profile, user_group) > 0
    except Exception:
        return False
def create_user_group(name: str, members: List[UserProfile], realm: Realm,
                      description: str='') -> UserGroup:
    """Atomically create a user group plus one membership row per member."""
    with transaction.atomic():
        group = UserGroup.objects.create(name=name, realm=realm,
                                         description=description)
        memberships = [UserGroupMembership(user_profile=member, user_group=group)
                       for member in members]
        UserGroupMembership.objects.bulk_create(memberships)
    return group
def get_user_group_members(user_group: UserGroup) -> List[int]:
    """Return the user-profile ids of the group's members.

    The previous annotation claimed List[UserProfile], but the function has
    always returned ids (callers such as access_user_group_by_id compare the
    result against user_profile.id); the annotation now matches the behavior.
    """
    members = UserGroupMembership.objects.filter(user_group=user_group)
    return [member.user_profile.id for member in members]
def get_memberships_of_users(user_group: UserGroup, members: List[UserProfile]) -> List[int]:
    """Of the given users, return the ids of those who belong to user_group."""
    memberships = UserGroupMembership.objects.filter(
        user_group=user_group, user_profile__in=members)
    return list(memberships.values_list('user_profile_id', flat=True))
| jackrzhang/zulip | zerver/lib/user_groups.py | Python | apache-2.0 | 3,997 | 0.005504 |
from bing_search_api import BingSearchAPI
my_key = "MEL5FOrb1H5G1E78YY8N5mkfcvUK2hNBYsZl1aAEEbE"  # NOTE(review): API key hardcoded in source — move to config/env var
def query(query_string):
    """Run a Bing web search for query_string and return the result URLs.

    Performs network I/O via BingSearchAPI; requires a valid API key.
    Returns the 'Url' field of each hit in the first 'Web' result block.
    """
    bing = BingSearchAPI(my_key)
    # ImageFilters only affects image results; kept as in the original call.
    params = {'ImageFilters':'"Face:Face"',
              '$format': 'json',
              '$top': 10,
              '$skip': 0}
    results = bing.search('web',query_string,params).json() # requests 1.0+
    return [result['Url'] for result in results['d']['results'][0]['Web']]
if __name__ == "__main__":
    query_string = "Your Query"  # replace with a real search term
    print query(query_string)  # Python 2 print statement
| mzweilin/HashTag-Understanding | test/test_bing_search.py | Python | apache-2.0 | 529 | 0.015123 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `complete` flag (default False) to the assignment model."""
    dependencies = [
        ('editorial', '0062_auto_20171202_1413'),
    ]
    operations = [
        migrations.AddField(
            model_name='assignment',
            name='complete',
            field=models.BooleanField(default=False, help_text=b'Is the assignment complete?'),
        ),
    ]
| ProjectFacet/facet | project/editorial/migrations/0063_assignment_complete.py | Python | mit | 453 | 0.002208 |
# -*- coding: utf-8 -*-
# © <2016> <ToproERP liujing>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from json import *
import logging
import string
import hashlib
import urllib2
from openerp import http
from openerp.http import request
import Cookie
import base64
import pytz
import datetime
from time import time, localtime
from ToproERP_Wechat_Enterprises.models.wechat_enterprise_basic import WeChatEnterprise
import time, json, random
from ToproERP_Wechat_Enterprises.models import wechat_enterprise
import urlparse
import werkzeug.utils
import werkzeug.wrappers
_logger = logging.getLogger(__name__)
class WechatGLD(http.Controller):
    '''
    Receives every message pushed from WeChat and dispatches it to the
    appropriate business handler.
    '''
    __check_str = 'NDOEHNDSY#$_@$JFDK:Q{!'  # NOTE(review): purpose unclear from this file; appears unused here
# 跳转SNS页面
@http.route('/WechatGLD/get_sns_html', type='http', auth="public", csrf=False)
def get_sns_html(self, name):
values = {"name": name}
return request.render('ToproERP_WeChat_GLD.get_sns_html', values)
    # Return the SNS/log messages of the given work order as JSON.
    @http.route('/WechatGLD/get_sns', type='http', auth="public", csrf=False)
    def get_sns(self, gld_name):
        """Serialize all mail.message records attached to the work order."""
        temp_list = []
        gld = request.env['syt.oa.gld'].sudo().search([('name', '=', gld_name)])
        message = request.env['mail.message'].sudo().search([('res_id', '=', gld.id), ('model', '=', 'syt.oa.gld')])
        if message:
            for value in message:
                temp_item = {}
                employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(value.create_uid))])
                # temp_item['operator'] = employee.name  # operator (kept disabled)
                temp_item['id'] = employee.id  # employee id
                temp_item['name'] = employee.name  # operator name
                temp_item['email'] = employee.work_email  # employee email
                temp_item['body'] = str(value.body).replace("<p>", "").replace("</p>", "")  # message body, tags stripped
                timeArray = time.strptime(str(value.create_date), "%Y-%m-%d %H:%M:%S")
                timeStamp = int(time.mktime(timeArray))
                create_time = timeStamp + 8 * 60 * 60  # shift UTC to UTC+8 (China)
                timeArray = time.localtime(create_time)
                otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                temp_item['time'] = otherStyleTime  # creation time, UTC+8
                temp_list.append(temp_item)
        return JSONEncoder().encode(temp_list)
    # Return the current user's profile: avatar URL, company, department, name.
    @http.route('/WechatGLD/get_user_image', type='http', auth="public", csrf=False)
    def get_user_image(self, userid):
        temp_list = []
        user = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        # Avatar is served through the standard binary-image controller.
        image = '/web/binary/image?model=hr.employee&field=image&id=' + str(user.id) + '&resize='
        if user:
            temp_item = {}
            temp_item['image'] = image
            temp_item['id'] = user.id
            temp_item['name'] = user.name
            temp_item['company_name'] = user.department_id.company_id.name
            temp_item['dept'] = user.department_id.name
            temp_item['job_name'] = user.job_id.name
            temp_list.append(temp_item)
        return JSONEncoder().encode(temp_list)
    # First page of "create work order": template type, template, title, body, next.
    @wechat_enterprise.wechat_login
    @http.route('/WechatGLD/get_add_gld_first_page', type='http', auth="public", csrf=False)
    def get_add_gld_first_page(self, *args, **kw):
        user = request.env['res.users'].sudo().search([('id', '=', request.session['uid'])])
        temp_type = request.env['syt.oa.gld.template.type'].sudo().search([])
        return request.render('ToproERP_WeChat_GLD.get_add_gld_first_page', {"user": user, "temp_type": temp_type})
    # Second page of "create work order": urgency, attachments, approvers,
    # CC list, save / save-and-submit.
    @http.route('/WechatGLD/get_add_gld_second_page', type='http', auth="public", csrf=False)
    def get_add_gld_second_page(self, template_type, template, title, text, emergency, userid):
        user = request.env['res.users'].sudo().search([('id', '=', int(userid))])
        return request.render('ToproERP_WeChat_GLD.get_add_gld_second_page',
                              {"template_type": template_type, "template": template,
                               "title": title, "text": text, "emergency": emergency, "user": user})
    # Page: work orders initiated by the logged-in user.
    @wechat_enterprise.wechat_login
    @http.route('/WechatGLD/list_faqi', type='http', auth="public", csrf=False)
    def list_faqi(self, *args, **kw):
        data = request.env['syt.oa.gld'].sudo().search([('sponsor.user_id', '=', request.session['uid'])])
        user = request.env['res.users'].sudo().search([('id', '=', request.session['uid'])])
        return request.render('ToproERP_WeChat_GLD.list_faqi', {"user": user, "data": data})
    # Page: pending (to-do) work orders — either CC'd to the user on a
    # 'through' order, or awaiting the user's approval.
    @wechat_enterprise.wechat_login
    @http.route('/WechatGLD/list_daiban', type='http', auth="public", csrf=False)
    def list_daiban(self, *args, **kw):
        data = request.env['syt.oa.gld'].sudo().search(
            ['|',
             '&', ('copy_users_dy_ids.user_id', '=', request.session['uid']), ('state', '=', 'through'),
             '&', ('approver.user_id.id', '=', request.session['uid']), ('state', 'in', ('pending', 'pass'))])
        user = request.env['res.users'].sudo().search([('id', '=', request.session['uid'])])
        return request.render('ToproERP_WeChat_GLD.list_daiban', {"user": user, "data": data})
    # Page: work orders the user has already handled (approved).
    @wechat_enterprise.wechat_login
    @http.route('/WechatGLD/list_yiban', type='http', auth="public", csrf=False)
    def list_yiban(self, *args, **kw):
        data = request.env['syt.oa.gld'].sudo().search([('yi_approver_user_ids.user_id', '=', request.session['uid'])])
        user = request.env['res.users'].sudo().search([('id', '=', request.session['uid'])])
        return request.render('ToproERP_WeChat_GLD.list_yiban', {"user": user, "data": data})
    # Serialize work-order lists (mine / to-do / done) to a JSON array.
    def onload_list(self, obj, user_id, copy_users=None):
        """Serialize the given gld recordsets into JSON via get_list().

        NOTE(review): a record present in both obj and copy_users is emitted
        twice — confirm this duplication is intended.
        """
        temp_type_list = []
        if obj:
            for value_obj in obj:
                temp_type_list.append(self.get_list(user_id, value_obj, copy_users))
        if copy_users:
            for value_copy in copy_users:
                temp_type_list.append(self.get_list(user_id, value_copy, copy_users))
        return JSONEncoder().encode(temp_type_list)
    def get_list(self, user_id, value, copy_users):
        """Build one JSON-ready row dict for a work-order record."""
        temp_item = {}
        temp_item['user_id'] = user_id  # requesting user id
        temp_item['id'] = value.create_uid.id  # creator employee id
        temp_item['user_name'] = value.create_uid.name  # creator name
        temp_item['name'] = value.name  # order number
        temp_item['company_name'] = value.company_id.name  # company
        temp_item['dept'] = value.subject  # NOTE(review): key says dept but holds the subject/title
        timeArray = time.strptime(str(value.write_date), "%Y-%m-%d %H:%M:%S")
        timeStamp = int(time.mktime(timeArray))
        create_time = timeStamp + 8 * 60 * 60  # shift UTC to UTC+8 (China)
        timeArray = time.localtime(create_time)
        otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
        temp_item['write_date'] = otherStyleTime  # last-update time, UTC+8
        temp_item['state'] = value.state  # workflow state
        # Flag telling the front end whether this row reached the user as a
        # CC recipient ('yes') or as an approver ('no').
        if copy_users:
            temp_item['copy_users'] = 'yes'
        else:
            temp_item['copy_users'] = 'no'
        return temp_item
    # Data: first 5 work orders created by the user, as JSON.
    @http.route('/WechatGLD/get_faqi_list', type='http', auth="public", csrf=False)
    def get_faqi_list(self, userid):
        # NOTE(review): `employee` is looked up but never used here.
        employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        obj = request.env['syt.oa.gld'].sudo().search([('create_uid', '=', int(userid))], limit=5)
        return self.onload_list(obj, userid)
    # Data: next page (5 rows, offset `number`) of the user's own work orders.
    @http.route('/WechatGLD/add_faqi_list', type='http', auth="public", csrf=False)
    def add_faqi_list(self, userid, number):
        # NOTE(review): `employee` is looked up but never used here.
        employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        obj = request.env['syt.oa.gld'].sudo().search([('create_uid', '=', int(userid))], limit=5, offset=int(number))
        return self.onload_list(obj, userid)
    # Save an approval opinion (approve / reject buttons).
    @http.route('/WechatGLD/save_opinion', type='http', auth="public", csrf=False)
    def save_opinion(self, wechat_gldid, opinion, check_state, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        # Last argument 2 marks the call as coming from the WeChat channel.
        return request.env['syt.oa.gld.opinion'].sudo(userid).save_opinion_service_wechat(gld_bean.id, opinion,
                                                                                          check_state,
                                                                                          employee, 2)
    # Data: to-do list (CC'd 'through' orders + orders awaiting my approval).
    @http.route('/WechatGLD/get_daiban_list', type='http', auth="public", csrf=False)
    def get_daiban_list(self, userid):
        copy_users = request.env['syt.oa.gld'].sudo().search(
            [('copy_users_dy_ids.user_id', '=', int(userid)), ('state', '=', 'through')])
        # NOTE(review): here `userid` is compared as a string, unlike the
        # int(userid) cast above — confirm the domain still matches.
        obj = request.env['syt.oa.gld'].sudo().search(
            ['|',
             '&', ('copy_users_dy_ids.user_id', '=', userid), ('state', '=', 'through'),
             '&', ('approver.user_id.id', '=', userid), ('state', 'in', ('pending', 'pass'))])
        return self.onload_list(obj, userid, copy_users)
    # Data: next page (5 rows, offset `number`) of the to-do list.
    @http.route('/WechatGLD/add_daiban_list', type='http', auth="public", csrf=False)
    def add_daiban_list(self, userid, number):
        copy_users = request.env['syt.oa.gld'].sudo().search(
            [('copy_users_dy_ids.user_id', '=', int(userid)), ('state', '=', 'through')])
        # NOTE(review): `userid` compared as a string here (see get_daiban_list).
        obj = request.env['syt.oa.gld'].sudo().search(
            ['|',
             '&', ('copy_users_dy_ids.user_id', '=', userid), ('state', '=', 'through'),
             '&', ('approver.user_id.id', '=', userid), ('state', 'in', ('pending', 'pass'))], limit=5,
            offset=int(number))
        return self.onload_list(obj, userid, copy_users)
    # Data: first 5 handled orders (CC'd-and-read or already approved by me).
    @http.route('/WechatGLD/get_yiban_list', type='http', auth="public", csrf=False)
    def get_yiban_list(self, userid):
        obj = request.env['syt.oa.gld'].sudo().search(
            ['|', ('copy_users_yy_ids.user_id', '=', int(userid)), ('yi_approver_user_ids.user_id', '=', int(userid))],
            limit=5)
        return self.onload_list(obj, userid)
    # Data: next page (5 rows, offset `number`) of the handled-orders list.
    @http.route('/WechatGLD/add_yiban_list', type='http', auth="public", csrf=False)
    def add_yiban_list(self, userid, number):
        obj = request.env['syt.oa.gld'].sudo().search(
            ['|', ('copy_users_yy_ids.user_id', '=', int(userid)), ('yi_approver_user_ids.user_id', '=', int(userid))],
            limit=5,
            offset=int(number))
        return self.onload_list(obj, userid)
# 根据输入的名称查询员工
@http.route('/WechatGLD/get_user_by_name', type='http', auth="public", csrf=False)
def get_user_by_name(self, name):
obj = request.env['hr.employee'].sudo().search([('name', 'ilike', name)])
temp_type_list = []
if obj:
for value in obj:
temp_item = {}
temp_item['id'] = value.id
temp_item['name'] = value.name
temp_item['company_name'] = value.company_id.name
temp_item['dept'] = value.department_id.name
temp_item['phone'] = value.mobile_phone
temp_item['job_name'] = value.job_id.name
image = '/web/binary/image?model=hr.employee&field=image&id=' + str(obj.id) + '&resize='
temp_item['image'] = image
temp_type_list.append(temp_item)
return JSONEncoder().encode(temp_type_list)
else:
return "2"
    # Work-order detail page.
    @http.route('/WechatGLD/xiangqing', type='http', auth="public", csrf=False)
    def xiangqing(self, name, qubie=None, userid=None):
        """Render the detail page; marks the order as read when the viewer is
        an unread CC recipient of a 'through' order."""
        gld_obj = request.env['syt.oa.gld'].sudo().search([('name', '=', name.lstrip())])
        if_cs = False  # True when the viewer is an unread CC recipient
        employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        if gld_obj.state == 'through' and gld_obj.copy_users_dy_ids:
            for dy in gld_obj['copy_users_dy_ids']:
                if employee.id == dy.id:
                    if_cs = True
                    break
        if if_cs:
            request.env['syt.oa.gld'].sudo(userid).read_gld_service(gld_obj, 2, employee)
        message = request.env['mail.message'].sudo().search([('res_id', '=', gld_obj.id), ('model', '=', 'syt.oa.gld')])
        curr_approver_is_luser = False  # viewer is the sponsor of the order
        uid_is_approval = False         # viewer still has a pending (empty) opinion
        if (employee.id == gld_obj.sponsor.id):
            curr_approver_is_luser = True
        # Does the logged-in user appear among the approvers with an
        # opinion not yet filled in?
        for opina in gld_obj.approver_opinions:
            if (request.env['hr.employee'].search([('id', '=', opina.approver.id)])["user_id"]["id"] == int(userid)):
                if (opina.opinion == False):
                    uid_is_approval = True
        return request.render('ToproERP_WeChat_GLD.xiangqing', {'gld_obj': gld_obj,
                                                                'userid': userid,
                                                                'attachment': len(gld_obj.attachment_ids),
                                                                'opinions': len(gld_obj.approver_opinions),
                                                                'copy_users': len(gld_obj.copy_users),
                                                                'messages': len(message),
                                                                'curr_approver_is_luser': curr_approver_is_luser,
                                                                'uid_is_approval': uid_is_approval})
    # Detail page: approval-opinions tab.
    @http.route('/WechatGLD/xiangqing_opinion', type='http', auth="public", csrf=False)
    def xiangqing_opinion(self, name, qubie=None):
        gld_obj = request.env['syt.oa.gld'].sudo().search([('name', '=', name.lstrip())])
        return request.render('ToproERP_WeChat_GLD.xiangqing_opinion', {'gld_obj': gld_obj})
    # Page: list of approval opinions for a work order.
    @http.route('/WechatGLD/select_opinion', type='http', auth="public", csrf=False)
    def select_opinion(self, name, qubie):
        values = {"name": name, "shuzi": qubie}
        gld_obj = request.env['syt.oa.gld'].sudo().search([('name', '=', name)])
        return request.render('ToproERP_WeChat_GLD.select_opinion',
                              {'value': values, 'opinion': gld_obj.approver_opinions})
    # Data: attachments of a work order ("2" when the order is not found).
    @http.route('/WechatGLD/get_enclosure', type='http', auth="public", csrf=False)
    def get_enclosure(self, name):
        gld_obj = request.env['syt.oa.gld'].sudo().search([('name', '=', name)])
        if gld_obj:
            attachment_list = []
            for attachment in gld_obj.attachment_ids:
                item = {}
                item['id'] = attachment.id
                # Resolve the attachment's on-disk path from its checksum.
                fname, full_path = request.env['ir.attachment']._get_path('', attachment.checksum)
                item['db_datas'] = full_path
                attachment_list.append(item)
            return JSONEncoder().encode(attachment_list)
        else:
            return "2"
    # Data: approval opinions of a work order ("2" when none exist).
    @http.route('/WechatGLD/get_opinion', type='http', auth="public", csrf=False)
    def get_opinion(self, name, shuzi):
        opinion = request.env['syt.oa.gld.opinion'].sudo().search([('gld_id', '=', name)])
        if opinion:
            opinion_list = []
            for value in opinion:
                item = {}
                item['id'] = value.approver.id
                item['name'] = value.approver.name
                item['opinion'] = value.opinion
                timeArray = time.strptime(str(value.appov_date), "%Y-%m-%d %H:%M:%S")
                timeStamp = int(time.mktime(timeArray))
                create_time = timeStamp + 8 * 60 * 60  # shift UTC to UTC+8 (China)
                timeArray = time.localtime(create_time)
                otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                item['time'] = otherStyleTime
                item['dept'] = value.approver.department_id.name
                item['company'] = value.approver.company_id.name
                opinion_list.append(item)
            return JSONEncoder().encode(opinion_list)
        else:
            return "2"
    # Data: work-order detail fields as JSON.
    @http.route('/WechatGLD/get_gld_info', type='http', auth="public", csrf=False)
    def get_gld_info(self, name):
        temp_type = request.env['syt.oa.gld'].sudo().search([('name', '=', name.lstrip())])
        # Whether the logged-in user is a CC recipient of any order — used
        # only to set the 'copy_users' flag below.
        copy_users = request.env['syt.oa.gld'].sudo().search([('copy_users.user_id', '=', request.session['uid'])])
        temp_type_list = []
        if temp_type:
            for value in temp_type:
                temp_item = {}
                temp_item['name'] = value.name  # order number
                temp_item['company_name'] = value.company_id.name  # company
                temp_item['dept'] = value.dept  # department
                temp_item['id'] = value.create_uid.id  # creator employee id
                temp_item['user_name'] = value.create_uid.name  # creator name
                timeArray = time.strptime(str(value.create_date), "%Y-%m-%d %H:%M:%S")
                timeStamp = int(time.mktime(timeArray))
                create_time = timeStamp + 8 * 60 * 60  # shift UTC to UTC+8 (China)
                timeArray = time.localtime(create_time)
                otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                temp_item['write_date'] = otherStyleTime  # creation time, UTC+8
                temp_item['state'] = value.state  # workflow state
                temp_item['subject'] = value.subject  # title
                temp_item['content'] = value.content  # body
                # Flag: did this order reach the user as CC ('yes') or as approver ('no')
                if copy_users:
                    temp_item['copy_users'] = 'yes'
                else:
                    temp_item['copy_users'] = 'no'
                temp_type_list.append(temp_item)
        return JSONEncoder().encode(temp_type_list)
    # Data: all work-order template types.
    @http.route('/WechatGLD/get_temp_type', type='http', auth="public", csrf=False)
    def get_temp_type(self):
        temp_type = request.env['syt.oa.gld.template.type'].sudo().search([])
        temp_type_list = []
        for value in temp_type:
            temp_item = {}
            temp_item['id'] = value.id
            temp_item['name'] = value.name
            temp_type_list.append(temp_item)
        json = JSONEncoder().encode(temp_type_list)
        return json
    # Data: templates belonging to the given template type.
    @http.route('/WechatGLD/get_temp', type='http', auth="public", csrf=False)
    def get_temp(self, temp_type_id):
        temp = request.env['syt.oa.gld.template'].sudo().search([('temp_type', '=', int(temp_type_id))])
        temp_list = []
        for value in temp:
            temp_item = {}
            temp_item['id'] = value.id
            temp_item['name'] = value.name
            temp_list.append(temp_item)
        return JSONEncoder().encode(temp_list)
# 保存工联单
@http.route('/WechatGLD/save', type='http', auth="public", csrf=False)
def save(self, template_type, template, title, text, urgency, approver, attachment_ids, userid):
self.save_public(template_type, template, title, text, urgency, approver, attachment_ids, userid, 2)
# 保存并提交工联单
@http.route('/WechatGLD/save_and_submit', type='http', auth="public", csrf=False)
def save_and_submit(self, template_type, template, title, text, urgency, approver, attachment_ids, userid):
self.save_public(template_type, template, title, text, urgency, approver, attachment_ids, userid, 1)
def save_public(self, template_type, template, title, text, urgency, approver, attachment_ids, userid, save_type):
'''
创建工联单的公共方法:为 保存、保存并提交的按钮提供创建工联单的方法
:param template_type:模板类型
:param template: 模板
:param title: 标题
:param text: 正文
:param urgency: 紧急程序
:param approver: 审批人
:param attachment_ids: 附件
:param userid: 用户Id
:param save_type: 代表当前是通过保存还是保存并提交过来的
:return:
'''
employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
approver_if_self = False # 审批人中是否包含自己
approver_arry = approver.split(",")
apprs = []
approver_id = '' # 审批人id
for appr in approver_arry:
approver_id += str(appr) + ','
if appr:
apprs.append((4, int(appr)))
if int(appr) == employee.id:
approver_if_self = True
if approver_if_self:
return "3"
vals = {}
vals['sponsor'] = employee.id
vals['dept'] = employee.department_id.name
vals['message_follower_ids'] = False
vals['message_ids'] = False
vals['emergency'] = urgency
vals['approver'] = apprs
if template_type != "0":
vals['temp_type'] = int(template_type)
if template != "0":
vals['gld_temp'] = int(template)
vals['expiration'] = time.strftime('%Y-%m-%d', time.localtime(time.time() + 86400 * 3)) # 截止时间
vals['subject'] = title
text = str(text).replace("$", "<br>")
vals['content'] = text
# 附件图片
if attachment_ids != "0":
attachment_arry = attachment_ids.split(",")
attachments = []
for attachment in attachment_arry:
if attachment != "":
attachments.append((4, int(attachment)))
vals['attachment_ids'] = attachments
request.uid = userid
gld_bean = request.env['syt.oa.gld'].sudo(userid).create(vals, "wechat")
if gld_bean:
if gld_bean.attachment_ids:
for attachment_ in gld_bean.attachment_ids:
# 给图片打水印
# self.make_watermark(attachment_, gld_bean.name)
# request.env['syt.oa.gld.service'].sudo().make_watermark(attachment_, gld_bean.name)
request.env['play.watermark'].make_watermark(obj=gld_bean, number=gld_bean.name)
if save_type == 1:
gld = request.env['syt.oa.gld'].sudo().search([('id', '=', int(gld_bean.id))])
id = 0
if gld:
id = gld.sudo(userid).write({"state": "pending"})
for appr in gld.approver:
icp = request.env['ir.config_parameter']
insur_str = icp.get_param('web.base.url')
url = insur_str + '/WechatGLD/xiangqing?name=%s&qubie=%s&userid=%s' % (
gld.name, '2', appr.user_id.id) # 跳转地址,需要和微信保持一致
description = u"%s提交了一张标题为“%s”的工联单,需要您进行审批,请点击查看全文,马上处理!" % (gld.sponsor.name, gld.subject)
request.env['syt.oa.gld'].get_gld_agentid(appr, u'单号:' + gld.name, description, url)
if id:
return "1" # True
else:
return "0" # False
    # Data: the user's five most recently used approvers.
    @http.route('/WechatGLD/get_approver', type='http', auth="public", csrf=False)
    def get_approver(self, user_id):
        approver = request.env['syt.oa.gld.service'].get_approver(user_id)
        employee_list = []
        for appr in approver:
            employee = request.env['hr.employee'].sudo().search([('id', '=', int(appr))], limit=5)
            employee_item = {}
            # NOTE(review): this assigns to the record (writes to the DB via
            # the ORM) just to blank a missing phone — confirm intended.
            if employee.mobile_phone == False:
                employee.mobile_phone = "";
            employee_item['id'] = str(employee.id) + ','
            employee_item['name'] = employee.name
            employee_item['mobile_phone'] = employee.mobile_phone
            employee_item['company_name'] = employee.department_id.company_id.name
            employee_item['approver_dept'] = employee.department_id.name
            employee_list.append(employee_item)
        return JSONEncoder().encode(employee_list)
    # Data: title, body and urgency prefilled from the chosen template.
    @http.route('/WechatGLD/get_title', type='http', auth="public", csrf=False)
    def get_title(self, temp_id):
        approver = request.env['syt.oa.gld.template'].sudo().search([('id', '=', int(temp_id))])
        temp_list = []
        for value in approver:
            temp_item = {}
            temp_item['subject'] = value.subject
            temp_item['content'] = value.content
            # temp_item['content'] = base64.encodestring(value.content)
            temp_item['emergency'] = value.emergency
            temp_list.append(temp_item)
        return JSONEncoder().encode(temp_list)
    # "Submit for approval" button; "2" when the order has no approver opinions.
    @http.route('/WechatGLD/gld_state_sent', type='http', auth="public", csrf=False)
    def gld_state_sent(self, wechat_gldid, userid):
        _logger.info(u'工联单单号(%s)' % wechat_gldid)
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        _logger.info(u'微信点击提交审批时的工联单对象(%s)' % gld_bean)
        if gld_bean.approver_opinions:
            approver_ids = []
            for appr in gld_bean.approver_opinions:
                approver_ids.append(int(appr.approver.id))
            # approver_ids = str(approver_ids).replace('[', '(').replace(']', ')')
            employee_obj = request.env["hr.employee"].sudo().search([('id', 'in', approver_ids)])
            _logger.info(u'微信点击提交审批时的审批人列表(%s)' % employee_obj)
            request.env['syt.oa.gld'].sudo().gld_state_sent_service(gld_bean, employee_obj)
            return "1"
        else:
            return "2"
    # "Continue approval" button: reopen a finished order for further approval.
    @http.route('/WechatGLD/gld_finish_to_pass', type='http', auth="public", csrf=False)
    def gld_finish_to_pass(self, wechat_gldid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        request.env['syt.oa.gld'].gld_finish_to_pass_service(gld_bean)
        return "1"
    # "Void" button: cancel the work order.
    @http.route('/WechatGLD/gld_state_cancel', type='http', auth="public", csrf=False)
    def gld_state_cancel(self, wechat_gldid, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        request.env['syt.oa.gld'].sudo(userid).gld_state_cancel_service(gld_bean)
        return "1"
    # "Mark as read" button.
    @http.route('/WechatGLD/read_gld_service', type='http', auth="public", csrf=False)
    def read_gld_service(self, wechat_gldid, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        request.env['syt.oa.gld'].sudo(userid).read_gld_service(gld_bean)
        return "1"
    # "Back to draft" button; "2" when the order is not found.
    @http.route('/WechatGLD/gld_state_draft', type='http', auth="public", csrf=False)
    def gld_state_draft(self, wechat_gldid, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        if gld_bean:
            request.env['syt.oa.gld'].sudo(userid).gld_state_draft_service(gld_bean)
            return "1"
        else:
            return "2"
    # "Not within my approval scope" button: decline to handle the order.
    @http.route('/WechatGLD/waiver', type='http', auth="public", csrf=False)
    def waiver(self, wechat_gldid, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        if gld_bean:
            request.env['syt.oa.gld'].sudo(userid).waiver_service(gld_bean, userid, employee)
            return "1"
        else:
            return "2"
# 展示添加审批人页面
@http.route('/WechatGLD/view_appr', type='http', auth="public", csrf=False)
def view_appr(self, no, name, userid):
values = {"no": no, "name": name, "userid": userid}
return request.render('ToproERP_WeChat_GLD.view_appr', values)
    # Render the "view CC recipients" page.
    @http.route('/WechatGLD/select_appr_copy_user', type='http', auth="public", csrf=False)
    def select_appr_copy_user(self, no):
        values = {"no": no}
        gld = request.env['syt.oa.gld'].sudo().search([('name', '=', no)])
        return request.render('ToproERP_WeChat_GLD.select_appr_copy_user',
                              {'values': values, 'people': gld.copy_users})
    # Data: all CC recipients of a work order.
    @http.route('/WechatGLD/get_copy_user', type='http', auth="public", csrf=False)
    def get_copy_user(self, wechat_gldid):
        gld = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        temp_list = []
        if gld:
            for copy_user in gld.copy_users:
                # request.env['hr.employee'].search([('id', '=', int(employee_id))])
                temp_item = {}
                temp_item['name'] = copy_user.name
                temp_item['company_name'] = copy_user.department_id.company_id.name
                temp_item['dept'] = copy_user.department_id.name
                temp_item['job_name'] = copy_user.job_id.name
                image = '/web/binary/image?model=hr.employee&field=image&id=' + str(copy_user.id) + '&resize='
                temp_item['image'] = image
                temp_list.append(temp_item)
        return JSONEncoder().encode(temp_list)
    # Button: add an approver or a CC recipient to the order; the `name`
    # argument ("添加抄送人" / "添加审批人") selects which service is called.
    @http.route('/WechatGLD/add_approver_service', type='http', auth="public", csrf=False)
    def add_approver_service(self, wechat_gldid, employee_id, name, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        employee = request.env['hr.employee'].sudo().search([('id', '=', int(employee_id))])
        request.uid = userid
        # NOTE(review): if `name` matches neither label, `result` below is
        # unbound and this raises — confirm the front end can only send these
        # two values.
        if name == u"添加抄送人":
            result = request.env['syt.oa.gld.add.peoper.wizard'].sudo(userid).add_copy_peoper_service(gld_bean,
                                                                                                      employee, '',
                                                                                                      2)
        elif name == u"添加审批人":
            result = request.env['syt.oa.gld.add.approver.wizard'].sudo(userid).add_approver_service(gld_bean, employee,
                                                                                                     2)
        if result == "2":
            return "2"
        elif result == "3":
            return "3"
        else:
            return "1"
    @http.route('/WechatGLD/get_signature', type='http', auth="public", csrf=False)
    def get_signature(self, url):
        '''
        Fetch a WeChat access token and jsapi ticket, cache them in cookies,
        and return the JS-SDK signature payload (nonceStr, ticket, timestamp,
        url, signature) for the given page url.
        '''
        # NOTE(review): corpid/corpsecret are hardcoded here — move these
        # credentials to configuration.
        cookie = Cookie.SimpleCookie()
        # access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wx0046935c06f7c27e&corpsecret=fLuTp-KCwaG-HAPcsKZch0xNkNV2ahjMPmi1S4F_LnlP8rkJmsx7jVc931ljr46A'
        access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wxc1317b61e7e122aa&corpsecret=EGjHS5l3ee0gBSvr29zgZN2HqG4r2tPbtr-LBpRqgoEC-4EqQrvPqQQGXrc1QxpH'
        request_ = urllib2.Request(access_token_url)
        opener = urllib2.build_opener()
        conn = opener.open(request_)
        access_token_list = conn.read()
        access_token_list = json.loads(access_token_list)
        # NOTE(review): `cookie` was just created, so len(cookie) == 0 is
        # always true here — this "cache" is refreshed on every request.
        if len(cookie) == 0:
            cookie["access_token"] = access_token_list["access_token"]
            request.session['access_token'] = access_token_list["access_token"]
        if len(cookie) > 0:
            cookie_ticket = Cookie.SimpleCookie()
            ticket_url = 'https://qyapi.weixin.qq.com/cgi-bin/get_jsapi_ticket?access_token=' + cookie[
                "access_token"].value
            request_ = urllib2.Request(ticket_url)
            opener = urllib2.build_opener()
            conn = opener.open(request_)
            ticket_list = conn.read()
            ticket_list = json.loads(ticket_list)
            if len(cookie_ticket) == 0:
                cookie_ticket["ticket"] = ticket_list["ticket"]
            ret_list = []
            ret = {}
            ret["nonceStr"] = self.__create_nonce_str()  # random nonce
            ret["jsapi_ticket"] = cookie_ticket["ticket"].value
            ret["timestamp"] = self.__create_timestamp()  # unix timestamp
            ret["url"] = url
            signature = self.sign(ret)
            ret["signature"] = signature
            ret_list.append(ret)
            return JSONEncoder().encode(ret_list)
def __create_nonce_str(self):
'''
创建随机字符串 nonceStr
:return:
'''
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
'''
创建时间戳 timestamp
:return:
'''
return int(time.time())
def sign(self, ret):
'''
返回一个加密的signature 签名
:return:
'''
string = '&'.join(['%s=%s' % (key.lower(), ret[key]) for key in sorted(ret)])
signature = hashlib.sha1(string).hexdigest()
return signature
    def _compute_checksum(self, bin_data):
        """ compute the checksum for the given datas
        :param bin_data : datas in its binary form
        :return: hex SHA-1 digest of *bin_data* (or of the empty string).
        """
        # an empty file has a checksum too (for caching)
        # NOTE(review): Python 2 only -- on Python 3 ``hashlib.sha1``
        # requires bytes, not str.
        return hashlib.sha1(bin_data or '').hexdigest()
    @http.route('/WechatGLD/downloadImage', type='http', auth="public", csrf=False)
    def downloadImage(self, media_id):
        """Download WeChat media *media_id*, store it as an ir.attachment
        and return ``{"attachment_id": ..., "image_name": ...}`` as JSON."""
        cookie = Cookie.SimpleCookie()
        # access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wx0046935c06f7c27e&corpsecret=fLuTp-KCwaG-HAPcsKZch0xNkNV2ahjMPmi1S4F_LnlP8rkJmsx7jVc931ljr46A'
        # NOTE(review): hard-coded corp id/secret; duplicate of the token
        # fetch in ``get_signature`` -- consider factoring out.
        access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wxc1317b61e7e122aa&corpsecret=EGjHS5l3ee0gBSvr29zgZN2HqG4r2tPbtr-LBpRqgoEC-4EqQrvPqQQGXrc1QxpH'
        access_token_request = urllib2.Request(access_token_url)
        opener = urllib2.build_opener()
        conn = opener.open(access_token_request)
        access_token_list = conn.read()
        access_token_list = json.loads(access_token_list)
        if len(cookie) == 0:
            cookie["access_token"] = access_token_list["access_token"]
        downloadImage_url = 'https://qyapi.weixin.qq.com/cgi-bin/media/get?access_token=' + cookie[
            "access_token"].value + '&media_id=' + media_id + ''
        # wechat = WeChatEnterprise(agentid=1)
        # file = wechat.get_media(media_id)
        # The media is fetched twice: once through the configured wechat
        # client (stored raw in ``db_datas``) and once over HTTP (stored
        # base64-encoded in ``datas``).
        # NOTE(review): ``file`` shadows the Python 2 builtin, and neither
        # ``conn`` nor ``f`` is closed explicitly.
        wechat = request.env['wechat.enterprise.config'].get_wechat()
        file = wechat.get_media(media_id)
        url = downloadImage_url
        f = urllib2.urlopen(url)
        attachment = request.env['ir.attachment']
        verification_code = random.randint(1000, 9999)  # 4-digit random code
        vals = {}
        vals["db_datas"] = file
        vals['datas'] = base64.encodestring(f.read())
        vals['name'] = str(verification_code) + ".jpg"
        vals['datas_fname'] = str(verification_code) + ".jpg"
        vals['type'] = "binary"
        vals['index_content'] = "(u'image',)"
        vals['mimetype'] = "image/jpeg"
        vals['res_model'] = "syt.oa.gld"
        attachment_bean = attachment.sudo().create(vals)
        attachment_bean_vals = {}
        if attachment_bean:
            attachment_bean_vals["attachment_id"] = attachment_bean.id
        else:
            attachment_bean_vals["attachment_id"] = "0"
        attachment_bean_vals["image_name"] = attachment_bean.name
        return JSONEncoder().encode(attachment_bean_vals)
# 查询工联单附件的页面路径
@http.route('/WechatGLD/view_enclosure', type='http', auth="public", csrf=False)
def view_enclosure(self, name, *args, **kw):
gld_obj = request.env['syt.oa.gld'].sudo().search(
[('name', '=', name)])
attachments = []
if gld_obj:
for atta_item in gld_obj.attachment_ids:
attachments_obj = http.request.env['ir.attachment'].sudo().search(
[('id', '=', atta_item.id)])
attachments.append(attachments_obj)
# attachments_obj = http.request.env['ir.attachment'].sudo().search(
# [('id', '=', atta_item.id)])
return http.request.render('ToproERP_WeChat_GLD.select_enclosure',
{'attachments': attachments})
# 取附件的详情页面 大图
@http.route('/WechatGLD/enclosure_info', type='http', auth="public", csrf=False)
def enclosure_info(self, id, *args, **kwargs):
attachments = http.request.env['ir.attachment'].sudo().search(
[('id', '=', id)])
return http.request.render('ToproERP_WeChat_GLD.enclosure_info',
{'attachments': attachments})
def binary_content(xmlid=None, model='ir.attachment', id=None, field='datas', unique=False, filename=None,
filename_field='datas_fname', download=False, mimetype=None,
default_mimetype='application/octet-stream', env=None):
return request.registry['ir.http'].binary_content(
xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
filename_field=filename_field,
download=download, mimetype=mimetype, default_mimetype=default_mimetype, env=env)
    @http.route(['/ToproERP_WeChat_GLD/content',
                 '/ToproERP_WeChat_GLD/content/<string:xmlid>',
                 '/ToproERP_WeChat_GLD/content/<string:xmlid>/<string:filename>',
                 '/ToproERP_WeChat_GLD/content/<int:id>',
                 '/ToproERP_WeChat_GLD/content/<int:id>/<string:filename>',
                 '/ToproERP_WeChat_GLD/content/<int:id>-<string:unique>',
                 '/ToproERP_WeChat_GLD/content/<int:id>-<string:unique>/<string:filename>',
                 '/ToproERP_WeChat_GLD/content/<string:model>/<int:id>/<string:field>',
                 '/ToproERP_WeChat_GLD/content/<string:model>/<int:id>/<string:field>/<string:filename>'], type='http',
                auth="public")
    def content_common(self, xmlid=None, model='ir.attachment', id=None, field='datas', filename=None,
                       filename_field='datas_fname', unique=None, mimetype=None, download=None, data=None, token=None):
        """Serve a stored binary over HTTP, mirroring Odoo's /web/content.

        NOTE(review): the *mimetype* argument is ignored -- the response is
        always served as 'application/pdf'; confirm this is intentional.
        """
        status, headers, content = self.binary_content(xmlid=xmlid, model=model, id=id, field=field, unique=unique,
                                                       filename=filename, filename_field=filename_field,
                                                       download=download, mimetype='application/pdf')
        if status == 304:
            # Client cache still valid: no body.
            response = werkzeug.wrappers.Response(status=status, headers=headers)
        elif status == 301:
            return werkzeug.utils.redirect(content, code=301)
        elif status != 200:
            response = request.not_found()
        else:
            # Stored payload is base64; decode before sending.
            content_base64 = base64.b64decode(content)
            headers.append(('Content-Length', len(content_base64)))
            response = request.make_response(content_base64, headers)
        if token:
            response.set_cookie('fileToken', token)
        return response
#
#
# def binary_content(xmlid=None, model='ir.attachment', id=None, field='datas', unique=False, filename=None,
# filename_field='datas_fname', download=False, mimetype=None,
# default_mimetype='application/octet-stream', env=None):
# return request.registry['ir.http'].binary_content(
# xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
# filename_field=filename_field,
# download=download, mimetype=mimetype, default_mimetype=default_mimetype, env=env)
#
# @http.route(['/web/content',
# '/web/content/<string:xmlid>',
# '/web/content/<string:xmlid>/<string:filename>',
# '/web/content/<int:id>',
# '/web/content/<int:id>/<string:filename>',
# '/web/content/<int:id>-<string:unique>',
# '/web/content/<int:id>-<string:unique>/<string:filename>',
# '/web/content/<string:model>/<int:id>/<string:field>',
# '/web/content/<string:model>/<int:id>/<string:field>/<string:filename>'], type='http', auth="public")
# def content_common(self, xmlid=None, model='ir.attachment', id=None, field='datas', filename=None,
# filename_field='datas_fname', unique=None, mimetype=None, download=None, data=None, token=None):
# status, headers, content = self.binary_content(xmlid=xmlid, model=model, id=id, field=field, unique=unique,
# filename=filename, filename_field=filename_field,
# download=download, mimetype=mimetype)
# if status == 304:
# response = werkzeug.wrappers.Response(status=status, headers=headers)
# elif status == 301:
# return werkzeug.utils.redirect(content, code=301)
# elif status != 200:
# response = request.not_found()
# else:
# content_base64 = base64.b64decode(content)
# headers.append(('Content-Length', len(content_base64)))
# response = request.make_response(content_base64, headers)
# if token:
# response.set_cookie('fileToken', token)
# return response
| xhair/TopOdoo_Addons | ToproERP_WeChat_GLD/controllers/wechat_gld.py | Python | agpl-3.0 | 43,140 | 0.004475 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Demonstrates how to handle a platform-specific event not defined in
pyglet by subclassing Window. This is not for the faint-hearted!
A message will be printed to stdout when the following events are caught:
- On Mac OS X, the window drag region is clicked.
- On Windows, the display resolution is changed.
- On Linux, the window properties are changed.
'''
import pyglet
# Check for Carbon (OS X)
try:
from pyglet.window.carbon import *
_have_carbon = True
except ImportError:
_have_carbon = False
# Check for Win32
try:
from pyglet.window.win32 import *
from pyglet.window.win32.constants import *
_have_win32 = True
except ImportError:
_have_win32 = False
# Check for Xlib (Linux)
try:
from pyglet.window.xlib import *
_have_xlib = True
except ImportError:
_have_xlib = False
# Subclass Window
class MyWindow(pyglet.window.Window):
    """Window subclass that hooks one platform-specific native event per
    backend; only the handlers for the platform actually imported above
    are defined."""

    if _have_carbon:
        # Mac OS X: fires when the window's drag region is clicked.
        @CarbonEventHandler(kEventClassWindow, kEventWindowClickDragRgn)
        def _on_window_click_drag_rgn(self, next_handler, event, data):
            print 'Clicked drag rgn.'
            # Let the default handler run as well so dragging still works.
            carbon.CallNextEventHandler(next_handler, event)
            return noErr

    if _have_win32:
        # Windows: fires when the display resolution changes.
        @Win32EventHandler(WM_DISPLAYCHANGE)
        def _on_window_display_change(self, msg, lParam, wParam):
            print 'Display resolution changed.'
            return 0

    if _have_xlib:
        # Linux/X11: fires when a window property changes.
        @XlibEventHandler(xlib.PropertyNotify)
        def _on_window_property_notify(self, event):
            print 'Property notify.'
if __name__ == '__main__':
    # Create the subclassed window and hand control to pyglet's event loop.
    window = MyWindow()
    pyglet.app.run()
| sangh/LaserShow | pyglet-hg/examples/window_platform_event.py | Python | bsd-3-clause | 3,351 | 0.001492 |
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import fc_zone_helper
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
# Driver-specific option: location of the Huawei XML configuration file.
huawei_opts = [
    cfg.StrOpt('cinder_huawei_conf_file',
               default='/etc/cinder/cinder_huawei_conf.xml',
               help='The configuration file for the Cinder Huawei '
                    'driver.')]

# Register the option globally so it appears in cinder.conf.
CONF = cfg.CONF
CONF.register_opts(huawei_opts)
class HuaweiBaseDriver(driver.VolumeDriver):
    """Common base for the Huawei OceanStor volume drivers; the
    protocol-specific (iSCSI/FC) subclasses build on the REST client and
    smart-feature helpers wired up here."""

    def __init__(self, *args, **kwargs):
        """Validate that a configuration object was supplied and remember
        the path of the Huawei XML configuration file.

        :raises exception.InvalidInput: if no configuration was passed in.
        """
        super(HuaweiBaseDriver, self).__init__(*args, **kwargs)
        self.configuration = kwargs.get('configuration')
        if not self.configuration:
            msg = _('_instantiate_driver: configuration not found.')
            raise exception.InvalidInput(reason=msg)

        self.configuration.append_config_values(huawei_opts)
        self.xml_file_path = self.configuration.cinder_huawei_conf_file
def do_setup(self, context):
"""Instantiate common class and login storage system."""
self.restclient = rest_client.RestClient(self.configuration)
return self.restclient.login()
def check_for_setup_error(self):
"""Check configuration file."""
return huawei_utils.check_conf_file(self.xml_file_path)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
return self.restclient.update_volume_stats()
    @utils.synchronized('huawei', external=True)
    def create_volume(self, volume):
        """Create a volume.

        Creates the LUN in the requested pool, then applies the smart
        features (QoS, partition, cache) requested by the volume type.
        On any smart-feature failure the freshly created LUN is deleted.

        :raises exception.VolumeBackendAPIException: unknown pool.
        :raises exception.InvalidInput: smart-feature setup failed.
        """
        opts = huawei_utils.get_volume_params(volume)
        smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts)
        params = huawei_utils.get_lun_params(self.xml_file_path,
                                             smartx_opts)
        # The pool is encoded in the host string ("host@backend#pool").
        pool_name = volume_utils.extract_host(volume['host'],
                                              level='pool')
        pools = self.restclient.find_all_pools()
        pool_info = self.restclient.find_pool_info(pool_name, pools)
        if not pool_info:
            msg = (_('Error in getting pool information for the pool: %s.')
                   % pool_name)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        volume_name = huawei_utils.encode_name(volume['id'])
        volume_description = volume['name']
        volume_size = huawei_utils.get_volume_size(volume)

        LOG.info(_LI(
            'Create volume: %(volume)s, size: %(size)s.'),
            {'volume': volume_name,
             'size': volume_size})

        params['pool_id'] = pool_info['ID']
        params['volume_size'] = volume_size
        params['volume_description'] = volume_description

        # Prepare LUN parameters.
        lun_param = huawei_utils.init_lun_parameters(volume_name, params)

        # Create LUN on the array.
        lun_info = self.restclient.create_volume(lun_param)
        lun_id = lun_info['ID']

        try:
            qos = huawei_utils.get_volume_qos(volume)
            if qos:
                smart_qos = smartx.SmartQos(self.restclient)
                smart_qos.create_qos(qos, lun_id)
            smartpartition = smartx.SmartPartition(self.restclient)
            smartpartition.add(opts, lun_id)
            smartcache = smartx.SmartCache(self.restclient)
            smartcache.add(opts, lun_id)
        except Exception as err:
            # Roll back the LUN so a failed create leaves nothing behind.
            self._delete_lun_with_check(lun_id)
            raise exception.InvalidInput(
                reason=_('Create volume error. Because %s.') % err)

        return {'provider_location': lun_info['ID'],
                'ID': lun_id,
                'lun_info': lun_info}
    @utils.synchronized('huawei', external=True)
    def delete_volume(self, volume):
        """Delete a volume.

        Three steps:
        Firstly, remove associate from lungroup.
        Secondly, remove associate from QoS policy.
        Thirdly, remove the lun.

        :returns: True on success, False when the LUN is already gone.
        """
        name = huawei_utils.encode_name(volume['id'])
        lun_id = volume.get('provider_location')
        LOG.info(_LI('Delete volume: %(name)s, array lun id: %(lun_id)s.'),
                 {'name': name, 'lun_id': lun_id},)
        if lun_id:
            if self.restclient.check_lun_exist(lun_id):
                # Detach the LUN from its QoS policy before deleting it.
                qos_id = self.restclient.get_qosid_by_lunid(lun_id)
                if qos_id:
                    self.remove_qos_lun(lun_id, qos_id)

                self.restclient.delete_lun(lun_id)
        else:
            LOG.warning(_LW("Can't find lun %s on the array."), lun_id)
            return False

        return True
def remove_qos_lun(self, lun_id, qos_id):
lun_list = self.restclient.get_lun_list_in_qos(qos_id)
lun_count = len(lun_list)
if lun_count <= 1:
qos = smartx.SmartQos(self.restclient)
qos.delete_qos(qos_id)
else:
self.restclient.remove_lun_from_qos(lun_id,
lun_list,
qos_id)
def _delete_lun_with_check(self, lun_id):
if lun_id:
if self.restclient.check_lun_exist(lun_id):
qos_id = self.restclient.get_qosid_by_lunid(lun_id)
if qos_id:
self.remove_qos_lun(lun_id, qos_id)
self.restclient.delete_lun(lun_id)
    def _is_lun_migration_complete(self, src_id, dst_id):
        """Poll the array's migration task list for src_id -> dst_id.

        :returns: True when the task reports COMPLETE, False while it is
                  still running.
        :raises exception.VolumeBackendAPIException: the task faulted or
                  no matching task exists on the array.
        """
        result = self.restclient.get_lun_migration_task()
        found_migration_task = False
        if 'data' in result:
            for item in result['data']:
                if (src_id == item['PARENTID']
                        and dst_id == item['TARGETLUNID']):
                    found_migration_task = True
                    if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']:
                        return True
                    if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']:
                        err_msg = (_('Lun migration error.'))
                        LOG.error(err_msg)
                        raise exception.VolumeBackendAPIException(data=err_msg)

        # A vanished task is treated as an error rather than as completion.
        if not found_migration_task:
            err_msg = (_("Cannot find migration task."))
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        return False
def _is_lun_migration_exist(self, src_id, dst_id):
try:
result = self.restclient.get_lun_migration_task()
except Exception:
LOG.error(_LE("Get LUN migration error."))
return False
if 'data' in result:
for item in result['data']:
if (src_id == item['PARENTID']
and dst_id == item['TARGETLUNID']):
return True
return False
    def _migrate_lun(self, src_id, dst_id):
        """Run an array-side LUN migration from *src_id* to *dst_id* and
        block until it completes.

        On any outcome the finished/failed migration task and, on failure,
        the destination LUN are cleaned up in the ``finally`` block.
        """
        try:
            self.restclient.create_lun_migration(src_id, dst_id)

            def _is_lun_migration_complete():
                return self._is_lun_migration_complete(src_id, dst_id)

            wait_interval = constants.MIGRATION_WAIT_INTERVAL
            huawei_utils.wait_for_condition(self.xml_file_path,
                                            _is_lun_migration_complete,
                                            wait_interval)
        # Clean up if migration failed.
        except Exception as ex:
            raise exception.VolumeBackendAPIException(data=ex)
        finally:
            if self._is_lun_migration_exist(src_id, dst_id):
                self.restclient.delete_lun_migration(src_id, dst_id)
                self._delete_lun_with_check(dst_id)

        LOG.debug("Migrate lun %s successfully.", src_id)
        return True
    def _wait_volume_ready(self, lun_id):
        """Block until LUN *lun_id* reports healthy and online.

        Polls every 'LUNReadyWaitInterval' seconds with a timeout of ten
        intervals; ``wait_for_condition`` raises on timeout.
        """
        event_type = 'LUNReadyWaitInterval'
        wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
                                                       event_type)

        def _volume_ready():
            result = self.restclient.get_lun_info(lun_id)
            if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
               and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
                return True
            return False

        huawei_utils.wait_for_condition(self.xml_file_path,
                                        _volume_ready,
                                        wait_interval,
                                        wait_interval * 10)
def _get_original_status(self, volume):
if not volume['volume_attachment']:
return 'available'
else:
return 'in-use'
    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status=None):
        """After a generic (host-assisted) migration, rename the new LUN
        back to the original volume's array name.

        :returns: model update for Cinder; keeps ``_name_id`` pointing at
                  the new volume when the rename fails.
        """
        original_name = huawei_utils.encode_name(volume['id'])
        current_name = huawei_utils.encode_name(new_volume['id'])

        lun_id = self.restclient.get_volume_by_name(current_name)
        try:
            self.restclient.rename_lun(lun_id, original_name)
        except exception.VolumeBackendAPIException:
            LOG.error(_LE('Unable to rename lun %s on array.'), current_name)
            return {'_name_id': new_volume['_name_id'] or new_volume['id']}

        LOG.debug("Rename lun from %(current_name)s to %(original_name)s "
                  "successfully.",
                  {'current_name': current_name,
                   'original_name': original_name})

        # Rename succeeded: the original id maps to the LUN again.
        model_update = {'_name_id': None}

        return model_update
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Migrate a volume within the same array."""
return self._migrate_volume(volume, host, new_type)
def _check_migration_valid(self, host, volume):
if 'pool_name' not in host['capabilities']:
return False
target_device = host['capabilities']['location_info']
# Source and destination should be on same array.
if target_device != self.restclient.device_id:
return False
# Same protocol should be used if volume is in-use.
protocol = huawei_utils.get_protocol(self.xml_file_path)
if (host['capabilities']['storage_protocol'] != protocol
and self._get_original_status(volume) == 'in-use'):
return False
pool_name = host['capabilities']['pool_name']
if len(pool_name) == 0:
return False
return True
    def _migrate_volume(self, volume, host, new_type=None):
        """Array-assisted migration: build a destination LUN matching the
        (possibly new) type's smart features, then run a LUN migration.

        :returns: (moved, model_update) tuple; ``(False, None)`` when the
                  array cannot perform the migration.
        """
        if not self._check_migration_valid(host, volume):
            return (False, None)

        type_id = volume['volume_type_id']

        volume_type = None
        if type_id:
            volume_type = volume_types.get_volume_type(None, type_id)

        pool_name = host['capabilities']['pool_name']
        pools = self.restclient.find_all_pools()
        pool_info = self.restclient.find_pool_info(pool_name, pools)
        src_volume_name = huawei_utils.encode_name(volume['id'])
        # Temporary name for the destination LUN; renamed by
        # update_migrated_volume() once Cinder swaps the records.
        dst_volume_name = six.text_type(hash(src_volume_name))
        src_id = volume.get('provider_location')
        src_lun_params = self.restclient.get_lun_info(src_id)

        opts = None
        qos = None
        if new_type:
            # If new type exists, use new type.
            opts = huawei_utils._get_extra_spec_value(
                new_type['extra_specs'])
            opts = smartx.SmartX().get_smartx_specs_opts(opts)
            if 'LUNType' not in opts:
                opts['LUNType'] = huawei_utils.find_luntype_in_xml(
                    self.xml_file_path)

            qos = huawei_utils.get_qos_by_volume_type(new_type)
        elif volume_type:
            qos = huawei_utils.get_qos_by_volume_type(volume_type)

        if not opts:
            opts = huawei_utils.get_volume_params(volume)
            opts = smartx.SmartX().get_smartx_specs_opts(opts)

        lun_info = self._create_lun_with_extra_feature(pool_info,
                                                       dst_volume_name,
                                                       src_lun_params,
                                                       opts)
        lun_id = lun_info['ID']

        # Apply QoS/partition/cache to the destination before migrating.
        if qos:
            LOG.info(_LI('QoS: %s.'), qos)
            SmartQos = smartx.SmartQos(self.restclient)
            SmartQos.create_qos(qos, lun_id)
        if opts:
            smartpartition = smartx.SmartPartition(self.restclient)
            smartpartition.add(opts, lun_id)
            smartcache = smartx.SmartCache(self.restclient)
            smartcache.add(opts, lun_id)

        dst_id = lun_info['ID']
        self._wait_volume_ready(dst_id)
        moved = self._migrate_lun(src_id, dst_id)

        return moved, {}
    def _create_lun_with_extra_feature(self, pool_info,
                                       lun_name,
                                       lun_params,
                                       spec_opts):
        """Create a migration-destination LUN that copies the source LUN's
        cache/prefetch settings, overridden by the new type's specs.

        :returns: the array's LUN info dict for the new LUN.
        """
        LOG.info(_LI('Create a new lun %s for migration.'), lun_name)

        # Prepare lun parameters.
        # TYPE '11' = LUN, PARENTTYPE '216' = storage pool (array constants).
        lunparam = {"TYPE": '11',
                    "NAME": lun_name,
                    "PARENTTYPE": '216',
                    "PARENTID": pool_info['ID'],
                    "ALLOCTYPE": lun_params['ALLOCTYPE'],
                    "CAPACITY": lun_params['CAPACITY'],
                    "WRITEPOLICY": lun_params['WRITEPOLICY'],
                    "MIRRORPOLICY": lun_params['MIRRORPOLICY'],
                    "PREFETCHPOLICY": lun_params['PREFETCHPOLICY'],
                    "PREFETCHVALUE": lun_params['PREFETCHVALUE'],
                    "DATATRANSFERPOLICY": '0',
                    "READCACHEPOLICY": lun_params['READCACHEPOLICY'],
                    "WRITECACHEPOLICY": lun_params['WRITECACHEPOLICY'],
                    "OWNINGCONTROLLER": lun_params['OWNINGCONTROLLER'],
                    }
        # New-type extra specs override the inherited settings.
        if 'LUNType' in spec_opts:
            lunparam['ALLOCTYPE'] = spec_opts['LUNType']
        if spec_opts['policy']:
            lunparam['DATATRANSFERPOLICY'] = spec_opts['policy']

        lun_info = self.restclient.create_volume(lunparam)
        return lun_info
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
We use LUNcopy to copy a new volume from snapshot.
The time needed increases as volume size does.
"""
snapshotname = huawei_utils.encode_name(snapshot['id'])
snapshot_id = snapshot.get('provider_location')
if snapshot_id is None:
snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)
if snapshot_id is None:
err_msg = (_(
'create_volume_from_snapshot: Snapshot %(name)s '
'does not exist.')
% {'name': snapshotname})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
lun_info = self.create_volume(volume)
tgt_lun_id = lun_info['ID']
luncopy_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'),
{'src_lun_id': snapshot_id,
'tgt_lun_id': tgt_lun_id,
'copy_name': luncopy_name})
event_type = 'LUNReadyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
def _volume_ready():
result = self.restclient.get_lun_info(tgt_lun_id)
if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
return True
return False
huawei_utils.wait_for_condition(self.xml_file_path,
_volume_ready,
wait_interval,
wait_interval * 10)
self._copy_volume(volume, luncopy_name,
snapshot_id, tgt_lun_id)
return {'ID': lun_info['ID'],
'lun_info': lun_info}
    def create_cloned_volume(self, volume, src_vref):
        """Clone a new volume from an existing volume.

        Implemented as: take a temporary snapshot of the source, create
        the new volume from it, then delete the snapshot (best effort).
        """
        # Form the snapshot structure.
        snapshot = {'id': uuid.uuid4().__str__(),
                    'volume_id': src_vref['id'],
                    'volume': src_vref}

        # Create snapshot.
        self.create_snapshot(snapshot)

        try:
            # Create volume from snapshot.
            lun_info = self.create_volume_from_snapshot(volume, snapshot)
        finally:
            try:
                # Delete snapshot.
                self.delete_snapshot(snapshot)
            except exception.VolumeBackendAPIException:
                # Deliberately best-effort: a leaked temporary snapshot is
                # only logged, it does not fail the clone.
                LOG.warning(_LW(
                    'Failure deleting the snapshot %(snapshot_id)s '
                    'of volume %(volume_id)s.'),
                    {'snapshot_id': snapshot['id'],
                     'volume_id': src_vref['id']},)

        return {'provider_location': lun_info['ID'],
                'lun_info': lun_info}
    @utils.synchronized('huawei', external=True)
    def extend_volume(self, volume, new_size):
        """Extend a volume.

        :param new_size: requested size in GiB; converted below to the
            512-byte sectors the array API expects.
        """
        volume_size = huawei_utils.get_volume_size(volume)
        new_volume_size = int(new_size) * units.Gi / 512
        volume_name = huawei_utils.encode_name(volume['id'])

        LOG.info(_LI(
            'Extend volume: %(volumename)s, oldsize:'
            ' %(oldsize)s  newsize: %(newsize)s.'),
            {'volumename': volume_name,
             'oldsize': volume_size,
             'newsize': new_volume_size},)

        lun_id = self.restclient.get_lunid(volume, volume_name)
        luninfo = self.restclient.extend_volume(lun_id, new_volume_size)

        return {'provider_location': luninfo['ID'],
                'lun_info': luninfo}
@utils.synchronized('huawei', external=True)
def create_snapshot(self, snapshot):
snapshot_info = self.restclient.create_snapshot(snapshot)
snapshot_id = snapshot_info['ID']
self.restclient.activate_snapshot(snapshot_id)
return {'provider_location': snapshot_info['ID'],
'lun_info': snapshot_info}
    @utils.synchronized('huawei', external=True)
    def delete_snapshot(self, snapshot):
        """Stop and delete a snapshot on the array.

        :returns: False when the snapshot id cannot be resolved at all;
            True otherwise (including when the snapshot is already gone,
            which is only logged).
        """
        snapshotname = huawei_utils.encode_name(snapshot['id'])
        volume_name = huawei_utils.encode_name(snapshot['volume_id'])

        LOG.info(_LI(
            'stop_snapshot: snapshot name: %(snapshot)s, '
            'volume name: %(volume)s.'),
            {'snapshot': snapshotname,
             'volume': volume_name},)

        snapshot_id = snapshot.get('provider_location')
        if snapshot_id is None:
            snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)

        if snapshot_id is not None:
            if self.restclient.check_snapshot_exist(snapshot_id):
                # A snapshot must be stopped (deactivated) before deletion.
                self.restclient.stop_snapshot(snapshot_id)
                self.restclient.delete_snapshot(snapshot_id)
            else:
                LOG.warning(_LW("Can't find snapshot on the array."))
        else:
            LOG.warning(_LW("Can't find snapshot on the array."))
            return False

        return True
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        Either migrates the LUN (host/LUN-type change) or modifies its
        smart-feature bindings in place.

        :returns: True on success, False when the retype could not be done.
        """
        LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, "
                  "diff=%(diff)s, host=%(host)s.", {'id': volume['id'],
                                                    'new_type': new_type,
                                                    'diff': diff,
                                                    'host': host})

        # Check what changes are needed
        migration, change_opts, lun_id = self.determine_changes_when_retype(
            volume, new_type, host)

        try:
            if migration:
                LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with "
                          "change %(change_opts)s.",
                          {"lun_id": lun_id, "change_opts": change_opts})
                if self._migrate_volume(volume, host, new_type):
                    return True
                else:
                    LOG.warning(_LW("Storage-assisted migration failed during "
                                    "retype."))
                    return False
            else:
                # Modify lun to change policy
                self.modify_lun(lun_id, change_opts)
                return True
        except exception.VolumeBackendAPIException:
            LOG.exception(_LE('Retype volume error.'))
            return False
    def modify_lun(self, lun_id, change_opts):
        """Apply in-place retype changes to *lun_id*.

        *change_opts* holds (old, new) pairs per feature, as produced by
        determine_changes_when_retype(); each present key is handled by
        removing the old binding and adding the new one.
        """
        if change_opts.get('partitionid'):
            old, new = change_opts['partitionid']
            old_id = old[0]
            old_name = old[1]
            new_id = new[0]
            new_name = new[1]
            if old_id:
                self.restclient.remove_lun_from_partition(lun_id, old_id)
            if new_id:
                self.restclient.add_lun_to_partition(lun_id, new_id)
            LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from "
                         "(name: %(old_name)s, id: %(old_id)s) to "
                         "(name: %(new_name)s, id: %(new_id)s) success."),
                     {"lun_id": lun_id,
                      "old_id": old_id, "old_name": old_name,
                      "new_id": new_id, "new_name": new_name})

        if change_opts.get('cacheid'):
            old, new = change_opts['cacheid']
            old_id = old[0]
            old_name = old[1]
            new_id = new[0]
            new_name = new[1]
            if old_id:
                self.restclient.remove_lun_from_cache(lun_id, old_id)
            if new_id:
                self.restclient.add_lun_to_cache(lun_id, new_id)
            LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from "
                         "(name: %(old_name)s, id: %(old_id)s) to "
                         "(name: %(new_name)s, id: %(new_id)s) successfully."),
                     {'lun_id': lun_id,
                      'old_id': old_id, "old_name": old_name,
                      'new_id': new_id, "new_name": new_name})

        if change_opts.get('policy'):
            old_policy, new_policy = change_opts['policy']
            self.restclient.change_lun_smarttier(lun_id, new_policy)
            LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from "
                         "%(old_policy)s to %(new_policy)s success."),
                     {'lun_id': lun_id,
                      'old_policy': old_policy,
                      'new_policy': new_policy})

        if change_opts.get('qos'):
            old_qos, new_qos = change_opts['qos']
            old_qos_id = old_qos[0]
            old_qos_value = old_qos[1]
            if old_qos_id:
                self.remove_qos_lun(lun_id, old_qos_id)
            if new_qos:
                smart_qos = smartx.SmartQos(self.restclient)
                smart_qos.create_qos(new_qos, lun_id)
            LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from "
                         "%(old_qos_value)s to %(new_qos)s success."),
                     {'lun_id': lun_id,
                      'old_qos_value': old_qos_value,
                      'new_qos': new_qos})
def get_lun_specs(self, lun_id):
lun_opts = {
'policy': None,
'partitionid': None,
'cacheid': None,
'LUNType': None,
}
lun_info = self.restclient.get_lun_info(lun_id)
lun_opts['LUNType'] = int(lun_info['ALLOCTYPE'])
if lun_info['DATATRANSFERPOLICY']:
lun_opts['policy'] = lun_info['DATATRANSFERPOLICY']
if lun_info['SMARTCACHEPARTITIONID']:
lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID']
if lun_info['CACHEPARTITIONID']:
lun_opts['partitionid'] = lun_info['CACHEPARTITIONID']
return lun_opts
    def determine_changes_when_retype(self, volume, new_type, host):
        """Diff the volume's current array settings against *new_type*.

        :returns: (migration, change_opts, lun_id) where *migration* is True
            when the host or LUN allocation type changes (requiring a LUN
            migration) and *change_opts* maps each feature to its
            (old, new) pair for in-place modification.
        :raises exception.VolumeBackendAPIException: a cache or partition
            named in the new type does not exist on the array.
        """
        migration = False
        change_opts = {
            'policy': None,
            'partitionid': None,
            'cacheid': None,
            'qos': None,
            'host': None,
            'LUNType': None,
        }

        lun_id = volume.get('provider_location')
        old_opts = self.get_lun_specs(lun_id)

        new_specs = new_type['extra_specs']
        new_opts = huawei_utils._get_extra_spec_value(new_specs)
        new_opts = smartx.SmartX().get_smartx_specs_opts(new_opts)

        if 'LUNType' not in new_opts:
            new_opts['LUNType'] = huawei_utils.find_luntype_in_xml(
                self.xml_file_path)

        # Host or allocation-type changes cannot be done in place.
        if volume['host'] != host['host']:
            migration = True
            change_opts['host'] = (volume['host'], host['host'])
        if old_opts['LUNType'] != new_opts['LUNType']:
            migration = True
            change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType'])

        # Resolve the new cache/partition names to array ids up front so a
        # bad type fails before anything is modified.
        new_cache_id = None
        new_cache_name = new_opts['cachename']
        if new_cache_name:
            new_cache_id = self.restclient.get_cache_id_by_name(new_cache_name)
            if new_cache_id is None:
                msg = (_(
                    "Can't find cache name on the array, cache name is: "
                    "%(name)s.") % {'name': new_cache_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        new_partition_id = None
        new_partition_name = new_opts['partitionname']
        if new_partition_name:
            new_partition_id = self.restclient.get_partition_id_by_name(
                new_partition_name)
            if new_partition_id is None:
                msg = (_(
                    "Can't find partition name on the array, partition name "
                    "is: %(name)s.") % {'name': new_partition_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        # smarttier
        if old_opts['policy'] != new_opts['policy']:
            change_opts['policy'] = (old_opts['policy'], new_opts['policy'])

        # smartcache
        old_cache_id = old_opts['cacheid']
        if old_cache_id != new_cache_id:
            old_cache_name = None
            if old_cache_id:
                cache_info = self.restclient.get_cache_info_by_id(old_cache_id)
                old_cache_name = cache_info['NAME']
            change_opts['cacheid'] = ([old_cache_id, old_cache_name],
                                      [new_cache_id, new_cache_name])

        # smartpartition
        old_partition_id = old_opts['partitionid']
        if old_partition_id != new_partition_id:
            old_partition_name = None
            if old_partition_id:
                partition_info = self.restclient.get_partition_info_by_id(
                    old_partition_id)
                old_partition_name = partition_info['NAME']
            change_opts['partitionid'] = ([old_partition_id,
                                           old_partition_name],
                                          [new_partition_id,
                                           new_partition_name])

        # smartqos
        new_qos = huawei_utils.get_qos_by_volume_type(new_type)
        old_qos_id = self.restclient.get_qosid_by_lunid(lun_id)
        old_qos = self._get_qos_specs_from_array(old_qos_id)
        if old_qos != new_qos:
            change_opts['qos'] = ([old_qos_id, old_qos], new_qos)

        LOG.debug("Determine changes when retype. Migration: "
                  "%(migration)s, change_opts: %(change_opts)s.",
                  {'migration': migration, 'change_opts': change_opts})
        return migration, change_opts, lun_id
def _get_qos_specs_from_array(self, qos_id):
qos = {}
qos_info = {}
if qos_id:
qos_info = self.restclient.get_qos_info(qos_id)
for key, value in qos_info.items():
if key.upper() in constants.QOS_KEYS:
if key.upper() == 'LATENCY' and value == '0':
continue
else:
qos[key.upper()] = value
return qos
def create_export(self, context, volume, connector):
"""Export a volume."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
def _copy_volume(self, volume, copy_name, src_lun, tgt_lun):
luncopy_id = self.restclient.create_luncopy(copy_name,
src_lun, tgt_lun)
event_type = 'LUNcopyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
try:
self.restclient.start_luncopy(luncopy_id)
def _luncopy_complete():
luncopy_info = self.restclient.get_luncopy_info(luncopy_id)
if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY:
# luncopy_info['status'] means for the running status of
# the luncopy. If luncopy_info['status'] is equal to '40',
# this luncopy is completely ready.
return True
elif luncopy_info['state'] != constants.STATUS_HEALTH:
# luncopy_info['state'] means for the healthy status of the
# luncopy. If luncopy_info['state'] is not equal to '1',
# this means that an error occurred during the LUNcopy
# operation and we should abort it.
err_msg = (_(
'An error occurred during the LUNcopy operation. '
'LUNcopy name: %(luncopyname)s. '
'LUNcopy status: %(luncopystatus)s. '
'LUNcopy state: %(luncopystate)s.')
% {'luncopyname': luncopy_id,
'luncopystatus': luncopy_info['status'],
'luncopystate': luncopy_info['state']},)
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
huawei_utils.wait_for_condition(self.xml_file_path,
_luncopy_complete,
wait_interval)
except Exception:
with excutils.save_and_reraise_exception():
self.restclient.delete_luncopy(luncopy_id)
self.delete_volume(volume)
self.restclient.delete_luncopy(luncopy_id)
class Huawei18000ISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
"""ISCSI driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
CHAP support
Multiple pools support
ISCSI multipath support
SmartX support
Volume migration support
Volume retype support
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
super(Huawei18000ISCSIDriver, self).__init__(*args, **kwargs)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'iSCSI'
data['driver_version'] = self.VERSION
data['vendor_name'] = 'Huawei'
return data
@utils.synchronized('huawei', external=True)
def initialize_connection(self, volume, connector):
"""Map a volume to a host and return target iSCSI information."""
LOG.info(_LI('Enter initialize_connection.'))
initiator_name = connector['initiator']
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'initiator name: %(initiator_name)s, '
'volume name: %(volume)s.'),
{'initiator_name': initiator_name,
'volume': volume_name})
(iscsi_iqns,
target_ips,
portgroup_id) = self.restclient.get_iscsi_params(self.xml_file_path,
connector)
LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
'target_ip: %(target_ip)s, '
'portgroup_id: %(portgroup_id)s.'),
{'iscsi_iqn': iscsi_iqns,
'target_ip': target_ips,
'portgroup_id': portgroup_id},)
# Create hostgroup if not exist.
host_name = connector['host']
host_name_before_hash = None
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = six.text_type(hash(host_name))
host_id = self.restclient.add_host_with_check(host_name,
host_name_before_hash)
# Add initiator to the host.
self.restclient.ensure_initiator_added(self.xml_file_path,
initiator_name,
host_id)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
lun_id = self.restclient.get_lunid(volume, volume_name)
# Mapping lungroup and hostgroup to view.
self.restclient.do_mapping(lun_id, hostgroup_id,
host_id, portgroup_id)
hostlun_id = self.restclient.find_host_lun_id(host_id, lun_id)
LOG.info(_LI("initialize_connection, host lun id is: %s."),
hostlun_id)
iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
chapinfo = self.restclient.find_chap_info(iscsi_conf,
initiator_name)
# Return iSCSI properties.
properties = {}
properties['target_discovered'] = False
properties['volume_id'] = volume['id']
multipath = connector.get('multipath', False)
hostlun_id = int(hostlun_id)
if not multipath:
properties['target_portal'] = ('%s:3260' % target_ips[0])
properties['target_iqn'] = iscsi_iqns[0]
properties['target_lun'] = hostlun_id
else:
properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
properties['target_portals'] = [
'%s:3260' % ip for ip in target_ips]
properties['target_luns'] = [hostlun_id] * len(target_ips)
# If use CHAP, return CHAP info.
if chapinfo:
chap_username, chap_password = chapinfo.split(';')
properties['auth_method'] = 'CHAP'
properties['auth_username'] = chap_username
properties['auth_password'] = chap_password
LOG.info(_LI("initialize_connection success. Return data: %s."),
properties)
return {'driver_volume_type': 'iscsi', 'data': properties}
@utils.synchronized('huawei', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Delete map between a volume and a host."""
initiator_name = connector['initiator']
volume_name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location')
host_name = connector['host']
lungroup_id = None
LOG.info(_LI(
'terminate_connection: volume name: %(volume)s, '
'initiator name: %(ini)s, '
'lun_id: %(lunid)s.'),
{'volume': volume_name,
'ini': initiator_name,
'lunid': lun_id},)
iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
portgroup = None
portgroup_id = None
view_id = None
left_lunnum = -1
for ini in iscsi_conf['Initiator']:
if ini['Name'] == initiator_name:
for key in ini:
if key == 'TargetPortGroup':
portgroup = ini['TargetPortGroup']
break
if portgroup:
portgroup_id = self.restclient.find_tgt_port_group(portgroup)
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name = six.text_type(hash(host_name))
host_id = self.restclient.find_host(host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.restclient.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.restclient.find_lungroup_from_map(view_id)
# Remove lun from lungroup.
if lun_id and self.restclient.check_lun_exist(lun_id):
if lungroup_id:
lungroup_ids = self.restclient.get_lungroupids_by_lunid(lun_id)
if lungroup_id in lungroup_ids:
self.restclient.remove_lun_from_lungroup(lungroup_id,
lun_id)
else:
LOG.warning(_LW("Lun is not in lungroup. "
"Lun id: %(lun_id)s. "
"lungroup id: %(lungroup_id)s."),
{"lun_id": lun_id,
"lungroup_id": lungroup_id})
else:
LOG.warning(_LW("Can't find lun on the array."))
# Remove portgroup from mapping view if no lun left in lungroup.
if lungroup_id:
left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
if portgroup_id and view_id and (int(left_lunnum) <= 0):
if self.restclient.is_portgroup_associated_to_view(view_id,
portgroup_id):
self.restclient.delete_portgroup_mapping_view(view_id,
portgroup_id)
if view_id and (int(left_lunnum) <= 0):
self.restclient.remove_chap(initiator_name)
if self.restclient.lungroup_associated(view_id, lungroup_id):
self.restclient.delete_lungroup_mapping_view(view_id,
lungroup_id)
self.restclient.delete_lungroup(lungroup_id)
if self.restclient.is_initiator_associated_to_host(initiator_name):
self.restclient.remove_iscsi_from_host(initiator_name)
hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
if hostgroup_id:
if self.restclient.hostgroup_associated(view_id, hostgroup_id):
self.restclient.delete_hostgoup_mapping_view(view_id,
hostgroup_id)
self.restclient.remove_host_from_hostgroup(hostgroup_id,
host_id)
self.restclient.delete_hostgroup(hostgroup_id)
self.restclient.remove_host(host_id)
self.restclient.delete_mapping_view(view_id)
class Huawei18000FCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
"""FC driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
Multiple pools support
SmartX support
Volume migration support
Volume retype support
FC zone enhancement
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
super(Huawei18000FCDriver, self).__init__(*args, **kwargs)
self.fcsan_lookup_service = None
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'FC'
data['driver_version'] = self.VERSION
data['vendor_name'] = 'Huawei'
return data
@utils.synchronized('huawei', external=True)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
wwns = connector['wwpns']
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'initialize_connection, initiator: %(wwpns)s,'
' volume name: %(volume)s.'),
{'wwpns': wwns,
'volume': volume_name},)
lun_id = self.restclient.get_lunid(volume, volume_name)
host_name_before_hash = None
host_name = connector['host']
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = six.text_type(hash(host_name))
if not self.fcsan_lookup_service:
self.fcsan_lookup_service = fczm_utils.create_lookup_service()
if self.fcsan_lookup_service:
# Use FC switch.
host_id = self.restclient.add_host_with_check(
host_name, host_name_before_hash)
zone_helper = fc_zone_helper.FCZoneHelper(
self.fcsan_lookup_service, self.restclient)
(tgt_port_wwns, init_targ_map) = (
zone_helper.build_ini_targ_map(wwns))
for ini in init_targ_map:
self.restclient.ensure_fc_initiator_added(ini, host_id)
else:
# Not use FC switch.
host_id = self.restclient.add_host_with_check(
host_name, host_name_before_hash)
online_wwns_in_host = (
self.restclient.get_host_online_fc_initiators(host_id))
online_free_wwns = self.restclient.get_online_free_wwns()
for wwn in wwns:
if (wwn not in online_wwns_in_host
and wwn not in online_free_wwns):
wwns_in_host = (
self.restclient.get_host_fc_initiators(host_id))
iqns_in_host = (
self.restclient.get_host_iscsi_initiators(host_id))
if not wwns_in_host and not iqns_in_host:
self.restclient.remove_host(host_id)
msg = (_('Can not add FC initiator to host.'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for wwn in wwns:
if wwn in online_free_wwns:
self.restclient.add_fc_port_to_host(host_id, wwn)
(tgt_port_wwns, init_targ_map) = (
self.restclient.get_init_targ_map(wwns))
# Add host into hostgroup.
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
self.restclient.do_mapping(lun_id, hostgroup_id, host_id)
host_lun_id = self.restclient.find_host_lun_id(host_id, lun_id)
# Return FC properties.
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': int(host_lun_id),
'target_discovered': True,
'target_wwn': tgt_port_wwns,
'volume_id': volume['id'],
'initiator_target_map': init_targ_map}, }
LOG.info(_LI("initialize_connection, return data is: %s."),
info)
return info
@utils.synchronized('huawei', external=True)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Delete map between a volume and a host."""
wwns = connector['wwpns']
volume_name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location')
host_name = connector['host']
left_lunnum = -1
lungroup_id = None
view_id = None
LOG.info(_LI('terminate_connection: volume name: %(volume)s, '
'wwpns: %(wwns)s, '
'lun_id: %(lunid)s.'),
{'volume': volume_name,
'wwns': wwns,
'lunid': lun_id},)
if host_name and len(host_name) > constants.MAX_HOSTNAME_LENGTH:
host_name = six.text_type(hash(host_name))
host_id = self.restclient.find_host(host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.restclient.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.restclient.find_lungroup_from_map(view_id)
if lun_id and self.restclient.check_lun_exist(lun_id):
if lungroup_id:
lungroup_ids = self.restclient.get_lungroupids_by_lunid(lun_id)
if lungroup_id in lungroup_ids:
self.restclient.remove_lun_from_lungroup(lungroup_id,
lun_id)
else:
LOG.warning(_LW("Lun is not in lungroup. "
"Lun id: %(lun_id)s. "
"Lungroup id: %(lungroup_id)s."),
{"lun_id": lun_id,
"lungroup_id": lungroup_id})
else:
LOG.warning(_LW("Can't find lun on the array."))
if lungroup_id:
left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
if int(left_lunnum) > 0:
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
else:
if not self.fcsan_lookup_service:
self.fcsan_lookup_service = fczm_utils.create_lookup_service()
if self.fcsan_lookup_service:
zone_helper = fc_zone_helper.FCZoneHelper(
self.fcsan_lookup_service, self.restclient)
(tgt_port_wwns, init_targ_map) = (
zone_helper.build_ini_targ_map(wwns))
else:
(tgt_port_wwns, init_targ_map) = (
self.restclient.get_init_targ_map(wwns))
for wwn in wwns:
if self.restclient.is_fc_initiator_associated_to_host(wwn):
self.restclient.remove_fc_from_host(wwn)
if lungroup_id:
if view_id and self.restclient.lungroup_associated(
view_id, lungroup_id):
self.restclient.delete_lungroup_mapping_view(view_id,
lungroup_id)
self.restclient.delete_lungroup(lungroup_id)
if host_id:
hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
if hostgroup_id:
if view_id and self.restclient.hostgroup_associated(
view_id, hostgroup_id):
self.restclient.delete_hostgoup_mapping_view(
view_id, hostgroup_id)
self.restclient.remove_host_from_hostgroup(
hostgroup_id, host_id)
self.restclient.delete_hostgroup(hostgroup_id)
if not self.restclient.check_fc_initiators_exist_in_host(
host_id):
self.restclient.remove_host(host_id)
if view_id:
self.restclient.delete_mapping_view(view_id)
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': tgt_port_wwns,
'initiator_target_map': init_targ_map}}
LOG.info(_LI("terminate_connection, return data is: %s."),
info)
return info
| Paul-Ezell/cinder-1 | cinder/volume/drivers/huawei/huawei_driver.py | Python | apache-2.0 | 49,899 | 0 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import unittest
import imgchili
import lxml.html
from os.path import join, getsize, isfile
class TestImgchili(unittest.TestCase):
def setUp(self):
self.basedir = '/mnt/d/Maidens/Uploads/'
self.url = 'http://imgchili.com/show/2765/2765317_9.jpg'
self.image_url = 'http://i1.imgchili.com/2765/2765317_9.jpg'
self.example_chiliimage_page = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta http-equiv="Content-Style-Type" content="text/css" />
<meta http-equiv="Content-Language" content="en-us" />
<meta http-equiv="imagetoolbar" content="no" />
<title>imgChili » 2765317_9.jpg</title>
<meta name="version" content="imgChili" />
<meta name="description" content="imgChili is the free image hosting solution for everyone. With imgChili you can upload your images and photos, categorize them, share them with friends, and you can even make money out of this!" />
<meta name="keywords" content="image hosting, image hosting service, multiple image hosting, unlimited bandwidth, free image hosting" />
<base href="http://imgchili.com/" />
<link rel="shortcut icon" href="./theme/images/favicoc.ico" />
<link href="theme/style.css" rel="stylesheet" type="text/css" media="screen" />
<script type="text/javascript">
var a = "<a class=\"removeAds2\" href=\"premium\" target=\"_blank\">Remove ads [x]<\/a><iframe src='http://www.streamate.com/landing/2/?AFNO=1-0-630365-341541&UHNSMTY=303' style='width:1028px;height:900px;border:0px;'></iframe>";
var x = "";
</script>
<script type="text/javascript" src="source/includes/scripts/jquery.js"></script>
<script type="text/javascript" src="source/includes/scripts/genjscript.js"></script>
<script type="text/javascript" src="source/includes/scripts/phpjs_00029.js"></script>
<script type="text/javascript" src="source/includes/scripts/jquery.jdMenu.js"></script>
<script type="text/javascript" src="source/includes/scripts/jquery.bgiframe.js"></script>
<script type="text/javascript" src="source/includes/scripts/jquery.positionBy.js"></script>
<script type="text/javascript" src="source/includes/scripts/jquery.dimensions.js"></script>
<script type="text/javascript" src="/js/maxlines.js"></script>
<!-- <# JSHOOK_GCHART #> -->
<!--[if lt IE 7]>
<script src="/js/pngfix.js" type="text/javascript"></script>
<script>DD_belatedPNG.fix('.pngfix');</script>
<![endif]-->
<script type="text/javascript">
function loginMenu(){
if (document.getElementById('loginMenu').style.display == 'none'){
document.getElementById('loginMenu').style.display = 'block';
$("#loginTab").attr("class","button6");
}else{
document.getElementById('loginMenu').style.display = 'none';
$("#loginTab").attr("class","button3");
}
}
</script>
<script type="text/javascript">
<!--
if (top.location!= self.location) {
top.location = self.location.href
}
//-->
</script>
<script language="vbscript" type="text/vbs">
set_ie_alert()
Function vb_alert(msg_str)
MsgBox msg_str,vbOKOnly+vbInformation+vbApplicationModal,alert_title
End Function
</script>
</head>
<body>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-23829964-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
<!-- Place this tag in your head or just before your close body tag -->
<script type="text/javascript" src="https://apis.google.com/js/plusone.js"></script>
<script type="text/javascript" src="//platform.twitter.com/widgets.js"></script>
<div id="ad" style="font-size:17px;padding:5px;display:none;"></div>
<div id="all">
<div class="members_bar">
<div class="guest_links">
<a onmouseover="loginMenu();" id="loginTab" class="button3" href="#">Log In</a>
<a href="signup" class="button3" >Register</a>
<div id="loginMenu" class="button5" style="display: none;">
<form action="users.php?act=login-d" method="post">
<p><label>Username: </label><input name="username" class="textfield" type="text" /></p>
<p><label>Password: </label><input name="password" class="textfield" type="password" /></p>
<br /><br /><p><a href="javascript:void(0);" onclick="toggle_lightbox('users.php?act=lost_password', 'lost_password_lightbox');">Reset Password</a> <input type="submit" value="Log In" class="button1" /></p>
</form>
</div>
</div>
</div>
<div class="logo_cell">
<a href="./" style="float: left;" class="logo"></a> <div style=""><img src="./theme/images/blank.gif" height="0" width="0" alt="blank" /></div>
<ul id="navlink">
<li><a href="./blog" class="navlink">News</a></li>
<li><a href="/premium" class="navlink">Premium</a></li>
<li><a href="./affiliate" class="navlink">Affiliate</a></li>
<li><a href="./tools" class="navlink">Tools</a></li>
</ul>
</div>
<div class="page_cell">
<div id="page_body" class="page_body"><script type="text/javascript">
// <![CDATA[
var scaled = false;
function scaleonload(){
e = document.getElementById('show_image');
scale(e);
}
function scale(e){
if(scaled){
e.width=originalWidth;
e.height=originalHeight;
scaled=false;
}else{
if(e.width>908){
originalWidth = e.width;
originalHeight = e.height;
e.style.cursor = "url(/theme/magnify.cur), default";
e.width=908;
e.height = Math.ceil(908*(originalHeight/originalWidth));
scaled=true;
}
}
}
// ]]>
</script>
<center>
<div class="sidebar2">
<div>
<a class="removeAds" href="premium">Remove ads [x]</a>
</div>
<a href="http://www.3dtoontube.com/?t=3dimgchili" target="_blank"><img src="http://imgchili.com/media/tube/728x90h.jpg" alt="3dtoontube" /></a>
</div>
</center>
<script src="/js/showa.js" type="text/javascript"></script>
<center><br /> <img id="show_image" onload="scale(this);" onclick="scale(this);" src="http://i1.imgchili.com/2765/2765317_9.jpg" alt="2765317_9.jpg" /></center>
<div>
<table cellpadding="4" cellspacing="1" border="0" style="width: 100%;">
<tr>
<td colspan="2" class="tdrow2">
<center> <script src="http://www.stumbleupon.com/hostedbadge.php?s=1"></script> <a href="https://twitter.com/share" class="twitter-share-button" data-count="horizontal">Tweet</a> <g:plusone size="medium"></g:plusone></center>
</td>
</tr>
<tr>
<td colspan="2" class="tdrow2">
<div class="sidebar">
<div>
<a class="removeAds" href="/premium">Remove ads [x]</a>
</div>
<center><iframe src='http://feeds.videosz.com/spots/index.php?sid=86' frameborder='0' scrolling='no' width='600' height='260'></iframe></center>
</div>
</td>
</tr>
<tr>
<td colspan="2" class="tdrow2"><br /><br />
<table cellpadding="5" cellspacing="0" border="0" style="width: 100%;">
<tr>
<td style="width: 20%;" valign="middle" class="text_align_center">
<a href="http://imgchili.com/show/2765/2765317_9.jpg"><img src="http://t1.imgchili.com/2765/2765317_9.jpg" alt="2765317_9.jpg" /></a>
</td>
<td style="width: 80%;">
<table cellspacing="1" cellpadding="0" border="0" style="width: 100%;">
<tr>
<td><input readonly="readonly" class="input_field" onfocus="javascript: this.select()" type="text" style="width: 555px;" value="http://imgchili.com/show/2765/2765317_9.jpg" /></td>
<td>Share Link</td>
</tr>
<tr>
<td><input readonly="readonly" class="input_field" onfocus="javascript: this.select()" type="text" style="width: 555px;" value="<a href="http://imgchili.com/show/2765/2765317_9.jpg" target="_blank"><img src="http://t1.imgchili.com/2765/2765317_9.jpg" border="0" alt="2765317_9.jpg" /></a>" /></td>
<td>Thumbnail for Website</td>
</tr>
<tr>
<td><input readonly="readonly" class="input_field" onfocus="javascript: this.select()" type="text" style="width: 555px;" value="[URL=http://imgchili.com/show/2765/2765317_9.jpg][IMG]http://t1.imgchili.com/2765/2765317_9.jpg[/IMG][/URL]" /></td>
<td>Thumbnail for Forum</td>
</tr>
<tr>
<td><input readonly="readonly" class="input_field" onfocus="javascript: this.select()" type="text" style="width: 555px;" value="<a href="http://imgchili.com/">Free image hosting</a> by imgChili." /></td>
<td>Link to Us</td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td colspan="2" class="table_footer"> </td>
</tr>
</table>
</div>
</div>
</div>
<div id="footer_cell" class="footer_cell"><table align="center" border="0" cellpadding="1" cellspacing="0" width="100%">
<tbody><tr> <td align="left">
<a class="footer-content" href="./">Home</a> |
<a class="footer-content" href="./abuse">Report abuse</a> |
<a class="footer-content" href="./tos">ToS</a> |
<a class="footer-content" href="./privacy_policy">Privacy policy</a> |
<a class="footer-content" href="./faq">FAQ</a> |
<a class="footer-content" href="./forum">Support</a>
</td>
<td class="footer-content" align="right">Copyright © 2011 - 2012 imgChili. All rights reserved</td>
</tr></tbody>
</table></div></div>
<br />
<script type='text/javascript' src='http://static.creatives.livejasmin.com/marketing_javascript/popunder/i/im/imgchilli/imgchilli.js'></script> </body>
</html>"""
self.chiliimage = imgchili.ImgchiliParse(self.url, self.basedir)
def test_process_url(self):
self.page = self.chiliimage.process_url(self.url)
self.assertIsInstance(self.page, lxml.html.HtmlElement)
def test_imgchili_get_image_src(self):
self.imgchili_src = self.chiliimage.imgchili_get_image_src(lxml.html.fromstring(self.example_chiliimage_page))
self.assertIsInstance(self.imgchili_src, list)
self.assertTrue(self.imgchili_src[0])
def test_imgchili_get_image_name(self):
self.imagename = self.chiliimage.imgchili_get_image_name(self.image_url)
self.assertIsInstance(self.imagename, list)
def test_imgchili_save_image(self):
urllist = []
urllist.append(self.image_url)
urlnamelist = []
urlnamelist.append('testname.jpg')
self.chiliimage.imgchili_save_image(urllist, urlnamelist)
savefile = join(self.basedir, urlnamelist[-1])
self.assertTrue(isfile(savefile))
self.assertTrue(getsize(savefile) >= 1000)
def main():
suite = unittest.TestLoader().loadTestsFromTestCase(TestImgchili)
unittest.TextTestRunner(verbosity=2).run(suite)
main()
| Donearm/PyImagedownloader | pyimagedownloader/tests/imgchili_test.py | Python | gpl-3.0 | 12,411 | 0.005076 |
from audioop import reverse
from time import sleep
import pytest
import requests
from utils import *
from conftest import file_dir as test_home
from conftest import ref_version
import json
import datetime
class TestsortMajor:
def test_sort_with_reminderState_1(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&reminderState=notReminded"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_2(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadState=notStarted"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_3(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadState=inProgress"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_4(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadState=suspended"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_5(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadState=notStarted,inProgress"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_6(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadState=notStarted,inProgress,suspended"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_7(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&recordingState=notStarted"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_8(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&recordingState=inProgress"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_9(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&recordingState=notStarted,inProgress"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_10(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&recordingContentState=partial"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_11(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&recordingContentState=complete"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_12(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&recordingContentState=partial,complete"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_13(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadContentState=partial"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_14(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadContentState=complete"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_15(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&downloadContentState=partial,complete"))
assert filter_response.status_code == 200
def test_sort_with_embed_16(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=eventBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_17(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_18(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_19(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_20(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_21(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_22(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_23(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_24(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_25(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&bookingType=manual"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_26(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&bookingType=event"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_27(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title&bookingType=manual,event"))
assert filter_response.status_code == 200
def test_sort_with_reminderState_28(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&reminderState=notReminded"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_29(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadState=notStarted"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_30(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadState=inProgress"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_31(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadState=suspended"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_32(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadState=notStarted,inProgress"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_33(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadState=notStarted,inProgress,suspended"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_34(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&recordingState=notStarted"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_35(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&recordingState=inProgress"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_36(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&recordingState=notStarted,inProgress"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_37(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&recordingContentState=partial"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_38(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&recordingContentState=complete"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_39(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&recordingContentState=partial,complete"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_40(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadContentState=partial"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_41(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadContentState=complete"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_42(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&downloadContentState=partial,complete"))
assert filter_response.status_code == 200
def test_sort_with_embed_43(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=eventBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_44(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_45(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_46(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_47(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_48(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_49(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_50(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_51(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_52(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&bookingType=manual"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_53(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&bookingType=event"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_54(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=date&bookingType=manual,event"))
assert filter_response.status_code == 200
def test_sort_with_reminderState_55(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&reminderState=notReminded"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_56(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadState=notStarted"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_57(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadState=inProgress"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_58(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadState=suspended"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_59(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadState=notStarted,inProgress"))
assert filter_response.status_code == 200
def test_sort_with_downloadState_60(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadState=notStarted,inProgress,suspended"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_61(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&recordingState=notStarted"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_62(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&recordingState=inProgress"))
assert filter_response.status_code == 200
def test_sort_with_recordingState_63(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&recordingState=notStarted,inProgress"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_64(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&recordingContentState=partial"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_65(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&recordingContentState=complete"))
assert filter_response.status_code == 200
def test_sort_with_recordingContentState_66(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&recordingContentState=partial,complete"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_67(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadContentState=partial"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_68(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadContentState=complete"))
assert filter_response.status_code == 200
def test_sort_with_downloadContentState_69(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&downloadContentState=partial,complete"))
assert filter_response.status_code == 200
def test_sort_with_embed_70(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=eventBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_71(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_72(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_73(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_74(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_75(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_76(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_77(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_sort_with_embed_78(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_79(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&bookingType=manual"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_80(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&bookingType=event"))
assert filter_response.status_code == 200
def test_sort_with_bookingType_81(self):
filter_response = call_ref_url("get", make_booking_filter_url("sort=title,date&bookingType=manual,event"))
assert filter_response.status_code == 200
class TestreminderStateMajor:
    """Pairwise filter tests with reminderState=notReminded as the fixed key.

    Each test GETs the booking filter endpoint with one extra filter
    (downloadState / recordingState / recordingContentState /
    downloadContentState / embed / bookingType) and only checks that the
    service answers HTTP 200.
    """

    # Shared step for all tests in this class; the leading underscore keeps
    # pytest from collecting it as a test.
    def _assert_filter_ok(self, query):
        """GET the booking filter URL built from *query*; assert status 200."""
        response = call_ref_url("get", make_booking_filter_url(query))
        assert response.status_code == 200

    def test_reminderState_with_downloadState_1(self):
        self._assert_filter_ok("reminderState=notReminded&downloadState=notStarted")

    def test_reminderState_with_downloadState_2(self):
        self._assert_filter_ok("reminderState=notReminded&downloadState=inProgress")

    def test_reminderState_with_downloadState_3(self):
        self._assert_filter_ok("reminderState=notReminded&downloadState=suspended")

    def test_reminderState_with_downloadState_4(self):
        self._assert_filter_ok("reminderState=notReminded&downloadState=notStarted,inProgress")

    def test_reminderState_with_downloadState_5(self):
        self._assert_filter_ok("reminderState=notReminded&downloadState=notStarted,inProgress,suspended")

    def test_reminderState_with_recordingState_6(self):
        self._assert_filter_ok("reminderState=notReminded&recordingState=notStarted")

    def test_reminderState_with_recordingState_7(self):
        self._assert_filter_ok("reminderState=notReminded&recordingState=inProgress")

    def test_reminderState_with_recordingState_8(self):
        self._assert_filter_ok("reminderState=notReminded&recordingState=notStarted,inProgress")

    def test_reminderState_with_recordingContentState_9(self):
        self._assert_filter_ok("reminderState=notReminded&recordingContentState=partial")

    def test_reminderState_with_recordingContentState_10(self):
        self._assert_filter_ok("reminderState=notReminded&recordingContentState=complete")

    def test_reminderState_with_recordingContentState_11(self):
        self._assert_filter_ok("reminderState=notReminded&recordingContentState=partial,complete")

    def test_reminderState_with_downloadContentState_12(self):
        self._assert_filter_ok("reminderState=notReminded&downloadContentState=partial")

    def test_reminderState_with_downloadContentState_13(self):
        self._assert_filter_ok("reminderState=notReminded&downloadContentState=complete")

    def test_reminderState_with_downloadContentState_14(self):
        self._assert_filter_ok("reminderState=notReminded&downloadContentState=partial,complete")

    def test_reminderState_with_embed_15(self):
        self._assert_filter_ok("reminderState=notReminded&embed=eventBooking")

    def test_reminderState_with_embed_16(self):
        self._assert_filter_ok("reminderState=notReminded&embed=seasonBooking")

    def test_reminderState_with_embed_17(self):
        self._assert_filter_ok("reminderState=notReminded&embed=transcodeBooking")

    def test_reminderState_with_embed_18(self):
        self._assert_filter_ok("reminderState=notReminded&embed=transcodeSeasonBooking")

    def test_reminderState_with_embed_19(self):
        self._assert_filter_ok("reminderState=notReminded&embed=reminderBooking")

    def test_reminderState_with_embed_20(self):
        self._assert_filter_ok("reminderState=notReminded&embed=eventBooking,seasonBooking")

    def test_reminderState_with_embed_21(self):
        self._assert_filter_ok("reminderState=notReminded&embed=eventBooking,seasonBooking,transcodeBooking")

    def test_reminderState_with_embed_22(self):
        self._assert_filter_ok("reminderState=notReminded&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")

    def test_reminderState_with_embed_23(self):
        self._assert_filter_ok("reminderState=notReminded&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")

    def test_reminderState_with_bookingType_24(self):
        self._assert_filter_ok("reminderState=notReminded&bookingType=manual")

    def test_reminderState_with_bookingType_25(self):
        self._assert_filter_ok("reminderState=notReminded&bookingType=event")

    def test_reminderState_with_bookingType_26(self):
        self._assert_filter_ok("reminderState=notReminded&bookingType=manual,event")
class TestdownloadStateMajor:
    """Pairwise filter tests with downloadState as the fixed (major) key.

    Each test GETs the booking filter endpoint combining one downloadState
    value set with one other filter and only checks that the service
    answers HTTP 200.
    """

    # Shared step for all tests in this class; the leading underscore keeps
    # pytest from collecting it as a test.
    def _assert_filter_ok(self, query):
        """GET the booking filter URL built from *query*; assert status 200."""
        response = call_ref_url("get", make_booking_filter_url(query))
        assert response.status_code == 200

    def test_downloadState_with_recordingState_1(self):
        self._assert_filter_ok("downloadState=notStarted&recordingState=notStarted")

    def test_downloadState_with_recordingState_2(self):
        self._assert_filter_ok("downloadState=notStarted&recordingState=inProgress")

    def test_downloadState_with_recordingState_3(self):
        self._assert_filter_ok("downloadState=notStarted&recordingState=notStarted,inProgress")

    def test_downloadState_with_recordingContentState_4(self):
        self._assert_filter_ok("downloadState=notStarted&recordingContentState=partial")

    def test_downloadState_with_recordingContentState_5(self):
        self._assert_filter_ok("downloadState=notStarted&recordingContentState=complete")

    def test_downloadState_with_recordingContentState_6(self):
        self._assert_filter_ok("downloadState=notStarted&recordingContentState=partial,complete")

    def test_downloadState_with_downloadContentState_7(self):
        self._assert_filter_ok("downloadState=notStarted&downloadContentState=partial")

    def test_downloadState_with_downloadContentState_8(self):
        self._assert_filter_ok("downloadState=notStarted&downloadContentState=complete")

    def test_downloadState_with_downloadContentState_9(self):
        self._assert_filter_ok("downloadState=notStarted&downloadContentState=partial,complete")

    def test_downloadState_with_embed_10(self):
        self._assert_filter_ok("downloadState=notStarted&embed=eventBooking")

    def test_downloadState_with_embed_11(self):
        self._assert_filter_ok("downloadState=notStarted&embed=seasonBooking")

    def test_downloadState_with_embed_12(self):
        self._assert_filter_ok("downloadState=notStarted&embed=transcodeBooking")

    def test_downloadState_with_embed_13(self):
        self._assert_filter_ok("downloadState=notStarted&embed=transcodeSeasonBooking")

    def test_downloadState_with_embed_14(self):
        self._assert_filter_ok("downloadState=notStarted&embed=reminderBooking")

    def test_downloadState_with_embed_15(self):
        self._assert_filter_ok("downloadState=notStarted&embed=eventBooking,seasonBooking")

    def test_downloadState_with_embed_16(self):
        self._assert_filter_ok("downloadState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking")

    def test_downloadState_with_embed_17(self):
        self._assert_filter_ok("downloadState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")

    def test_downloadState_with_embed_18(self):
        self._assert_filter_ok("downloadState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")

    def test_downloadState_with_bookingType_19(self):
        self._assert_filter_ok("downloadState=notStarted&bookingType=manual")

    def test_downloadState_with_bookingType_20(self):
        self._assert_filter_ok("downloadState=notStarted&bookingType=event")

    def test_downloadState_with_bookingType_21(self):
        self._assert_filter_ok("downloadState=notStarted&bookingType=manual,event")

    def test_downloadState_with_recordingState_22(self):
        self._assert_filter_ok("downloadState=inProgress&recordingState=notStarted")

    def test_downloadState_with_recordingState_23(self):
        self._assert_filter_ok("downloadState=inProgress&recordingState=inProgress")

    def test_downloadState_with_recordingState_24(self):
        self._assert_filter_ok("downloadState=inProgress&recordingState=notStarted,inProgress")

    def test_downloadState_with_recordingContentState_25(self):
        self._assert_filter_ok("downloadState=inProgress&recordingContentState=partial")

    def test_downloadState_with_recordingContentState_26(self):
        self._assert_filter_ok("downloadState=inProgress&recordingContentState=complete")

    def test_downloadState_with_recordingContentState_27(self):
        self._assert_filter_ok("downloadState=inProgress&recordingContentState=partial,complete")

    def test_downloadState_with_downloadContentState_28(self):
        self._assert_filter_ok("downloadState=inProgress&downloadContentState=partial")

    def test_downloadState_with_downloadContentState_29(self):
        self._assert_filter_ok("downloadState=inProgress&downloadContentState=complete")

    def test_downloadState_with_downloadContentState_30(self):
        self._assert_filter_ok("downloadState=inProgress&downloadContentState=partial,complete")

    def test_downloadState_with_embed_31(self):
        self._assert_filter_ok("downloadState=inProgress&embed=eventBooking")

    def test_downloadState_with_embed_32(self):
        self._assert_filter_ok("downloadState=inProgress&embed=seasonBooking")

    def test_downloadState_with_embed_33(self):
        self._assert_filter_ok("downloadState=inProgress&embed=transcodeBooking")

    def test_downloadState_with_embed_34(self):
        self._assert_filter_ok("downloadState=inProgress&embed=transcodeSeasonBooking")

    def test_downloadState_with_embed_35(self):
        self._assert_filter_ok("downloadState=inProgress&embed=reminderBooking")

    def test_downloadState_with_embed_36(self):
        self._assert_filter_ok("downloadState=inProgress&embed=eventBooking,seasonBooking")

    def test_downloadState_with_embed_37(self):
        self._assert_filter_ok("downloadState=inProgress&embed=eventBooking,seasonBooking,transcodeBooking")

    def test_downloadState_with_embed_38(self):
        self._assert_filter_ok("downloadState=inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")

    def test_downloadState_with_embed_39(self):
        self._assert_filter_ok("downloadState=inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")

    def test_downloadState_with_bookingType_40(self):
        self._assert_filter_ok("downloadState=inProgress&bookingType=manual")

    def test_downloadState_with_bookingType_41(self):
        self._assert_filter_ok("downloadState=inProgress&bookingType=event")

    def test_downloadState_with_bookingType_42(self):
        self._assert_filter_ok("downloadState=inProgress&bookingType=manual,event")

    def test_downloadState_with_recordingState_43(self):
        self._assert_filter_ok("downloadState=suspended&recordingState=notStarted")

    def test_downloadState_with_recordingState_44(self):
        self._assert_filter_ok("downloadState=suspended&recordingState=inProgress")

    def test_downloadState_with_recordingState_45(self):
        self._assert_filter_ok("downloadState=suspended&recordingState=notStarted,inProgress")

    def test_downloadState_with_recordingContentState_46(self):
        self._assert_filter_ok("downloadState=suspended&recordingContentState=partial")

    def test_downloadState_with_recordingContentState_47(self):
        self._assert_filter_ok("downloadState=suspended&recordingContentState=complete")

    def test_downloadState_with_recordingContentState_48(self):
        self._assert_filter_ok("downloadState=suspended&recordingContentState=partial,complete")

    def test_downloadState_with_downloadContentState_49(self):
        self._assert_filter_ok("downloadState=suspended&downloadContentState=partial")

    def test_downloadState_with_downloadContentState_50(self):
        self._assert_filter_ok("downloadState=suspended&downloadContentState=complete")

    def test_downloadState_with_downloadContentState_51(self):
        self._assert_filter_ok("downloadState=suspended&downloadContentState=partial,complete")

    def test_downloadState_with_embed_52(self):
        self._assert_filter_ok("downloadState=suspended&embed=eventBooking")

    def test_downloadState_with_embed_53(self):
        self._assert_filter_ok("downloadState=suspended&embed=seasonBooking")

    def test_downloadState_with_embed_54(self):
        self._assert_filter_ok("downloadState=suspended&embed=transcodeBooking")

    def test_downloadState_with_embed_55(self):
        self._assert_filter_ok("downloadState=suspended&embed=transcodeSeasonBooking")

    def test_downloadState_with_embed_56(self):
        self._assert_filter_ok("downloadState=suspended&embed=reminderBooking")

    def test_downloadState_with_embed_57(self):
        self._assert_filter_ok("downloadState=suspended&embed=eventBooking,seasonBooking")

    def test_downloadState_with_embed_58(self):
        self._assert_filter_ok("downloadState=suspended&embed=eventBooking,seasonBooking,transcodeBooking")

    def test_downloadState_with_embed_59(self):
        self._assert_filter_ok("downloadState=suspended&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")

    def test_downloadState_with_embed_60(self):
        self._assert_filter_ok("downloadState=suspended&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")

    def test_downloadState_with_bookingType_61(self):
        self._assert_filter_ok("downloadState=suspended&bookingType=manual")

    def test_downloadState_with_bookingType_62(self):
        self._assert_filter_ok("downloadState=suspended&bookingType=event")

    def test_downloadState_with_bookingType_63(self):
        self._assert_filter_ok("downloadState=suspended&bookingType=manual,event")

    def test_downloadState_with_recordingState_64(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&recordingState=notStarted")

    def test_downloadState_with_recordingState_65(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&recordingState=inProgress")

    def test_downloadState_with_recordingState_66(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&recordingState=notStarted,inProgress")

    def test_downloadState_with_recordingContentState_67(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&recordingContentState=partial")

    def test_downloadState_with_recordingContentState_68(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&recordingContentState=complete")

    def test_downloadState_with_recordingContentState_69(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&recordingContentState=partial,complete")

    def test_downloadState_with_downloadContentState_70(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&downloadContentState=partial")

    def test_downloadState_with_downloadContentState_71(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&downloadContentState=complete")

    def test_downloadState_with_downloadContentState_72(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&downloadContentState=partial,complete")

    def test_downloadState_with_embed_73(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=eventBooking")

    def test_downloadState_with_embed_74(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=seasonBooking")

    def test_downloadState_with_embed_75(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=transcodeBooking")

    def test_downloadState_with_embed_76(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=transcodeSeasonBooking")

    def test_downloadState_with_embed_77(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=reminderBooking")

    def test_downloadState_with_embed_78(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=eventBooking,seasonBooking")

    def test_downloadState_with_embed_79(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=eventBooking,seasonBooking,transcodeBooking")

    def test_downloadState_with_embed_80(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")

    def test_downloadState_with_embed_81(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")

    def test_downloadState_with_bookingType_82(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&bookingType=manual")

    def test_downloadState_with_bookingType_83(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&bookingType=event")

    def test_downloadState_with_bookingType_84(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress&bookingType=manual,event")

    def test_downloadState_with_recordingState_85(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress,suspended&recordingState=notStarted")

    def test_downloadState_with_recordingState_86(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress,suspended&recordingState=inProgress")

    def test_downloadState_with_recordingState_87(self):
        self._assert_filter_ok("downloadState=notStarted,inProgress,suspended&recordingState=notStarted,inProgress")
def test_downloadState_with_recordingContentState_88(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&recordingContentState=partial"))
assert filter_response.status_code == 200
def test_downloadState_with_recordingContentState_89(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&recordingContentState=complete"))
assert filter_response.status_code == 200
def test_downloadState_with_recordingContentState_90(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&recordingContentState=partial,complete"))
assert filter_response.status_code == 200
def test_downloadState_with_downloadContentState_91(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&downloadContentState=partial"))
assert filter_response.status_code == 200
def test_downloadState_with_downloadContentState_92(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&downloadContentState=complete"))
assert filter_response.status_code == 200
def test_downloadState_with_downloadContentState_93(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&downloadContentState=partial,complete"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_94(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=eventBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_95(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_96(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_97(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_98(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_99(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_100(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_101(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_embed_102(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_downloadState_with_bookingType_103(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&bookingType=manual"))
assert filter_response.status_code == 200
def test_downloadState_with_bookingType_104(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&bookingType=event"))
assert filter_response.status_code == 200
def test_downloadState_with_bookingType_105(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted,inProgress,suspended&bookingType=manual,event"))
assert filter_response.status_code == 200
class TestrecordingStateMajor:
    """Smoke tests crossing ``recordingState`` with each secondary filter.

    Every case issues a GET against the booking filter endpoint (built by
    ``make_booking_filter_url``) and only checks that the reference server
    accepts the parameter combination with HTTP 200.
    """
    def test_recordingState_with_recordingContentState_1(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&recordingContentState=partial")).status_code == 200
    def test_recordingState_with_recordingContentState_2(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&recordingContentState=complete")).status_code == 200
    def test_recordingState_with_recordingContentState_3(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&recordingContentState=partial,complete")).status_code == 200
    def test_recordingState_with_downloadContentState_4(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&downloadContentState=partial")).status_code == 200
    def test_recordingState_with_downloadContentState_5(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&downloadContentState=complete")).status_code == 200
    def test_recordingState_with_downloadContentState_6(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&downloadContentState=partial,complete")).status_code == 200
    def test_recordingState_with_embed_7(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=eventBooking")).status_code == 200
    def test_recordingState_with_embed_8(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=seasonBooking")).status_code == 200
    def test_recordingState_with_embed_9(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=transcodeBooking")).status_code == 200
    def test_recordingState_with_embed_10(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=transcodeSeasonBooking")).status_code == 200
    def test_recordingState_with_embed_11(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=reminderBooking")).status_code == 200
    def test_recordingState_with_embed_12(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=eventBooking,seasonBooking")).status_code == 200
    def test_recordingState_with_embed_13(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking")).status_code == 200
    def test_recordingState_with_embed_14(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")).status_code == 200
    def test_recordingState_with_embed_15(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")).status_code == 200
    def test_recordingState_with_bookingType_16(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&bookingType=manual")).status_code == 200
    def test_recordingState_with_bookingType_17(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&bookingType=event")).status_code == 200
    def test_recordingState_with_bookingType_18(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted&bookingType=manual,event")).status_code == 200
    def test_recordingState_with_recordingContentState_19(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&recordingContentState=partial")).status_code == 200
    def test_recordingState_with_recordingContentState_20(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&recordingContentState=complete")).status_code == 200
    def test_recordingState_with_recordingContentState_21(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&recordingContentState=partial,complete")).status_code == 200
    def test_recordingState_with_downloadContentState_22(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&downloadContentState=partial")).status_code == 200
    def test_recordingState_with_downloadContentState_23(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&downloadContentState=complete")).status_code == 200
    def test_recordingState_with_downloadContentState_24(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&downloadContentState=partial,complete")).status_code == 200
    def test_recordingState_with_embed_25(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=eventBooking")).status_code == 200
    def test_recordingState_with_embed_26(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=seasonBooking")).status_code == 200
    def test_recordingState_with_embed_27(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=transcodeBooking")).status_code == 200
    def test_recordingState_with_embed_28(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=transcodeSeasonBooking")).status_code == 200
    def test_recordingState_with_embed_29(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=reminderBooking")).status_code == 200
    def test_recordingState_with_embed_30(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=eventBooking,seasonBooking")).status_code == 200
    def test_recordingState_with_embed_31(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=eventBooking,seasonBooking,transcodeBooking")).status_code == 200
    def test_recordingState_with_embed_32(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")).status_code == 200
    def test_recordingState_with_embed_33(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")).status_code == 200
    def test_recordingState_with_bookingType_34(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&bookingType=manual")).status_code == 200
    def test_recordingState_with_bookingType_35(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&bookingType=event")).status_code == 200
    def test_recordingState_with_bookingType_36(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=inProgress&bookingType=manual,event")).status_code == 200
    def test_recordingState_with_recordingContentState_37(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&recordingContentState=partial")).status_code == 200
    def test_recordingState_with_recordingContentState_38(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&recordingContentState=complete")).status_code == 200
    def test_recordingState_with_recordingContentState_39(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&recordingContentState=partial,complete")).status_code == 200
    def test_recordingState_with_downloadContentState_40(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&downloadContentState=partial")).status_code == 200
    def test_recordingState_with_downloadContentState_41(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&downloadContentState=complete")).status_code == 200
    def test_recordingState_with_downloadContentState_42(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&downloadContentState=partial,complete")).status_code == 200
    def test_recordingState_with_embed_43(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=eventBooking")).status_code == 200
    def test_recordingState_with_embed_44(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=seasonBooking")).status_code == 200
    def test_recordingState_with_embed_45(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=transcodeBooking")).status_code == 200
    def test_recordingState_with_embed_46(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=transcodeSeasonBooking")).status_code == 200
    def test_recordingState_with_embed_47(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=reminderBooking")).status_code == 200
    def test_recordingState_with_embed_48(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=eventBooking,seasonBooking")).status_code == 200
    def test_recordingState_with_embed_49(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=eventBooking,seasonBooking,transcodeBooking")).status_code == 200
    def test_recordingState_with_embed_50(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")).status_code == 200
    def test_recordingState_with_embed_51(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")).status_code == 200
    def test_recordingState_with_bookingType_52(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&bookingType=manual")).status_code == 200
    def test_recordingState_with_bookingType_53(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&bookingType=event")).status_code == 200
    def test_recordingState_with_bookingType_54(self):
        assert call_ref_url("get", make_booking_filter_url("recordingState=notStarted,inProgress&bookingType=manual,event")).status_code == 200
class TestrecordingContentStateMajor:
    """Smoke tests crossing ``recordingContentState`` with each secondary filter.

    Every case issues a GET against the booking filter endpoint (built by
    ``make_booking_filter_url``) and only checks that the reference server
    accepts the parameter combination with HTTP 200.
    """
    def test_recordingContentState_with_downloadContentState_1(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&downloadContentState=partial")).status_code == 200
    def test_recordingContentState_with_downloadContentState_2(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&downloadContentState=complete")).status_code == 200
    def test_recordingContentState_with_downloadContentState_3(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&downloadContentState=partial,complete")).status_code == 200
    def test_recordingContentState_with_embed_4(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=eventBooking")).status_code == 200
    def test_recordingContentState_with_embed_5(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=seasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_6(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=transcodeBooking")).status_code == 200
    def test_recordingContentState_with_embed_7(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=transcodeSeasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_8(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=reminderBooking")).status_code == 200
    def test_recordingContentState_with_embed_9(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=eventBooking,seasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_10(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=eventBooking,seasonBooking,transcodeBooking")).status_code == 200
    def test_recordingContentState_with_embed_11(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_12(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")).status_code == 200
    def test_recordingContentState_with_bookingType_13(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&bookingType=manual")).status_code == 200
    def test_recordingContentState_with_bookingType_14(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&bookingType=event")).status_code == 200
    def test_recordingContentState_with_bookingType_15(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial&bookingType=manual,event")).status_code == 200
    def test_recordingContentState_with_downloadContentState_16(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&downloadContentState=partial")).status_code == 200
    def test_recordingContentState_with_downloadContentState_17(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&downloadContentState=complete")).status_code == 200
    def test_recordingContentState_with_downloadContentState_18(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&downloadContentState=partial,complete")).status_code == 200
    def test_recordingContentState_with_embed_19(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=eventBooking")).status_code == 200
    def test_recordingContentState_with_embed_20(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=seasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_21(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=transcodeBooking")).status_code == 200
    def test_recordingContentState_with_embed_22(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=transcodeSeasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_23(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=reminderBooking")).status_code == 200
    def test_recordingContentState_with_embed_24(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=eventBooking,seasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_25(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=eventBooking,seasonBooking,transcodeBooking")).status_code == 200
    def test_recordingContentState_with_embed_26(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_27(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")).status_code == 200
    def test_recordingContentState_with_bookingType_28(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&bookingType=manual")).status_code == 200
    def test_recordingContentState_with_bookingType_29(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&bookingType=event")).status_code == 200
    def test_recordingContentState_with_bookingType_30(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=complete&bookingType=manual,event")).status_code == 200
    def test_recordingContentState_with_downloadContentState_31(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&downloadContentState=partial")).status_code == 200
    def test_recordingContentState_with_downloadContentState_32(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&downloadContentState=complete")).status_code == 200
    def test_recordingContentState_with_downloadContentState_33(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&downloadContentState=partial,complete")).status_code == 200
    def test_recordingContentState_with_embed_34(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=eventBooking")).status_code == 200
    def test_recordingContentState_with_embed_35(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=seasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_36(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=transcodeBooking")).status_code == 200
    def test_recordingContentState_with_embed_37(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=transcodeSeasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_38(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=reminderBooking")).status_code == 200
    def test_recordingContentState_with_embed_39(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=eventBooking,seasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_40(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=eventBooking,seasonBooking,transcodeBooking")).status_code == 200
    def test_recordingContentState_with_embed_41(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")).status_code == 200
    def test_recordingContentState_with_embed_42(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")).status_code == 200
    def test_recordingContentState_with_bookingType_43(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&bookingType=manual")).status_code == 200
    def test_recordingContentState_with_bookingType_44(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&bookingType=event")).status_code == 200
    def test_recordingContentState_with_bookingType_45(self):
        assert call_ref_url("get", make_booking_filter_url("recordingContentState=partial,complete&bookingType=manual,event")).status_code == 200
class TestdownloadContentStateMajor:
def test_downloadContentState_with_embed_1(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=eventBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_2(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_3(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_4(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_5(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_6(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_7(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_8(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_9(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_10(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&bookingType=manual"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_11(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&bookingType=event"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_12(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial&bookingType=manual,event"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_13(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=eventBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_14(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_15(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_16(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_17(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_18(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_19(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_20(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_21(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_22(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&bookingType=manual"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_23(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&bookingType=event"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_24(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=complete&bookingType=manual,event"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_25(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=eventBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_26(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=seasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_27(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_28(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_29(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=reminderBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_30(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=eventBooking,seasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_31(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=eventBooking,seasonBooking,transcodeBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_32(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_embed_33(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_34(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&bookingType=manual"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_35(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&bookingType=event"))
assert filter_response.status_code == 200
def test_downloadContentState_with_bookingType_36(self):
filter_response = call_ref_url("get", make_booking_filter_url("downloadContentState=partial,complete&bookingType=manual,event"))
assert filter_response.status_code == 200
class TestembedMajor:
    """Pairwise checks of the `embed` filter against `bookingType`.

    Every embed list (single values and growing combinations) crossed with
    every bookingType value must return HTTP 200 from the filter endpoint.
    """

    def test_embed_with_bookingType_1(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_2(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_3(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_4(self):
        assert call_ref_url("get", make_booking_filter_url("embed=seasonBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_5(self):
        assert call_ref_url("get", make_booking_filter_url("embed=seasonBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_6(self):
        assert call_ref_url("get", make_booking_filter_url("embed=seasonBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_7(self):
        assert call_ref_url("get", make_booking_filter_url("embed=transcodeBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_8(self):
        assert call_ref_url("get", make_booking_filter_url("embed=transcodeBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_9(self):
        assert call_ref_url("get", make_booking_filter_url("embed=transcodeBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_10(self):
        assert call_ref_url("get", make_booking_filter_url("embed=transcodeSeasonBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_11(self):
        assert call_ref_url("get", make_booking_filter_url("embed=transcodeSeasonBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_12(self):
        assert call_ref_url("get", make_booking_filter_url("embed=transcodeSeasonBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_13(self):
        assert call_ref_url("get", make_booking_filter_url("embed=reminderBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_14(self):
        assert call_ref_url("get", make_booking_filter_url("embed=reminderBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_15(self):
        assert call_ref_url("get", make_booking_filter_url("embed=reminderBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_16(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_17(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_18(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_19(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_20(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_21(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_22(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_23(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_24(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking&bookingType=manual,event")).status_code == 200

    def test_embed_with_bookingType_25(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking&bookingType=manual")).status_code == 200

    def test_embed_with_bookingType_26(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking&bookingType=event")).status_code == 200

    def test_embed_with_bookingType_27(self):
        assert call_ref_url("get", make_booking_filter_url("embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking&bookingType=manual,event")).status_code == 200
| soumyaslab/pythonlab | py2/excel_example/test_planner_extended_filters.py | Python | gpl-2.0 | 83,909 | 0.017841 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import logging
import os
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import saver as saver_lib
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
slim = tf.contrib.slim
# TODO: Replace with freeze_graph.freeze_graph_with_def_protos when
# newer version of Tensorflow becomes more common.
def freeze_graph_with_def_protos(
    input_graph_def,
    input_saver_def,
    input_checkpoint,
    output_node_names,
    restore_op_name,
    filename_tensor_name,
    clear_devices,
    initializer_nodes,
    variable_names_blacklist=''):
  """Converts all variables in a graph and checkpoint into constants.

  Restores the checkpoint into a fresh session and replaces every variable
  in the imported graph with a constant holding its checkpointed value.

  Args:
    input_graph_def: GraphDef proto holding the graph to freeze.
    input_saver_def: Optional SaverDef proto. When given it is used to
      restore the checkpoint; otherwise a Saver is built from the
      variables present in both the checkpoint and the graph.
    input_checkpoint: Path (or V2 prefix) of the checkpoint to bake in.
    output_node_names: Comma-separated names of the output nodes to keep.
    restore_op_name: Unused; kept for call compatibility.
    filename_tensor_name: Unused; kept for call compatibility.
    clear_devices: If True, strip explicit device placements so the frozen
      graph is portable across machines.
    initializer_nodes: Comma-separated initializer ops to run after the
      restore, or '' for none.
    variable_names_blacklist: Comma-separated variable names that should
      stay variables instead of being converted to constants.

  Returns:
    A GraphDef with variables replaced by constants.

  Raises:
    ValueError: If the checkpoint does not exist or `output_node_names`
      is empty.
  """
  del restore_op_name, filename_tensor_name  # Unused by updated loading code.
  # 'input_checkpoint' may be a prefix if we're using Saver V2 format
  if not saver_lib.checkpoint_exists(input_checkpoint):
    raise ValueError(
        'Input checkpoint "' + input_checkpoint + '" does not exist!')
  if not output_node_names:
    raise ValueError(
        'You must supply the name of a node to --output_node_names.')
  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ''
  # Import into the (empty) default graph so tensors can be looked up by name.
  _ = importer.import_graph_def(input_graph_def, name='')
  with session.Session() as sess:
    if input_saver_def:
      saver = saver_lib.Saver(saver_def=input_saver_def)
      saver.restore(sess, input_checkpoint)
    else:
      # No SaverDef supplied: reconstruct the variable list by intersecting
      # the checkpoint's variable names with tensors present in the graph.
      var_list = {}
      reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
      var_to_shape_map = reader.get_variable_to_shape_map()
      for key in var_to_shape_map:
        try:
          tensor = sess.graph.get_tensor_by_name(key + ':0')
        except KeyError:
          # This tensor doesn't exist in the graph (for example it's
          # 'global_step' or a similar housekeeping element) so skip it.
          continue
        var_list[key] = tensor
      saver = saver_lib.Saver(var_list=var_list)
      saver.restore(sess, input_checkpoint)
    if initializer_nodes:
      sess.run(initializer_nodes)
    variable_names_blacklist = (variable_names_blacklist.split(',') if
                                variable_names_blacklist else None)
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        input_graph_def,
        output_node_names.split(','),
        variable_names_blacklist=variable_names_blacklist)
  return output_graph_def
def get_frozen_graph_def(inference_graph_def, use_moving_averages,
                         input_checkpoint, output_node_names):
  """Bakes checkpointed weights into `inference_graph_def` as constants.

  When `use_moving_averages` is set, the exponential-moving-average shadow
  variables from the checkpoint are restored in place of the raw variables.
  """
  if use_moving_averages:
    ema = tf.train.ExponentialMovingAverage(0.0)
    saver = tf.train.Saver(ema.variables_to_restore())
  else:
    saver = tf.train.Saver()
  return freeze_graph_with_def_protos(
      input_graph_def=inference_graph_def,
      input_saver_def=saver.as_saver_def(),
      input_checkpoint=input_checkpoint,
      output_node_names=output_node_names,
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      clear_devices=True,
      initializer_nodes='')
# TODO: Support batch tf example inputs.
def _tf_example_input_placeholder():
  """Builds a scalar tf.Example string placeholder decoded to a 1-image batch."""
  serialized_example = tf.placeholder(
      tf.string, shape=[], name='tf_example')
  decoded = tf_example_decoder.TfExampleDecoder().decode(serialized_example)
  return tf.expand_dims(decoded[fields.InputDataFields.image], axis=0)
def _image_tensor_input_placeholder():
  """Builds a batched uint8 image placeholder named 'image_tensor'."""
  return tf.placeholder(
      dtype=tf.uint8, shape=(1, None, None, 3), name='image_tensor')
def _encoded_image_string_tensor_input_placeholder():
  """Builds a scalar placeholder for an encoded image, decoded to a batch of 1."""
  encoded = tf.placeholder(
      dtype=tf.string, shape=[], name='encoded_image_string_tensor')
  decoded = tf.image.decode_image(encoded, channels=3)
  decoded.set_shape((None, None, 3))
  return tf.expand_dims(decoded, axis=0)
# Maps the exporter's `input_type` string to the factory that builds the
# corresponding input placeholder (each factory returns a batched image tensor).
input_placeholder_fn_map = {
    'image_tensor': _image_tensor_input_placeholder,
    'encoded_image_string_tensor':
        _encoded_image_string_tensor_input_placeholder,
    'tf_example': _tf_example_input_placeholder,
}
def _add_output_tensor_nodes(postprocessed_tensors):
  """Renames detection outputs to their stable, exported node names.

  Creates identity nodes 'detection_boxes', 'detection_scores',
  'detection_classes' and 'num_detections' (plus 'detection_masks' when
  masks are present), and shifts class predictions by one so exported
  class ids are 1-based.

  Args:
    postprocessed_tensors: a dictionary containing the following fields
      'detection_boxes': [batch, max_detections, 4]
      'detection_scores': [batch, max_detections]
      'detection_classes': [batch, max_detections]
      'detection_masks': [batch, max_detections, mask_height, mask_width]
        (optional).
      'num_detections': [batch]

  Returns:
    A tensor dict mapping output names to the renamed tensors.
  """
  label_id_offset = 1  # Exported class ids are 1-based.
  outputs = {
      'detection_boxes': tf.identity(
          postprocessed_tensors.get('detection_boxes'),
          name='detection_boxes'),
      'detection_scores': tf.identity(
          postprocessed_tensors.get('detection_scores'),
          name='detection_scores'),
      'detection_classes': tf.identity(
          postprocessed_tensors.get('detection_classes') + label_id_offset,
          name='detection_classes'),
      'num_detections': tf.identity(
          postprocessed_tensors.get('num_detections'),
          name='num_detections'),
  }
  masks = postprocessed_tensors.get('detection_masks')
  if masks is not None:
    outputs['detection_masks'] = tf.identity(masks, name='detection_masks')
  return outputs
def _write_inference_graph(inference_graph_path,
                           checkpoint_path=None,
                           use_moving_averages=False,
                           output_node_names=(
                               'num_detections,detection_scores,'
                               'detection_boxes,detection_classes')):
  """Serializes the default graph to disk, optionally freezing weights.

  With a `checkpoint_path`, variables are folded into the graph as
  constants so inference needs no checkpoint files; `use_moving_averages`
  selects the EMA shadow variables instead of the raw ones. Without a
  checkpoint, the unfrozen GraphDef is written as-is.

  Args:
    inference_graph_path: Path to write inference graph.
    checkpoint_path: Optional path to the checkpoint file.
    use_moving_averages: Whether to export the original or the moving
      averages of the trainable variables from the checkpoint.
    output_node_names: Comma-separated output tensor names to keep.
  """
  graph_def = tf.get_default_graph().as_graph_def()
  if not checkpoint_path:
    tf.train.write_graph(graph_def,
                         os.path.dirname(inference_graph_path),
                         os.path.basename(inference_graph_path),
                         as_text=False)
    return
  frozen_graph_def = get_frozen_graph_def(
      inference_graph_def=graph_def,
      use_moving_averages=use_moving_averages,
      input_checkpoint=checkpoint_path,
      output_node_names=output_node_names,
  )
  with gfile.GFile(inference_graph_path, 'wb') as f:
    f.write(frozen_graph_def.SerializeToString())
  logging.info('%d ops in the final graph.', len(frozen_graph_def.node))
def _write_saved_model(inference_graph_path, inputs, outputs,
                       checkpoint_path=None, use_moving_averages=False):
  """Writes SavedModel to disk.

  If checkpoint_path is not None bakes the weights into the graph thereby
  eliminating the need of checkpoint files during inference. If the model
  was trained with moving averages, setting use_moving_averages to true
  restores the moving averages, otherwise the original set of variables
  is restored.

  Args:
    inference_graph_path: Path to write inference graph.
    inputs: The input image tensor to use for detection.
    outputs: A tensor dictionary containing the outputs of a DetectionModel.
    checkpoint_path: Optional path to the checkpoint file.
    use_moving_averages: Whether to export the original or the moving averages
      of the trainable variables from the checkpoint.
  """
  inference_graph_def = tf.get_default_graph().as_graph_def()
  checkpoint_graph_def = None
  if checkpoint_path:
    output_node_names = ','.join(outputs.keys())
    checkpoint_graph_def = get_frozen_graph_def(
        inference_graph_def=inference_graph_def,
        use_moving_averages=use_moving_averages,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names
    )
  # NOTE(review): if `checkpoint_path` is falsy, `checkpoint_graph_def`
  # stays None and the import below would fail -- confirm callers always
  # pass a checkpoint when requesting a SavedModel.
  with tf.Graph().as_default():
    with session.Session() as sess:
      # NOTE(review): tf.import_graph_def is called without name='', so the
      # imported nodes get the default 'import/' prefix, while `inputs` and
      # `outputs` reference tensors from the previously-default graph --
      # verify the signature points at the intended tensors.
      tf.import_graph_def(checkpoint_graph_def)
      builder = tf.saved_model.builder.SavedModelBuilder(inference_graph_path)
      # Build TensorInfo protos for the signature from the in/out tensors.
      tensor_info_inputs = {
          'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
      tensor_info_outputs = {}
      for k, v in outputs.items():
        tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)
      # Register everything under the default 'serving_default' signature.
      detection_signature = (
          tf.saved_model.signature_def_utils.build_signature_def(
              inputs=tensor_info_inputs,
              outputs=tensor_info_outputs,
              method_name=signature_constants.PREDICT_METHOD_NAME))
      builder.add_meta_graph_and_variables(
          sess, [tf.saved_model.tag_constants.SERVING],
          signature_def_map={
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  detection_signature,
          },
      )
      builder.save()
def _export_inference_graph(input_type,
                            detection_model,
                            use_moving_averages,
                            checkpoint_path,
                            inference_graph_path,
                            export_as_saved_model=False):
  """Builds the inference graph for `detection_model` and writes it out.

  Wires the requested input placeholder through preprocess / predict /
  postprocess, names the output tensors, then emits either a SavedModel or
  a (frozen) GraphDef depending on `export_as_saved_model`.
  """
  placeholder_fn = input_placeholder_fn_map.get(input_type)
  if placeholder_fn is None:
    raise ValueError('Unknown input type: {}'.format(input_type))
  inputs = tf.to_float(placeholder_fn())
  preprocessed = detection_model.preprocess(inputs)
  predictions = detection_model.predict(preprocessed)
  outputs = _add_output_tensor_nodes(detection_model.postprocess(predictions))
  if export_as_saved_model:
    _write_saved_model(inference_graph_path, inputs, outputs, checkpoint_path,
                       use_moving_averages)
  else:
    _write_inference_graph(inference_graph_path, checkpoint_path,
                           use_moving_averages,
                           output_node_names=','.join(outputs.keys()))
def export_inference_graph(input_type, pipeline_config, checkpoint_path,
                           inference_graph_path, export_as_saved_model=False):
  """Exports an inference graph for the model specified in `pipeline_config`.

  Args:
    input_type: Type of input for the graph. Can be one of [`image_tensor`,
      `tf_example`].
    pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
    checkpoint_path: Path to the checkpoint file to freeze.
    inference_graph_path: Path to write inference graph to.
    export_as_saved_model: If the model should be exported as a SavedModel.
      If false, it is saved as an inference graph.
  """
  detection_model = model_builder.build(pipeline_config.model,
                                        is_training=False)
  _export_inference_graph(input_type,
                          detection_model,
                          pipeline_config.eval_config.use_moving_averages,
                          checkpoint_path,
                          inference_graph_path,
                          export_as_saved_model)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/exporter.py | Python | bsd-2-clause | 13,779 | 0.006387 |
import os
import glob

#####################################################
###### Init the files ###############################
#####################################################


def _reset_outputs(prefix, count=10):
    """Delete stale output files for `prefix` and reopen them for appending.

    The original script os.remove()'d each file unconditionally, which
    crashed with an OSError on a fresh checkout; removal is now best-effort.
    Returns the list of open file handles (prefix0.txt .. prefix9.txt).
    """
    handles = []
    for i in range(count):
        path = "%s%d.txt" % (prefix, i)
        try:
            os.remove(path)
        except OSError:
            pass  # File absent on first run -- nothing to delete.
        handles.append(open(path, "a"))
    return handles


format_a = _reset_outputs("a")
format_n = _reset_outputs("n")
format_v = _reset_outputs("v")

the_attack_files = glob.glob("../Basic_Attack/*.txt")
the_normal_files = glob.glob("../Normal_Data/*.txt")
the_vali_files = glob.glob("../Vali_Data/*.txt")

#####################################################
######## Format the files ###########################
#####################################################


def _read_sequences(paths):
    """Read each trace file into a list of whitespace-separated tokens."""
    sequences = []
    for path in paths:
        with open(path, "r") as handle:
            sequences.append(handle.read().split())
    return sequences


attack_words = _read_sequences(the_attack_files)
normal_words = _read_sequences(the_normal_files)
vali_words = _read_sequences(the_vali_files)

# Each of the 10 output files receives roughly a tenth of the sequences.
# `//` keeps Python 2's integer-division behaviour on Python 3, and the
# max(..., 1) guard avoids a modulo-by-zero crash when a dataset holds
# fewer than 10 sequences.
files_a = max(len(attack_words) // 10, 1)
files_n = max(len(normal_words) // 10, 1)
files_v = max(len(vali_words) // 10, 1)

print("Normal Words: " + str(len(normal_words)))
print("Average normal words per formatted file: " + str(files_n))
print("Attack Words: " + str(len(attack_words)))
print("Average attack words per formatted file: " + str(files_a))
print("Validation Words: " + str(len(vali_words)))
print("Average validation words per formatted file: " + str(files_v))

# raw_input only exists on Python 2; fall back to input on Python 3.
try:
    _prompt = raw_input
except NameError:
    _prompt = input

input_n = _prompt("Please input a value for n: ")
print("Performing formatting with " + str(input_n) + " grams...")
n = int(input_n)


def _write_ngrams(sequences, handles, n, label, per_file, block_name):
    """Emit labelled n-grams for each sequence, rotating across `handles`.

    Every window of n consecutive tokens is written as
    "t1 t2 ... tn <label>"; each sequence is terminated with a literal
    "new" line.  After every `per_file` sequences the current handle is
    closed and the next one becomes the target; the last handle absorbs
    any remainder.
    """
    index = 0
    sink = handles[index]
    done = 0
    for sequence in sequences:
        for start in range(0, len(sequence) - (n - 1)):
            window = sequence[start:start + n]
            sink.write(" ".join(str(tok) for tok in window) + " " + label + "\n")
        sink.write("new\n")
        done += 1
        if done % per_file == 0 and index < 9:
            print(str(done) + " instances in " + block_name + "...")
            sink.close()
            index = index + 1
            sink = handles[index]
    sink.close()  # Close the last file too (the original leaked it).


#####################################################
######## Generate the n-gram ########################
######### and write that to the file ################
#####################################################
_write_ngrams(normal_words, format_n, n, "0", files_n, "norm_block")
_write_ngrams(attack_words, format_a, n, "1", files_a, "att_block")
_write_ngrams(vali_words, format_v, n, "0", files_v, "vali_block")

print("Data Formatted...")
| doylew/detectionsc | format_py/ngram_nskip.py | Python | mit | 6,042 | 0.015227 |
import random
from django.dispatch import receiver
from django.conf import settings
from readthedocs.restapi.signals import footer_response
from readthedocs.donate.models import SupporterPromo
from readthedocs.donate.constants import INCLUDE, EXCLUDE
from readthedocs.donate.utils import offer_promo
PROMO_GEO_PATH = getattr(settings, 'PROMO_GEO_PATH', None)
if PROMO_GEO_PATH:
import geoip2.database # noqa
from geoip2.errors import AddressNotFoundError # noqa
geo_reader = geoip2.database.Reader(PROMO_GEO_PATH)
def show_to_geo(promo, country_code):
    """Return True if `promo` may be shown in `country_code`.

    A promo is rejected when any INCLUDE filter does not list the country,
    or when any EXCLUDE filter does list it.  With no geo filters (or only
    filters that pass), the promo is shown.
    """
    # The original loop variable shadowed the builtin `filter`; renamed.
    for geo_filter in promo.geo_filters.all():
        country_listed = country_code in geo_filter.codes
        if geo_filter.filter_type == INCLUDE and not country_listed:
            return False
        if geo_filter.filter_type == EXCLUDE and country_listed:
            return False
    return True
def show_to_programming_language(promo, programming_language):
    """Return True if `promo` should be shown for `programming_language`.

    A promo without a language restriction is shown to every language;
    otherwise the languages must match exactly.
    """
    if not promo.programming_language:
        return True
    return programming_language == promo.programming_language
def choose_promo(promo_list):
    """Pick which promo to show, weighted by views still needed today.

    Each promo is assigned a contiguous slot of the 0..total range sized
    by the number of views it still needs today; a uniform random draw
    over the combined range selects the promo whose slot contains it.

    In the future, we should take into account the expected views for
    today (the number of views from this day last week), scaling "total
    ads sold" against that to spread ads throughout the day.

    Args:
        promo_list: iterable of promos exposing ``views_needed_today()``.

    Returns:
        The chosen promo, or ``None`` when nothing matched (e.g. an empty
        ``promo_list``).
    """
    slots = []
    total_views_needed = 0
    for promo in promo_list:
        # Hoisted: the original called views_needed_today() three times
        # per promo to obtain the same value.
        needed = promo.views_needed_today()
        slots.append((total_views_needed, total_views_needed + needed, promo))
        total_views_needed += needed
    # randint is inclusive on both ends, matching the inclusive slot bounds.
    choice = random.randint(0, total_views_needed)
    for low, high, promo in slots:
        if low <= choice <= high:
            return promo
    return None
def get_promo(country_code, programming_language, gold_project=False, gold_user=False):
    """
    Get a proper promo.

    Filters the live 'doc' promos by country and programming language,
    weights the survivors with choose_promo, then applies fallbacks in
    order of increasing precedence: the randomized 'house' ad when nothing
    matched, the 'gold-user' promo for gold users, and finally the
    'gold-project' promo for gold projects.

    Returns the selected SupporterPromo, or None if nothing is available.
    """
    promo_queryset = SupporterPromo.objects.filter(live=True, display_type='doc')
    filtered_promos = []
    for obj in promo_queryset:
        # Break out if we aren't meant to show to this language
        if obj.programming_language and not show_to_programming_language(obj, programming_language):
            continue
        # Break out if we aren't meant to show to this country
        if country_code and not show_to_geo(obj, country_code):
            continue
        # If we haven't bailed because of language or country, possibly show the promo
        filtered_promos.append(obj)
    promo_obj = choose_promo(filtered_promos)
    # Show a random house ad if we don't have anything else
    if not promo_obj:
        house_promo = SupporterPromo.objects.filter(live=True,
                                                    name='house').order_by('?')
        if house_promo.exists():
            promo_obj = house_promo.first()
    # Support showing a "Thank you" message for gold folks
    if gold_user:
        gold_promo = SupporterPromo.objects.filter(live=True,
                                                   name='gold-user')
        if gold_promo.exists():
            promo_obj = gold_promo.first()
    # Default to showing project-level thanks if it exists
    # (checked last, so it takes precedence over the gold-user promo)
    if gold_project:
        gold_promo = SupporterPromo.objects.filter(live=True,
                                                   name='gold-project')
        if gold_promo.exists():
            promo_obj = gold_promo.first()
    return promo_obj
@receiver(footer_response)
def attach_promo_data(sender, **kwargs):
    """Attach promo information to the footer API response.

    Signal handler for ``footer_response``. Decides whether a promo should be
    shown for this request/project, picks one via :func:`get_promo`, and
    mutates ``kwargs['resp_data']`` in place: always sets ``promo`` (bool),
    and sets ``promo_data`` when a promo is shown.
    """
    request = kwargs['request']
    context = kwargs['context']
    resp_data = kwargs['resp_data']
    project = context['project']
    # Bail out early if promo's are disabled site-wide (defaults to enabled).
    use_promo = getattr(settings, 'USE_PROMOS', True)
    if not use_promo:
        resp_data['promo'] = False
        return
    gold_user = gold_project = False
    promo_obj = country_code = None
    # Per-project opt-out.
    show_promo = project.allow_promos
    # The request is by a GoldUser
    if request.user.is_authenticated():
        if request.user.gold.count() or request.user.goldonce.count():
            gold_user = True
    # A GoldUser has mapped this project
    if project.gold_owners.count():
        gold_project = True
    # Don't show gold users promos.
    # This will get overridden if we have specific promos for them below.
    if gold_user or gold_project:
        show_promo = False
    if PROMO_GEO_PATH:
        # Get geo information from the IP, but don't record it anywhere
        ip = request.META.get('REMOTE_ADDR')
        if ip:
            try:
                geo_response = geo_reader.city(ip)
                country_code = geo_response.country.iso_code
            except (AddressNotFoundError, ValueError):  # Invalid IP
                country_code = None
    # Try to get a promo if we should be using one.
    if show_promo:
        promo_obj = get_promo(
            country_code=country_code,
            programming_language=project.programming_language,
            gold_project=gold_project,
            gold_user=gold_user,
        )
    # If we don't have anything to show, don't show it.
    if not promo_obj:
        show_promo = False
    if show_promo:
        promo_dict = offer_promo(promo_obj=promo_obj, project=project)
        resp_data['promo_data'] = promo_dict
    # Set promo object on return JSON
    resp_data['promo'] = show_promo
| tddv/readthedocs.org | readthedocs/donate/signals.py | Python | mit | 6,234 | 0.000642 |
"""Device tracker support for OPNSense routers."""
from homeassistant.components.device_tracker import DeviceScanner
from . import CONF_TRACKER_INTERFACE, OPNSENSE_DATA
async def async_get_scanner(hass, config, discovery_info=None):
    """Configure the OPNSense device_tracker."""
    opnsense_data = hass.data[OPNSENSE_DATA]
    return OPNSenseDeviceScanner(
        opnsense_data["interfaces"], opnsense_data[CONF_TRACKER_INTERFACE]
    )
class OPNSenseDeviceScanner(DeviceScanner):
    """Device scanner that queries a router running OPNsense."""

    def __init__(self, client, interfaces):
        """Initialize the scanner."""
        self.last_results = {}
        self.client = client
        self.interfaces = interfaces

    def _get_mac_addrs(self, devices):
        """Create dict with mac address keys from list of devices."""
        filtered = {}
        for entry in devices:
            # An empty interface filter means "track every interface".
            if not self.interfaces or entry["intf_description"] in self.interfaces:
                filtered[entry["mac"]] = entry
        return filtered

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self.update_info()
        return list(self.last_results)

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        entry = self.last_results.get(device)
        if entry is None:
            return None
        return entry.get("hostname") or None

    def update_info(self):
        """Ensure the information from the OPNSense router is up to date.

        Return boolean if scanning successful.
        """
        self.last_results = self._get_mac_addrs(self.client.get_arp())

    def get_extra_attributes(self, device):
        """Return the extra attrs of the given device."""
        if device not in self.last_results:
            return None
        manufacturer = self.last_results[device].get("manufacturer")
        if not manufacturer:
            return {}
        return {"manufacturer": manufacturer}
| home-assistant/home-assistant | homeassistant/components/opnsense/device_tracker.py | Python | apache-2.0 | 2,172 | 0 |
#!/usr/bin/env python
# Convert line elements with overlapping endpoints into polylines in an
# SVG file.
import os
import sys
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
from collections import defaultdict
from optparse import OptionParser
# XML namespace used to match SVG elements.
SVG_NS = 'http://www.w3.org/2000/svg'
# Direction flags for walking a chain of connected segments.
START = 1
END = 2
class Line(object):
    """A line segment parsed from an SVG ``<line>`` element."""

    def __init__(self, line_element):
        attrs = line_element.attrib
        self.x1 = float(attrs['x1'])
        self.y1 = float(attrs['y1'])
        self.x2 = float(attrs['x2'])
        self.y2 = float(attrs['y2'])
        self.strokeWidth = float(attrs['stroke-width'])

    def reverse(self):
        """Swap the start and end points in place."""
        self.x1, self.x2 = self.x2, self.x1
        self.y1, self.y2 = self.y2, self.y1

    def start_hash(self):
        """Key identifying the start point."""
        return '%s,%s' % (self.x1, self.y1)

    def end_hash(self):
        """Key identifying the end point."""
        return '%s,%s' % (self.x2, self.y2)

    def endpoint(self, direction):
        """Return the endpoint key for START or END."""
        return self.start_hash() if direction == START else self.end_hash()

    def get_other_hash(self, key):
        """Return the endpoint key opposite to *key*."""
        if self.start_hash() == key:
            return self.end_hash()
        return self.start_hash()

    def __repr__(self):
        return '((%s,%s),(%s,%s),sw:%s)' % (self.x1, self.y1,
                                            self.x2, self.y2,
                                            self.strokeWidth)
class EndpointHash(object):
    """Index of line segments keyed by their endpoint coordinates."""

    def __init__(self, lines):
        self.endpoints = defaultdict(list)
        for seg in lines:
            self.endpoints[seg.start_hash()].append(seg)
            self.endpoints[seg.end_hash()].append(seg)

    def count_overlapping_points(self):
        """Count points where more than one segment starts or ends."""
        return sum(1 for segs in self.endpoints.values() if len(segs) > 1)

    def _del_line(self, key, line):
        # Drop the segment from one bucket; remove empty buckets entirely.
        bucket = self.endpoints[key]
        bucket.remove(line)
        if not bucket:
            del self.endpoints[key]

    def remove_line(self, line):
        """Remove *line* from both of its endpoint buckets."""
        start_key = line.start_hash()
        self._del_line(start_key, line)
        self._del_line(line.get_other_hash(start_key), line)

    def pop_connected_line(self, line, key):
        """Pop and return a segment touching *key*, or None if none is left."""
        if key not in self.endpoints:
            return None
        connected = self.endpoints[key][0]
        self.remove_line(connected)
        return connected
def parse_svg(fname):
    """Parse the SVG file *fname* and return its element tree."""
    print("Parsing '%s'..." % (fname))
    return etree.parse(fname)
def get_lines(svg):
    """Collect every ``<line>`` element in the document as a Line object."""
    line_tag = '{%s}line' % SVG_NS
    return [Line(element) for element in svg.getroot().iter(line_tag)]
def align_lines(l1, l2):
    """Reverse *l2* if it runs opposite to *l1*.

    Two connected segments that share start-with-start or end-with-end need
    the second one flipped so they chain head-to-tail.
    """
    starts_match = l1.x1 == l2.x1 and l1.y1 == l2.y1
    ends_match = l1.x2 == l2.x2 and l1.y2 == l2.y2
    if starts_match or ends_match:
        l2.reverse()
def connect_lines(lines, endpoint_hash, line, direction, poly):
    """Grow *poly* from *line* in *direction*, consuming connected segments.

    Removes every segment it chains onto from both *lines* and
    *endpoint_hash*.
    """
    current = line
    while True:
        key = current.endpoint(direction)
        nxt = endpoint_hash.pop_connected_line(current, key)
        if not nxt:
            break
        # Prepend when growing from the start, append when growing from the end.
        if direction == START:
            poly.insert(0, nxt)
        else:
            poly.append(nxt)
        align_lines(current, nxt)
        lines.remove(nxt)
        current = nxt
def find_polylines(lines, endpoint_hash):
    """Consume *lines*, grouping connected segments into polylines."""
    polylines = []
    while lines:
        seed = lines.pop()
        endpoint_hash.remove_line(seed)
        poly = [seed]
        # Extend the chain towards both ends of the seed segment.
        connect_lines(lines, endpoint_hash, seed, START, poly)
        connect_lines(lines, endpoint_hash, seed, END, poly)
        polylines.append(poly)
    return polylines
def optimize(svg):
    """Group the document's line segments into polylines.

    Segments can only be merged when they share a stroke width, so they are
    bucketed by width before chaining. Returns a list of polylines (each a
    list of Line objects).
    """
    lines = get_lines(svg)
    print '%s line segments found' % len(lines)
    lines_by_width = defaultdict(list)
    for l in lines:
        lines_by_width[l.strokeWidth].append(l)
    # The flat list is no longer needed; free the reference early.
    del lines
    print '%s different stroke widths found:' % len(lines_by_width)
    for width, lines in lines_by_width.iteritems():
        print ' strokeWidth: %s (%s lines)' % (width, len(lines))
    polylines = []
    for width, lines in lines_by_width.iteritems():
        print 'Finding polylines (strokeWidth: %s)... ' % width
        endpoint_hash = EndpointHash(lines)
        overlapping_points = endpoint_hash.count_overlapping_points()
        # Trailing comma: Python 2 "print without newline".
        print (' %s line segments, %s overlapping points'
               % (len(lines), overlapping_points)),
        p = find_polylines(lines, endpoint_hash)
        print '-> %s polylines' % len(p)
        polylines += p
    return polylines
def write_svg(polylines, outfile):
    """Write *polylines* to *outfile* as an SVG of ``<polyline>`` elements."""
    print "Writing '%s'..." % outfile
    f = open(outfile, 'w')
    f.write("""<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg width="100%" height="100%" xmlns="http://www.w3.org/2000/svg" version="1.1">
""")
    def point_to_str(x, y):
        # Format one coordinate pair for the "points" attribute.
        return '%s,%s ' % (x, y)
    for p in polylines:
        points = []
        for line in p:
            # The first segment contributes its start point as well.
            if not points:
                points.append(point_to_str(line.x1, line.y1))
            points.append(point_to_str(line.x2, line.y2))
        # All segments in a polyline share one stroke width (see optimize()).
        f.write('<polyline fill="none" stroke="#000" stroke-width="%s" points="%s"/>\n'
                % (p[0].strokeWidth, ' '.join(points)))
    f.write('</svg>\n')
    f.close()
def get_filesize(fname):
    """Return the size of *fname* in bytes."""
    return os.path.getsize(fname)
def print_size_stats(infile, outfile):
    """Print original vs. optimized file sizes in KiB and percent."""
    insize = get_filesize(infile)
    outsize = get_filesize(outfile)
    ratio = float(outsize) / insize * 100
    print ('Original file size: %.2fKiB, new file size: %.2fKiB (%.2f)'
           % (insize / 1024., outsize / 1024., ratio))
def main():
    """Parse command-line args, optimize INFILE, write OUTFILE.

    Returns a process exit status: 0 on success, 2 on usage error.
    """
    usage = 'Usage: %prog INFILE OUTFILE'
    parser = OptionParser(usage=usage)
    options, args = parser.parse_args()
    if len(args) < 2:
        # parser.error() exits the process; the return is a defensive fallback.
        parser.error('input and output files must be specified')
        return 2
    infile = args[0]
    outfile = args[1]
    svg = parse_svg(infile)
    polylines = optimize(svg)
    print '%s polyline(s) found in total' % len(polylines)
    write_svg(polylines, outfile)
    print_size_stats(infile, outfile)
    return 0
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly with status 1 on Ctrl-C.
        sys.exit(1)
| johnnovak/polylinize.py | polylinize.py | Python | mit | 6,224 | 0.002731 |
from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection,
HTTPSConnection,
VerifiedHTTPSConnection,
HTTPException,
BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import (
get_host,
parse_url,
Url,
_normalize_host as normalize_host,
_encode_target,
)
from .util.queue import LifoQueue
# Py2/Py3-compatible range iterator.
xrange = six.moves.xrange

log = logging.getLogger(__name__)

# Sentinel distinguishing "argument not given" from an explicit ``None``.
_Default = object()


# Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """

    # Subclasses override with their URL scheme (e.g. "http", "https").
    scheme = None
    # Queue class used to store idle connections.
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")
        # Scheme-aware host normalization (module-level _normalize_host).
        self.host = _normalize_host(host, scheme=self.scheme)
        # Keep the raw (lower-cased) host for proxy tunnelling.
        self._proxy_host = host.lower()
        self.port = port

    def __str__(self):
        return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Return False to re-raise any potential exceptions
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errnos that indicate "no data yet" on a non-blocking socket, not a real error.
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        **conn_kw
    ):
        # See the class docstring for parameter semantics.
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)
        self.strict = strict
        # Accept a bare int/float timeout for backwards compatibility.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)
        if retries is None:
            retries = Retry.DEFAULT
        self.timeout = timeout
        self.retries = retries
        self.pool = self.QueueCls(maxsize)
        self.block = block
        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw
        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault("socket_options", [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning("Connection pool is full, discarding connection: %s", self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        # Hook for subclasses; HTTPS pools perform certificate checks here.
        pass
    def _prepare_proxy(self, conn):
        # Proxy-setup hook; nothing to do for plain HTTP connections.
        # HTTPSConnectionPool overrides this to establish the tunnel.
        pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
    def _make_request(
        self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
    ):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1
        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout
        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            # Not a timeout: re-raise the original error.
            raise
        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        if chunked:
            conn.request_chunked(method, url, **httplib_request_kw)
        else:
            conn.request(method, url, **httplib_request_kw)
        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout
        # App Engine doesn't have a sock attr
        if getattr(conn, "sock", None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout
                )
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)
        # Receive the response from the server
        try:
            try:
                # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:
                # Python 3
                try:
                    httplib_response = conn.getresponse()
                except BaseException as e:
                    # Remove the TypeError from the exception chain in
                    # Python 3 (including for exceptions like SystemExit).
                    # Otherwise it looks like a bug in the code.
                    six.raise_from(e, None)
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise
        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
        log.debug(
            '%s://%s:%s "%s %s %s" %s %s',
            self.scheme,
            self.host,
            self.port,
            method,
            url,
            http_version,
            httplib_response.status,
            httplib_response.length,
        )
        # Warn (but don't fail) when the server sent unparsable headers.
        try:
            assert_header_parsing(httplib_response.msg)
        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
            log.warning(
                "Failed to parse headers (url=%s): %s",
                self._absolute_url(url),
                hpe,
                exc_info=True,
            )
        return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        retries=None,
        redirect=True,
        assert_same_host=True,
        timeout=_Default,
        pool_timeout=None,
        release_conn=None,
        chunked=False,
        body_pos=None,
        **response_kw
    ):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.

        :param \\**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
        if release_conn is None:
            release_conn = response_kw.get("preload_content", True)
        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)
        # Ensure that the URL we're connecting to is properly encoded
        if url.startswith("/"):
            url = six.ensure_str(_encode_target(url))
        else:
            url = six.ensure_str(parse_url(url).url)
        conn = None
        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/urllib3/urllib3/issues/651>
        release_this_conn = release_conn
        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == "http":
            headers = headers.copy()
            headers.update(self.proxy_headers)
        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None
        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False
        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)
        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)
            conn.timeout = timeout_obj.connect_timeout
            # A proxied connection without a live socket still needs the
            # tunnel established.
            is_new_proxy_conn = self.proxy is not None and not getattr(
                conn, "sock", None
            )
            if is_new_proxy_conn:
                self._prepare_proxy(conn)
            # Make the request on the httplib connection object.
            httplib_response = self._make_request(
                conn,
                method,
                url,
                timeout=timeout_obj,
                body=body,
                headers=headers,
                chunked=chunked,
            )
            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None
            # Pass method to Response for length checking
            response_kw["request_method"] = method
            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(
                httplib_response,
                pool=self,
                connection=response_conn,
                retries=retries,
                **response_kw
            )
            # Everything went great!
            clean_exit = True
        except queue.Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")
        except (
            TimeoutError,
            HTTPException,
            SocketError,
            ProtocolError,
            BaseSSLError,
            SSLError,
            CertificateError,
        ) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            if isinstance(e, (BaseSSLError, CertificateError)):
                e = SSLError(e)
            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError("Cannot connect to proxy.", e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError("Connection aborted.", e)
            # increment() re-raises (via MaxRetryError) when retries are
            # exhausted or disabled; otherwise we fall through to retry below.
            retries = retries.increment(
                method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
            )
            retries.sleep()
            # Keep track of the error for the retry warning.
            err = e
        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True
            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)
        if not conn:
            # Try again
            log.warning(
                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
            )
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries,
                redirect,
                assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        def drain_and_release_conn(response):
            try:
                # discard any remaining response body, the connection will be
                # released back to the pool once the entire response is read
                response.read()
            except (
                TimeoutError,
                HTTPException,
                SocketError,
                ProtocolError,
                BaseSSLError,
                SSLError,
            ):
                pass

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            # Per RFC 7231, a 303 redirect switches the method to GET.
            if response.status == 303:
                method = "GET"
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Drain and release the connection for this response, since
                    # we're not returning it to be released manually.
                    drain_and_release_conn(response)
                    raise
                return response
            # drain and return the connection to the pool before recursing
            drain_and_release_conn(response)
            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method,
                redirect_location,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )
        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.getheader("Retry-After"))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    # Drain and release the connection for this response, since
                    # we're not returning it to be released manually.
                    drain_and_release_conn(response)
                    raise
                return response
            # drain and return the connection to the pool before recursing
            drain_and_release_conn(response)
            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )
        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.
    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.
    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """
    scheme = "https"
    # Class used to create new connections; may be DummyConnection when the
    # ssl module is unavailable (checked in _new_conn below).
    ConnectionCls = HTTPSConnection
    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        ssl_version=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        **conn_kw
    ):
        # Delegate generic pool behavior (sizing, blocking, retries, proxy
        # plumbing) to the HTTP base class.  These are positional, so the
        # order here must match HTTPConnectionPool.__init__'s signature.
        HTTPConnectionPool.__init__(
            self,
            host,
            port,
            strict,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw
        )
        # TLS-specific settings, applied per-connection in _prepare_conn().
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        # Only verified connections accept certificate settings; a plain
        # HTTPSConnection (no ssl module) is returned unchanged.
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(
                key_file=self.key_file,
                key_password=self.key_password,
                cert_file=self.cert_file,
                cert_reqs=self.cert_reqs,
                ca_certs=self.ca_certs,
                ca_cert_dir=self.ca_cert_dir,
                assert_hostname=self.assert_hostname,
                assert_fingerprint=self.assert_fingerprint,
            )
            conn.ssl_version = self.ssl_version
        return conn
    def _prepare_proxy(self, conn):
        """
        Establish tunnel connection early, because otherwise httplib
        would improperly set Host: header to proxy's IP:port.
        """
        conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
        conn.connect()
    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTPS connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "443",
        )
        # DummyConnection is substituted for ConnectionCls when the ssl
        # module could not be imported.
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            raise SSLError(
                "Can't connect to HTTPS URL because the SSL module is not available."
            )
        # When tunneling through a proxy, the TCP connection goes to the
        # proxy first; the tunnel to self.host is set up in _prepare_proxy.
        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port
        conn = self.ConnectionCls(
            host=actual_host,
            port=actual_port,
            timeout=self.timeout.connect_timeout,
            strict=self.strict,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            **self.conn_kw
        )
        return self._prepare_conn(conn)
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)
        # Force connect early to allow us to validate the connection.
        if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
            conn.connect()
        # Warn (once per warning filter settings) when certificate
        # verification is disabled or unavailable.
        if not conn.is_verified:
            warnings.warn(
                (
                    "Unverified HTTPS request is being made. "
                    "Adding certificate verification is strongly advised. See: "
                    "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
                    "#ssl-warnings"
                ),
                InsecureRequestWarning,
            )
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.
    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.
    :param url:
        Absolute URL string that must include the scheme. Port is optional.
    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.
    Example::
        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    # Fall back to the scheme's well-known port (default 80) when the URL
    # does not name one explicitly.
    port = port or port_by_scheme.get(scheme, 80)
    pool_class = HTTPSConnectionPool if scheme == "https" else HTTPConnectionPool
    return pool_class(host, port=port, **kw)
def _normalize_host(host, scheme):
    """
    Normalize hosts for comparisons and use with sockets.
    """
    host = normalize_host(host, scheme)
    # httplib doesn't like it when we include brackets in IPv6 addresses:
    # if we keep the brackets and also pass a port, it doubles up the
    # square brackets on the Host header, so strip them here.  We cannot
    # simply assert that the port is never None for backward-compatibility
    # reasons; see http://bugs.python.org/issue28539.
    if host.startswith("[") and host.endswith("]"):
        return host[1:-1]
    return host
| LukeMurphey/splunk-google-drive | src/bin/google_drive_app/urllib3/connectionpool.py | Python | apache-2.0 | 36,488 | 0.000658 |
import os
import sys

# Make the repository root (three directory levels up) importable so the
# shared "localpaths" module can be found by the test packages.
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))

import localpaths as _localpaths  # noqa: F401
| UK992/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/tests/__init__.py | Python | mpl-2.0 | 193 | 0 |
import sys
import time
import json
import logging
import random
import tornado.options
from tornado.options import define, options
from tornado import gen
# Command-line options.  srp_root selects which remote endpoint to exercise;
# the commented-out defines are the staging/production alternatives.
define('srp_root',default='http://192.168.56.1')
#define('srp_root',default='https://remote-staging.utorrent.com')
#define('srp_root',default='https://remote.utorrent.com')
define('debug',default=True)
define('verbose',default=1, type=int)
tornado.options.parse_command_line()
if options.debug:
    import pdb
import tornado.ioloop
from falcon_api.session import Session
from falcon_api.util import asyncsleep
from falcon_api.classic import Client
import tornado.httpclient
# Single shared async HTTP client; max_clients=1 serializes all fetches
# issued through it.
httpclient = tornado.httpclient.AsyncHTTPClient(force_instance=True, max_clients=1)
@gen.engine
def test_login():
    # Python 2 / tornado gen.engine style coroutine.  Credentials come from
    # the command line: argv[1] = username, argv[2] = password.
    username = sys.argv[1]
    password = sys.argv[2]
    # check result..
    #torrent = 'http://www.clearbits.net/get/503-control-alt-deus---made-of-fire.torrent'
    # Build a random 40-hex-char info-hash (NOTE: "hash" shadows the builtin)
    # and wrap it in a magnet link; the add_url call below is disabled, so the
    # link is currently unused.
    hash = ''.join([random.choice( list('abcdef') + map(str,range(10)) ) for _ in range(40)])
    torrent = 'magnet:?xt=urn:btih:%s' % hash
    for _ in range(1):
        client = Client(username, password)
        client.sync()
        yield gen.Task( asyncsleep, 1 )
        #client.add_url(torrent)
        client.stop()
    # For every fully-downloaded torrent (progress is per-mille, so 1000 means
    # 100%), fetch its file list and metadata concurrently.
    tasks = []
    for hash, torrent in client.torrents.items():
        if torrent.get('progress') == 1000:
            tasks.append( gen.Task( torrent.fetch_files ) )
            tasks.append( gen.Task( torrent.fetch_metadata ) )
    responses = yield gen.Multi( tasks )
    logging.info('responses %s' % [r.code for r in responses])
    # Download every file over its webseed link, five fetches at a time.
    tasks = []
    for hash, torrent in client.torrents.items():
        if torrent.get('progress') == 1000:
            for file in torrent.files:
                link = file.webseed_link()
                print link
                request = tornado.httpclient.HTTPRequest(link,
                                                         validate_cert=False)
                tasks.append( gen.Task( httpclient.fetch, request ) )
    # NOTE(review): tasks.pop() raises IndexError when len(tasks) is not a
    # multiple of 5 -- confirm whether that is acceptable for this smoke test.
    while tasks:
        some_tasks = [tasks.pop() for _ in range(5)]
        logging.info('executing tasks of len %s' % len(some_tasks))
        responses = yield gen.Multi( some_tasks )
        logging.info('responses %s' % [(r.code, len(r.body)) for r in responses])
    if False:
        # Dead code path kept for reference: fetch the per-torrent (rather
        # than per-file) webseed links.
        tasks = []
        for hash, torrent in client.torrents.items():
            if torrent.get('progress') == 1000:
                link = torrent.webseed_link()
                print torrent.get('name'), torrent.get('progress'), link
                request = tornado.httpclient.HTTPRequest(link,
                                                         validate_cert=False)
                tasks.append( gen.Task( httpclient.fetch, request ) )
        responses = yield gen.Multi( tasks )
        logging.info('responses %s' % [r.code for r in responses])
if __name__ == '__main__':
    # Kick off the coroutine, then run the tornado IO loop until interrupted.
    ioloop = tornado.ioloop.IOLoop.instance()
    test_login()
    ioloop.start()
| leiferikb/bitpop-private | bitpop_specific/extensions/bittorrent_surf/app/lib/falcon-api/python/falcon_api/test/classic.py | Python | bsd-3-clause | 2,962 | 0.015868 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Benny Malengier
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from . import MatchesFilterBase
#-------------------------------------------------------------------------
#
# MatchesFilter
#
#-------------------------------------------------------------------------
class MatchesSourceFilterBase(MatchesFilterBase):
    """
    Rule that checks against another filter.

    An object matches when at least one source cited by it passes the
    named source filter.
    """
    labels = [_('Source filter name:')]
    name = 'Objects with source matching the <source filter>'
    description = "Matches objects with sources that match the " \
                  "specified source filter name"
    category = _('Citation/source filters')
    # we want to have this filter show source filters
    namespace = 'Source'

    def prepare(self, db, user):
        """Resolve the named source filter once, before matching begins."""
        MatchesFilterBase.prepare(self, db, user)
        self.MSF_filt = self.find_filter()

    def apply(self, db, object):
        """Return True when any cited source passes the resolved filter."""
        matcher = self.MSF_filt
        if matcher is None:
            return False
        return any(
            matcher.check(
                db,
                db.get_citation_from_handle(handle).get_reference_handle())
            for handle in object.get_citation_list()
        )
| beernarrd/gramps | gramps/gen/filters/rules/_matchessourcefilterbase.py | Python | gpl-2.0 | 2,456 | 0.0057 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test runner objects that's only for end-to-end tests.
This package defines runners, which are used to execute test pipeline and
verify results.
"""
# Protect against environments where dataflow runner is not available.
# pylint: disable=wrong-import-order, wrong-import-position
from __future__ import absolute_import
try:
from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
from apache_beam.runners.direct.test_direct_runner import TestDirectRunner
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
| mxm/incubator-beam | sdks/python/apache_beam/runners/test/__init__.py | Python | apache-2.0 | 1,360 | 0.002941 |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import sys
import time
from datetime import datetime
from pathlib import Path
from ezdxf.acc import USE_C_EXT
from ezdxf.render.forms import ellipse
if USE_C_EXT is False:
    # This benchmark compares Python vs Cython code paths, so there is
    # nothing to measure when the compiled extensions are not loaded.
    print("C-extension disabled or not available.")
    sys.exit(1)
from ezdxf.math._construct import (
has_clockwise_orientation as py_has_clockwise_orientation,
)
from ezdxf.acc.construct import (
has_clockwise_orientation as cy_has_clockwise_orientation,
)
from ezdxf.math._construct import (
intersection_line_line_2d as py_intersection_line_line_2d,
)
from ezdxf.acc.construct import (
intersection_line_line_2d as cy_intersection_line_line_2d,
)
from ezdxf.version import __version__
from ezdxf.acc.vector import Vec2
def open_log(name: str):
    """Return a file object, open for appending, for the named CSV log.

    The file lives in the "logs" directory next to this script and is
    created with a header row on first use.  The caller is responsible
    for closing the returned handle.
    """
    log_path = Path(__file__).parent / "logs" / (name + ".csv")
    if not log_path.exists():
        with open(log_path, mode="wt") as fp:
            fp.write(
                '"timestamp"; "pytime"; "cytime"; '
                '"python_version"; "ezdxf_version"\n'
            )
    return open(log_path, mode="at")
def log(name: str, pytime: float, cytime: float):
    """Append one timestamped measurement row to the named CSV log."""
    timestamp = datetime.now().isoformat()
    row = f'{timestamp}; {pytime}; {cytime}; "{sys.version}"; "{__version__}"\n'
    with open_log(name) as log_file:
        log_file.write(row)
def profile1(func, *args) -> float:
    """Return the wall-clock seconds taken by one ``func(*args)`` call."""
    start = time.perf_counter()
    func(*args)
    return time.perf_counter() - start
def profile(text, log_name, pyfunc, cyfunc, *args):
    """Time both implementations, print a comparison, and append to the log."""
    pytime = profile1(pyfunc, *args)
    cytime = profile1(cyfunc, *args)
    print(f"Python - {text} {pytime:.3f}s")
    print(f"Cython - {text} {cytime:.3f}s")
    ratio = pytime / cytime
    print(f"Ratio {ratio:.1f}x")
    log(log_name, pytime, cytime)
def profile_py_has_clockwise_orientation(vertices, count):
    """Benchmark driver: run the pure-Python orientation test *count* times."""
    remaining = count
    while remaining > 0:
        py_has_clockwise_orientation(vertices)
        remaining -= 1
def profile_cy_has_clockwise_orientation(vertices, count):
    """Benchmark driver: run the Cython orientation test *count* times."""
    remaining = count
    while remaining > 0:
        cy_has_clockwise_orientation(vertices)
        remaining -= 1
def profile_py_intersection_line_line_2d(count):
    """Benchmark driver: crossing segments, pure-Python implementation."""
    horizontal = [Vec2(0, 0), Vec2(2, 0)]
    vertical = [Vec2(1, -1), Vec2(1, 1)]
    for _ in range(count):
        py_intersection_line_line_2d(horizontal, vertical)
def profile_cy_intersection_line_line_2d(count):
    """Benchmark driver: crossing segments, Cython implementation."""
    horizontal = [Vec2(0, 0), Vec2(2, 0)]
    vertical = [Vec2(1, -1), Vec2(1, 1)]
    for _ in range(count):
        cy_intersection_line_line_2d(horizontal, vertical)
def profile_py_no_intersection_line_line_2d(count):
    """Benchmark driver: parallel (non-intersecting) segments, Python."""
    lower = [Vec2(0, 0), Vec2(2, 0)]
    upper = [Vec2(0, 1), Vec2(2, 1)]
    for _ in range(count):
        py_intersection_line_line_2d(lower, upper)
def profile_cy_no_intersection_line_line_2d(count):
    """Benchmark driver: parallel (non-intersecting) segments, Cython."""
    lower = [Vec2(0, 0), Vec2(2, 0)]
    upper = [Vec2(0, 1), Vec2(2, 1)]
    for _ in range(count):
        cy_intersection_line_line_2d(lower, upper)
RUNS = 100_000
# 100-point ellipse polygon used as input for the orientation benchmark.
ellipse_vertices = list(ellipse(count=100, rx=10, ry=5))
print(f"Profiling 2D construction tools as Python and Cython implementations:")
profile(
    f"detect {RUNS}x clockwise orientation of {len(ellipse_vertices)} vertices:",
    "c2d_has_clockwise_orientation",
    profile_py_has_clockwise_orientation,
    profile_cy_has_clockwise_orientation,
    ellipse_vertices,
    RUNS,
)
profile(
    f"detect {RUNS}x real 2D line intersections:",
    "c2d_intersection_line_line_2d",
    profile_py_intersection_line_line_2d,
    profile_cy_intersection_line_line_2d,
    RUNS,
)
profile(
    f"detect {RUNS}x no 2D line intersections:",
    "c2d_no_intersection_line_line_2d",
    profile_py_no_intersection_line_line_2d,
    profile_cy_no_intersection_line_line_2d,
    RUNS,
)
| mozman/ezdxf | profiling/construct.py | Python | mit | 3,719 | 0.000269 |
from pylab import *
from scipy.io import loadmat, savemat
import time
import dicom
Rel = 4.5 # assumed Relaxivity of Gd-DTPA at 3 T [s^-1 [mmol Gd-DTPA]^{-1}]
flip_angle = 30 * pi / 180.0 # rad
TR = 5e-3 # sec
nx = 80
ny = 50
nt = 1321
noise_sigma = 0.2
random_seed = 1337
seed(random_seed)  # fixed seed so the generated noise is reproducible
dx = 1
deltat = 1
data_dir = 'DICOM/'
file_ext = 'QIBA_v06_Tofts_beta1'
outfile_base = 'qiba6'
data_dicom = zeros((nt, nx, ny))
t = 0.5*arange(nt) # ms
# Load the QIBA phantom time series, one DICOM file per time point.
print 'reading DICOMs from', data_dir
for k in range(nt):
    file_name = '%s/%s_%04d.dcm' % (data_dir, file_ext, k+1)
    dcm = dicom.read_file(file_name)
    data_dicom[k,:,:] = dcm.pixel_array.astype('float')
# Rows 10:70 hold the tissue region; rows 70: are the plasma ROI (see below).
data_dce = data_dicom[:,10:70,:]
nt, nx, ny = data_dce.shape
T1map = ones((nx, ny)) # s
R1map = 1 / T1map
S0map = ones((nx, ny)) * 50000.0 #
# Average the plasma ROI over both spatial axes to get one signal per frame.
data_aif = mean(mean(data_dicom[:,70:,:], axis=2), axis=1)
# Scale the noise level by the first tissue voxel's baseline signal.
noise_sigma *= data_dce[0,0,0]
# subsample data to speed up the run
data_dce = data_dce[::deltat,:,:]
data_aif = data_aif[::deltat] # TODO: do this better
t = t[::deltat]
nt = len(t)
def dce_to_r1eff(S, S0, R1, TR, flip):
    """Convert SPGR signal intensity to an effective R1 relaxation rate.

    Inverts the spoiled gradient-echo signal equation given the
    pre-contrast signal S0, baseline rate R1 (1/s), repetition time TR
    (s), and flip angle (rad).  Works elementwise on arrays.
    """
    signal = S.T
    baseline = S0.T
    ratio = signal / baseline  # enhancement relative to pre-contrast signal
    E0 = exp(-R1 * TR)
    cf = cos(flip)
    E = (1.0 - ratio + ratio * E0 - E0 * cf) / \
        (1.0 - ratio * cf + ratio * E0 * cf - E0 * cf)
    R1_eff = (-1.0 / TR) * log(E)
    return R1_eff.T
def r1eff_to_conc(R1eff, R1map, relaxivity):
    """Convert effective R1 to contrast-agent concentration (linear model)."""
    delta_R1 = R1eff - R1map
    return delta_R1 / relaxivity
T1p = 1.440  # plasma T1 [s]
R1p = 1 / T1p
Hct = 0.45  # hematocrit: converts whole-blood to plasma concentration
S0 = data_aif[:4].mean()  # pre-contrast baseline from the first 4 frames
R1_eff_aif = dce_to_r1eff(data_aif, S0, R1p, TR, flip_angle)
Cb = r1eff_to_conc(R1_eff_aif.flatten(), R1p, Rel)
Cp = Cb.flatten() / (1.0 - Hct)
## 3. Reduce the problem size averaging 10x10 ROIs to single pixels. ##
nx /= dx
ny /= dx
data_dce = data_dce[:,::dx,::dx]
# Take every 10th pixel as the representative of its 10x10 ROI.
R1map_reduced = R1map[::10,::10].copy()
S0map_reduced = S0map[::10,::10].copy()
data_dce_reduced = data_dce[:,::10,::10].copy()
mask = zeros_like(R1map) == 0  # all-True mask
mask_reduced = mask[::10,::10].copy()
print 'writing MAT files'
# Noise-free, reduced-size data set.
mat = {}
mat["relaxivity"] = 4.5
mat["TR"] = 5e-3
mat["DCEdata"] = data_dce_reduced
mat["DCEflip"] = 30.0
mat["R10"] = R1map_reduced
mat["S0"] = S0map_reduced
mat["t"] = t
mat["Cp"] = Cp
mat['mask'] = mask_reduced
mat['models'] = [2]
savemat(outfile_base + '.mat', mat)
# Full-size data set with complex (Rician-style) noise added.
data_dce = abs(data_dce + noise_sigma*(randn(nt, nx, ny) + 1j*randn(nt, nx, ny)) / sqrt(2.0))
#data_dce = data_dce + noise_sigma*randn(nt, nx, ny)
mat["R10"] = R1map
mat["S0"] = S0map
mat['DCEdata'] = data_dce
mat['mask'] = mask
savemat(outfile_base + 'noisy.mat', mat)
| JuliaPackageMirrors/DCEMRI.jl | test/q6/prep6.py | Python | mit | 2,599 | 0.014236 |
# Copyright (C) 2015 – 2021 Noa-Emil Nissinen (4shadoww)
from core.hakkuframework import *
from core import getpath
import http.client
import socket
# Module metadata consumed by the hakku framework's module loader.
conf = {
    "name": "apache_users", # Module's name (should be same as file's name)
    "version": "1.1", # Module version
    "shortdesc": "scan directory of apache users", # Short description
    "github": "4shadoww", # Author's github
    "author": "4shadoww", # Author
    "email": "[email protected]",
    "initdate": "2016-03-01",
    "lastmod": "2021-07-11",
    "apisupport": True  # presumably: module may be driven via the API -- confirm
}
# List of the variables
# Each entry maps a variable name to [default value, description].
variables = OrderedDict((
    ("target", ["google.com", "target address"]),
))
# Simple changelog
changelog = "Version 1.0:\nrelease"
def run():
    """Scan the target host for common Apache ``/~user`` directories.

    Probes every path from the ``apache_users.txt`` wordlist with a plain
    HTTP GET and reports hits.  Returns the list of paths that answered
    HTTP 200, or a ModuleError when the host cannot be resolved.
    """
    # Normalise the target: the scanner talks plain HTTP, so drop any scheme.
    variables['target'][0] = variables['target'][0].replace("http://", "")
    variables['target'][0] = variables['target'][0].replace("https://", "")
    print_info("your target : " + variables['target'][0])
    print_info("loading path list...")
    # "with" guarantees the wordlist file is closed even if reading fails
    # (the original left the handle open on error).
    with open(getpath.db() + 'apache_users.txt', 'r') as f:
        paths = [line.replace('\n', '') for line in f]
    try:
        paths_found = []
        for path in paths:
            conn = http.client.HTTPConnection(variables['target'][0])
            try:
                conn.request("GET", path)
                res = conn.getresponse()
                if res.status == 200:
                    print_success("[%s] ... [%s %s]" % (path, res.status, res.reason))
                    paths_found.append(path)
                else:
                    print_warning("[%s] ... [%s %s]" % (path, res.status, res.reason))
            finally:
                # Close each probe connection so sockets are not leaked
                # across wordlist iterations.
                conn.close()
        return paths_found
    except socket.gaierror:
        # DNS resolution failed: report and hand an error object back to
        # the framework (matching the module's existing convention).
        print_error("host is down")
        return ModuleError("host is down")
| 4shadoww/usploit | modules/apache_users.py | Python | mit | 1,754 | 0.005137 |
from __future__ import unicode_literals
from django.apps import AppConfig
class MergeserverConfig(AppConfig):
    """Django application configuration for the MergeServer app."""

    # Dotted module path Django uses to register this application.
    name = 'MergeServer'
| zeqing-guo/SPAKeyManager | MergeServer/apps.py | Python | gpl-3.0 | 138 | 0 |
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
General EasyBuild support for installing FastQC
@author: Emilio Palumbo
"""
import os
import stat
from easybuild.tools.filetools import run_cmd
from easybuild.easyblocks.generic.packedbinary import PackedBinary
class EB_FastQC(PackedBinary):
    """Support for installing FastQC.

    The only FastQC-specific step is marking the bundled ``fastqc``
    launcher as executable before the generic packed-binary install runs.
    """

    def install_step(self):
        """Make the launcher executable, then install as a packed binary."""
        launcher = "FastQC/fastqc"
        os.chdir(self.builddir)
        exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        os.chmod(launcher, os.stat(launcher).st_mode | exec_bits)
        super(EB_FastQC, self).install_step()
| ULHPC/modules | easybuild/easybuild-easyblocks/easybuild/easyblocks/f/fastqc.py | Python | mit | 1,735 | 0.002882 |
# -*- coding: utf-8 -*-
"""
plotting.py
Part of douglib. Used for general data plotting.
Created on Tue June 06 08:44:12 2014
@author: dthor
"""
# ---------------------------------------------------------------------------
### Imports
# ---------------------------------------------------------------------------
# Standard Library
# Third-Party
import matplotlib.pyplot as pyplot
# Package / Application
from .core import rc_to_radius
def radius_plot(rcd_list, die_xy, center_rc):
    """Scatter-plot die values against their radius from the wafer center.

    Each entry of rcd_list is (row, col, value, ...); radius is computed
    via rc_to_radius from the (row, col) pair.
    """
    radii = [rc_to_radius((rcd[0], rcd[1]), die_xy, center_rc)
             for rcd in rcd_list]
    values = [rcd[2] for rcd in rcd_list]
    pyplot.figure()
    pyplot.plot(radii, values, 'bo')
    pyplot.xlabel("Radius")
    pyplot.ylabel("Value")
    pyplot.show()
def main():
    """
    Runs only when module is called directly. Runs a quick sanity
    check on some of the functions in this module.
    """
    import random
    die_xy = (2.43, 3.3)
    center_rc = (24, 31.5)
    # Fake wafer map: noisy values that grow with distance from center.
    fake_rcd_list = [
        [row, col,
         random.normalvariate(10, 5)
         + rc_to_radius((row, col), die_xy, center_rc)]
        for row in range(30)
        for col in range(54)
    ]
    radius_plot(fake_rcd_list, die_xy, center_rc)
if __name__ == "__main__":
main()
| dougthor42/douglib | douglib/plotting.py | Python | mit | 1,474 | 0.000678 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, threading, subprocess, getopt, signal
# Locate the source-tree top level by probing parent directories (relative
# to this script) for scripts/TestUtil.py.
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
    path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
def removeTrustSettings():
    """Remove the OS X trust settings added for the test HTTP server cert."""
    serverCert = os.path.join(path[0], "certs", "server.pem")
    # A non-zero exit status from verify-cert means the cert is not trusted,
    # so there is nothing to remove.
    if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
        print("trust settings already removed")
        return
    sys.stdout.write("removing trust settings for the HTTP server certificate... ")
    sys.stdout.flush()
    if os.system("security remove-trusted-cert " + serverCert) != 0:
        print("\nerror: couldn't remove trust settings for the HTTP server certificate")
    else:
        print("ok")
#
# On OS X, provide an option to allow removing the trust settings
#
if TestUtil.isDarwin():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["clean"])
        if ("--clean", "") in opts:
            removeTrustSettings()
            sys.exit(0)
    except getopt.GetoptError:
        pass
# Launch the Java test controller jar, forwarding our own arguments.
version = "3.6.0"
jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
                   "java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version})
javaHome = os.environ.get("JAVA_HOME", "")
javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java"
command = [javaCmd, "-jar", jar]
if len(sys.argv) > 1:
    command += sys.argv[1:]
# bufsize=0 keeps the child's stdout unbuffered so it can be echoed live.
p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE,
                     stderr = subprocess.STDOUT, bufsize = 0)
def signal_handler(signal, frame):
    # Forward termination to the Java controller subprocess, then exit.
    # NOTE: the "signal" parameter shadows the signal module inside this
    # function (harmless here since the module is not used in the body).
    if p:
        p.terminate()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if TestUtil.isDarwin():
    #
    # On OS X, we set the trust settings on the certificate to prevent
    # the Web browsers from prompting the user about the unstrusted
    # certificate. Some browsers such as Chrome don't provide the
    # option to set this trust settings.
    #
    serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
    if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
        sys.stdout.write("adding trust settings for the HTTP server certificate... ")
        sys.stdout.flush()
        if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
            print("error: couldn't add trust settings for the HTTP server certificate")
        # NOTE(review): "ok" is printed even when add-trusted-cert failed
        # above -- looks like a missing else; confirm intent.
        print("ok")
        print("run " + sys.argv[0] + " --clean to remove the trust setting")
# Echo the controller's output byte-by-byte, dropping carriage returns.
while(True):
    c = p.stdout.read(1)
    if not c: break
    if c == '\r': continue
    # Depending on Python version and platform, the value c could be a
    # string or a bytes object.
    if type(c) != str:
        c = c.decode()
    sys.stdout.write(c)
    sys.stdout.flush()
| elijah513/ice | scripts/TestController.py | Python | gpl-2.0 | 3,512 | 0.008827 |
import warnings
from xigt.consts import (
ID,
TYPE,
ALIGNMENT,
CONTENT,
SEGMENTATION
)
from xigt.errors import (
XigtError,
XigtStructureError
)
from xigt.ref import id_re
# list.clear() doesn't exist in Python2, but del list[:] has other problems
# Probe for list.clear once at import time and bind the right implementation.
try:
    [].clear
except AttributeError:
    def listclear(x): del x[:]
else:
    def listclear(x): list.clear(x)
def _has_parent(obj):
return hasattr(obj, '_parent') and obj._parent is not None
class XigtContainerMixin(list):
    """
    Common methods for accessing subelements in XigtCorpus, Igt, and
    Tier objects.

    Instances behave like a list of child objects while also keeping a
    side index (_dict) from each child's id to the child, so lookups
    work both by position and by id.
    """
    def __init__(self, container=None, contained_type=None):
        # id -> contained object; kept in sync with the list contents
        self._dict = {}
        # only instances of _contained_type may be stored (None = any)
        self._contained_type = contained_type
        # object that children will point back to via _parent; self by default
        self._container = container if container is not None else self
    def __eq__(self, other):
        """Containers are equal when types, lengths, and all items match."""
        try:
            return (
                # quick check for comparing, e.g., XigtCorpus and Igt
                self._contained_type == other._contained_type
                and len(self) == len(other)
                and all(a == b for a, b in zip(self, other))
            )
        except AttributeError:
            # other is not a compatible container
            return False
    def __getitem__(self, obj_id):
        """Get an item by list index, slice, id string, or numeric string."""
        if isinstance(obj_id, (int, slice)):
            return list.__getitem__(self, obj_id)
        elif obj_id in self._dict:
            return self._dict[obj_id]
        else:
            # last resort: treat a string like "3" as a positional index
            try:
                return list.__getitem__(self, int(obj_id))
            except ValueError:
                pass
        raise KeyError(obj_id)
    def __setitem__(self, idx, obj):
        # only allow list indices, not dict keys (IDs)
        # NOTE: this method is destructive. check for broken refs here?
        self._assert_type(obj)
        try:
            cur_obj = list.__getitem__(self, idx)
        except TypeError:
            # idx was a numeric string; retry as an int index
            idx = int(idx)
            cur_obj = list.__getitem__(self, idx)
        # drop the replaced object's id from the index before re-mapping
        if cur_obj.id is not None:
            del self._dict[cur_obj.id]
        self._create_id_mapping(obj)
        list.__setitem__(self, idx, obj)
    def __delitem__(self, obj_id):
        # NOTE: this method is destructive. check for broken refs here?
        obj = self[obj_id]
        self.remove(obj)
    def get(self, obj_id, default=None):
        """Like __getitem__ but return *default* instead of raising."""
        try:
            return self[obj_id]
        except (KeyError, IndexError):
            pass
        return default
    def select(self, **kwargs):
        """Iterate over children whose attributes equal all given kwargs."""
        # handle namespace separately so we can lookup the nsmap
        if 'namespace' in kwargs and kwargs['namespace'] in self.nsmap:
            kwargs['namespace'] = self.nsmap[kwargs['namespace']]
        def match(x):
            return all(getattr(x, k, None) == v for k, v in kwargs.items())
        return filter(match, self)
    def _assert_type(self, obj):
        # reject objects of the wrong type before they enter the container
        if self._contained_type and not isinstance(obj, self._contained_type):
            raise XigtStructureError(
                'Only {} objects are allowed in this container.'
                .format(self._contained_type.__name__)
            )
    def append(self, obj):
        """Append *obj*, claim it as a child, and index its id."""
        self._assert_type(obj)
        obj._parent = self._container
        self._create_id_mapping(obj)
        list.append(self, obj)
    def insert(self, i, obj):
        """Insert *obj* at position *i*, claim it, and index its id."""
        self._assert_type(obj)
        obj._parent = self._container
        self._create_id_mapping(obj)
        list.insert(self, i, obj)
    def extend(self, objs):
        """Append each object in *objs* (with type checks and indexing)."""
        for obj in objs:
            self.append(obj)
    def remove(self, obj):
        # NOTE: this method is destructive. check for broken refs here?
        if obj.id is not None:
            del self._dict[obj.id]
        list.remove(self, obj)
    def clear(self):
        """Remove all children and reset the id index."""
        self._dict.clear()
        # list.clear doesn't exist in Python2
        # list.clear(self)
        listclear(self)
    def _create_id_mapping(self, obj):
        # register obj under its id, refusing duplicate ids
        if obj.id is not None:
            if obj.id in self._dict:
                raise XigtError(
                    'Id "{}" already exists in collection.'.format(obj.id),
                )
            self._dict[obj.id] = obj
    def refresh_index(self):
        """Rebuild the id index from the current list contents."""
        self._dict = {}
        for obj in self:
            self._create_id_mapping(obj)
    # deprecated methods
    def add(self, obj):
        """Deprecated alias of append()."""
        warnings.warn(
            'add(x) is deprecated; use append(x) instead.',
            DeprecationWarning
        )
        return self.append(obj)
    def add_list(self, objs):
        """Deprecated alias of extend()."""
        warnings.warn(
            'add_list(xs) is deprecated; use extend(xs) instead.',
            DeprecationWarning
        )
        return self.extend(objs)
class XigtAttributeMixin(object):
    """
    Manages the id, type, namespace, nsmap, and general XML attributes
    shared by Xigt objects.
    """
    def __init__(self, id=None, type=None, attributes=None,
                 namespace=None, nsmap=None):
        self.id = id
        self.type = type
        self.attributes = dict(attributes or [])
        self.namespace = namespace
        self.nsmap = nsmap
        # if id is not None or ID not in self.attributes:
        #     self.attributes[ID] = id
        # if type is not None or TYPE not in self.attributes:
        #     self.attributes[TYPE] = type
    def __eq__(self, other):
        """Equal when id, type, attributes, and namespace all match."""
        try:
            return (
                self.id == other.id
                and self.type == other.type
                and self.attributes == other.attributes
                and self.namespace == other.namespace
                # and self.nsmap == other.nsmap
            )
        except AttributeError:
            # other does not expose the compared attributes
            return False
    def get_attribute(self, key, default=None, inherit=False, namespace=None):
        """Return the attribute value for *key*.

        *key* may be a '{uri}name' or 'prefix:name' qualified name, or a
        plain name combined with *namespace* (a known prefix or a URI).
        When *inherit* is True, missing keys are looked up on ancestors
        via _parent.
        """
        if key is None:
            raise ValueError(
                'Attribute key must be of type str, not '
                + key.__class__.__name__
            )
        # expand 'prefix:name' or (namespace, name) into '{uri}name' form
        if not key.startswith('{') and ':' in key:
            prefix, suffix = key.split(':', 1)
            key = '{%s}%s' % (self.nsmap[prefix], suffix)
        elif namespace in self.nsmap:
            key = '{%s}%s' % (self.nsmap[namespace], key)
        elif namespace:
            key = '{%s}%s' % (namespace, key)
        try:
            return self.attributes[key]
        except KeyError:
            if inherit and _has_parent(self):
                return self._parent.get_attribute(
                    key, default, inherit, namespace=namespace
                )
            else:
                return default
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        # ids must match the XML-id-like pattern enforced by id_re
        if value is not None and not id_re.match(value):
            raise ValueError('Invalid ID: {}'.format(value))
        self._id = value
    @property
    def nsmap(self):
        # fall back to the parent's nsmap when none was set locally
        if self._nsmap is None:
            if _has_parent(self):
                return self._parent.nsmap
            else:
                return {}
        else:
            return self._nsmap
    @nsmap.setter
    def nsmap(self, value):
        if value is not None:
            value = dict(value or [])
        self._nsmap = value
    # no validation for type yet, so the property isn't necessary
    # @property
    # def type(self):
    #     return self._type
    # @type.setter
    # def type(self, value):
    #     self._type = value
class XigtReferenceAttributeMixin(object):
    """
    Manages the alignment/content/segmentation reference attributes that
    point at other objects, and lookups of referents/referrers via the IGT.
    """
    def __init__(self, alignment=None, content=None, segmentation=None):
        # segmentation is exclusive: it cannot co-occur with the others
        if segmentation and (content or alignment):
            raise XigtError(
                'The "segmentation" reference attribute cannot co-occur with '
                'the "content" or "alignment" reference attributes.'
            )
        if alignment is not None:
            self.attributes[ALIGNMENT] = alignment
        if content is not None:
            self.attributes[CONTENT] = content
        if segmentation is not None:
            self.attributes[SEGMENTATION] = segmentation
    def referents(self, refattrs=None):
        """Delegate to self.igt to find the objects this one refers to."""
        # NOTE(review): getattr without a default raises AttributeError when
        # 'igt' is missing entirely -- presumably subclasses always define
        # it; confirm.
        if not getattr(self, 'igt'):
            raise XigtError('Cannot retrieve referents; unspecified IGT.')
        if not getattr(self, 'id'):
            raise XigtError('Cannot retrieve referents; unspecified id.')
        return self.igt.referents(self.id, refattrs=refattrs)
    def referrers(self, refattrs=None):
        """Delegate to self.igt to find the objects that refer to this one."""
        if not getattr(self, 'igt'):
            raise XigtError('Cannot retrieve referrers; unspecified IGT.')
        if not getattr(self, 'id'):
            raise XigtError('Cannot retrieve referrers; unspecified id.')
        return self.igt.referrers(self.id, refattrs=refattrs)
    @property
    def alignment(self):
        return self.attributes.get(ALIGNMENT)
    @alignment.setter
    def alignment(self, value):
        self.attributes[ALIGNMENT] = value
    @property
    def content(self):
        return self.attributes.get(CONTENT)
    @content.setter
    def content(self, value):
        self.attributes[CONTENT] = value
    @property
    def segmentation(self):
        return self.attributes.get(SEGMENTATION)
    @segmentation.setter
    def segmentation(self, value):
        self.attributes[SEGMENTATION] = value
| goodmami/xigt | xigt/mixins.py | Python | mit | 9,014 | 0.000888 |
"""
# A Better Where
WHERE2 is a near-linear time top-down clustering algorithm.
WHERE2 updates an older version of WHERE with new Python tricks.
## Standard Header Stuff
"""
from __future__ import division, print_function
from pdb import set_trace
import sys
import types
from demos import *
from libWhere import *
from nasa93 import *
from settingsWhere import *
sys.dont_write_bytecode = True
sys.path.insert(0, '/Users/rkrsn/git/axe/axe/')
"""
## Dimensionality Reduction with Fastmap
Project data in N dimensions down to a single dimension connecting
two distant points. Divide that data at the median of those projections.
"""
def pairs(lst):
    """Yield every ordered pair (a, b) of items from lst, including a == b."""
    for left in lst[0:]:
        for right in lst[0:]:
            yield left, right
def somepairs(m, data):
    """Cheaply pick two mutually distant items ("poles") from data.

    Heuristic: start anywhere, jump to the furthest point, then jump to
    the point furthest from that.  NOTE: `any` here is presumably the
    random picker from libWhere (imported via *), not the builtin --
    confirm.  xrange implies this module targets Python 2.
    """
    reps = 1; cmax = -10e32;
    for _ in xrange(reps):
        one = any(data);
        two = furthest(m, one, data)
        three = furthest(m, two, data)
        c = dist(m, two, three) + 1e-5
        if c >= cmax:
            cmax = c;
            east, west = two, three
    return west, east
def allpairs(m, data):
    """Exhaustive O(n^2) search for a most-distant pair in `data`.

    Returns (west, east): two items whose mutual dist() is largest.
    """
    # Degenerate input: a lone item is its own pole pair (this matches the
    # original behavior, which ended up comparing the item with itself).
    if len(data) == 1:
        return data[0], data[0]
    cmax = -10e32
    for one in data:
        # BUG FIX: the original filter was `if not d == 1`, comparing each
        # row object against the integer 1 (always unequal), so nothing was
        # excluded -- not even `one` itself.  The intent is clearly to skip
        # self-pairs, so test identity against `one` instead.
        for two in [d for d in data if d is not one]:
            c = dist(m, one, two) + 1e-5
            if c >= cmax:
                cmax = c
                east, west = one, two
    return west, east
def fastmap(m, data):
    """Divide data into two halves using distance to two distant items.

    Projects every item onto the line joining the poles (cosine rule),
    sorts by that projection, and splits at the median.  Returns
    (wests, west, easts, east, c) where c is the pole-to-pole distance.
    NOTE: under Python 2, map() returns lists here; under Python 3 these
    would be lazy map objects.
    """
    west, east = somepairs(m, data)
    # Dead alternative pole selection, kept from the original:
    """
    one = any(data) # 1) pick anything
    west = furthest(m, one, data) # 2) west is as far as you can go from anything
    east = furthest(m, west, data) # 3) east is as far as you can go from west
    """
    c = dist(m, west, east) + 1e-5
    # now find everyone's distance
    lst = []
    for one in data:
        a = dist(m, one, west)
        b = dist(m, one, east)
        x = (a * a + c * c - b * b) / (2 * c) # cosine rule
        y = max(0, a ** 2 - x ** 2) ** 0.5 # not used, here for a demo
        lst += [(x, one)]
    lst = sorted(lst)
    mid = len(lst) // 2
    wests = map(second, lst[:mid])
    easts = map(second, lst[mid:])
    return wests, west, easts, east, c
def gt(x, y):
    """Comparator: is x strictly greater than y?  (Used by furthest().)"""
    return x > y
def lt(x, y):
    """Comparator: is x strictly less than y?  (Used by closest().)"""
    return x < y
"""
In the above:
+ _m_ is some model that generates candidate
solutions that we wish to niche.
+ _(west,east)_ are not _the_ most distant points
(that would require _N*N_ distance
calculations).
to each other.
This code needs some helper functions. _Dist_ uses
the standard Euclidean measure. Note that you tune
what it uses to define the niches (decisions or
objectives) using the _what_ parameter:
"""
def dist(m, i, j,
         what = lambda m: m.decisions):
    "Euclidean distance 0 <= d <= 1 between decisions"
    # `what` selects which columns participate (decisions by default;
    # pass lambda m: m.objectives to compare in objective space).
    # NOTE(review): n starts at the cell count and then accumulates
    # column weights, so the normalizer mixes a count with summed
    # weights -- looks suspicious but preserved as-is; confirm intent.
    n = len(i.cells)
    deltas = 0
    for c in what(m):
        n1 = norm(m, c, i.cells[c])
        n2 = norm(m, c, j.cells[c])
        inc = (n1 - n2) ** 2
        deltas += inc
        n += abs(m.w[c])
    return deltas ** 0.5 / n ** 0.5
"""
The _Dist_ function normalizes all the raw values zero to one.
"""
def norm(m, c, val):
    """Normalize val of column c within model m onto 0..1 using the
    column's lo/hi bounds (tiny epsilon avoids division by zero)."""
    low = atom(m.lo[c])
    high = atom(m.hi[c])
    return (atom(val) - low) / (high - low + 0.0001)
"""
Now we can define _furthest_:
"""
def furthest(m, i, all,
             init = 0,
             better = gt):
    """Return the member of `all` whose dist() from `i` is most extreme.

    With the defaults (init=0, better=gt) that is the furthest item;
    closest() reuses this with a huge init and better=lt.
    """
    winner, extreme = i, init
    for candidate in all:
        if i == candidate:
            continue
        measured = dist(m, i, candidate)
        if better(measured, extreme):
            winner, extreme = candidate, measured
    return winner
"""
And of course, _closest_:
"""
def closest(m, i, all):
    # furthest() with an inverted comparator and a huge initial distance
    # yields the nearest neighbour instead.
    return furthest(m, i, all, init = 10 ** 32, better = lt)
"""
## WHERE2 = Recursive Fastmap
WHERE2 finds everyone else's distance from the poles
and divides the data at the mean point of those
distances. This all stops if:
+ Any division has _tooFew_ solutions (say,
less than _sqrt_ of the total number of
solutions).
+ Something has gone horribly wrong and you are
recursing _tooDeep_
This code is controlled by the options in [_The_ settings](settingspy). For
example, if _The.pruning_ is true, we may ignore
some sub-tree (this process is discussed, later on).
Also, if _The.verbose_ is true, the _show_
function prints out a little tree showing the
progress (and to print indents in that tree, we use
the string _The.b4_). For example, here's WHERE2
dividing 93 examples from NASA93.
---| _where |-----------------
93
|.. 46
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. 47
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. 24
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
WHERE2 returns clusters, where each cluster contains
multiple solutions.
"""
def where2(m, data, lvl = 0, up = None, verbose = False):
    """Recursive fastmap clustering: returns the root of a binary tree.

    Leaves hold the clustered rows in node.val; internal nodes record the
    poles (west/east) and their separation c.  Recursion stops when a
    split is too small (The.what.minSize) or too deep (The.what.depthMax).
    """
    node = o(val = None, _up = up, _kids = [])
    def tooDeep(): return lvl > The.what.depthMax
    def tooFew() : return len(data) < The.what.minSize
    def show(suffix):
        # Optional progress tree, indented with The.what.b4 per level.
        if verbose:
            print(The.what.b4 * lvl, len(data),
                  suffix, ' ; ', id(node) % 1000, sep = '')
    if tooDeep() or tooFew():
        show(".")
        node.val = data  # leaf: keep the rows here
    else:
        show("")
        wests, west, easts, east, c = fastmap(m, data)
        node.update(c = c, east = east, west = west)
        # maybePrune may drop a dominated half (GALE-style pruning).
        goLeft, goRight = maybePrune(m, lvl, west, east)
        if goLeft:
            node._kids += [where2(m, wests, lvl + 1, node)]
        if goRight:
            node._kids += [where2(m, easts, lvl + 1, node)]
    return node
"""
## An Experimental Extensions
Lately I've been experimenting with a system that
prunes as it divides the data. GALE checks for
domination between the poles and ignores data in
halves with a dominated pole. This means that for
_N_ solutions we only ever have to evaluate
_2*log(N)_ of them- which is useful if each
evaluation takes a long time.
The niches found in this way
contain non-dominated poles; i.e. they are
approximations to the Pareto frontier.
Preliminary results show that this is a useful
approach but you should treat those results with a
grain of salt.
In any case, this code supports that pruning as an
optional extra (and is enabled using the
_slots.pruning_ flag). In summary, this code says if
the scores for the poles are more different that
_slots.wriggle_ and one pole has a better score than
the other, then ignore the other pole.
"""
def maybePrune(m, lvl, west, east):
    "Usually, go left then right, unless dominated."
    # Returns (goLeft, goRight).  Pruning only kicks in when enabled
    # (The.prune) and deep enough (The.what.depthMin), and only when the
    # pole scores differ by more than The.wriggle.
    goLeft, goRight = True, True # default
    if The.prune and lvl >= The.what.depthMin:
        sw = scores(m, west)
        se = scores(m, east)
        if abs(sw - se) > The.wriggle: # big enough to consider
            if se > sw: goLeft = False # no left
            if sw > se: goRight = False # no right
    return goLeft, goRight
"""
Note that I do not allow pruning until we have
descended at least _slots.depthMin_ into the tree.
### Model-specific Stuff
WHERE2 talks to models via the the following model-specific variables:
+ _m.cols_: list of indices in a list
+ _m.names_: a list of names for each column.
+ _m.decisions_: the subset of cols relating to decisions.
+ _m.objectives_: the subset of cols relating to objectives.
+ _m.eval(m,eg)_: function for computing variables from _eg_.
+ _m.lo[c]_ : the lowest value in column _c_.
+ _m.hi[c]_ : the highest value in column _c_.
+ _m.w[c]_: the weight for each column. Usually equal to one.
If an objective and if we are minimizing that objective, then the weight is negative.
### Model-general stuff
Using the model-specific stuff, WHERE2 defines some
useful general functions.
"""
def some(m, x) :
    "with variable x of model m, pick one value at random"
    # `by` is presumably libWhere's random-fraction helper -- confirm.
    return m.lo[x] + by(m.hi[x] - m.lo[x])
def scores(m, it):
    "Score an individual."
    # Memoized on it.scored: evaluates the row once, then aggregates the
    # normalized objective values into a weighted Euclidean norm.
    # Negative column weights mark minimized objectives, so those values
    # are flipped (1 - tmp) before aggregation.
    if not it.scored:
        m.eval(m, it)
        new, w = 0, 0
        for c in m.objectives:
            val = it.cells[c]
            w += abs(m.w[c])
            tmp = norm(m, c, val)
            if m.w[c] < 0:
                tmp = 1 - tmp
            new += (tmp ** 2)
        it.score = (new ** 0.5) / (w ** 0.5 + 1e-4)
        it.scored = True
    return it.score
"""
## Tree Code
Tools for manipulating the tree returned by _where2_.
### Primitive: Walk the nodes
"""
def nodes(tree, seen = None, steps = 0):
    """Pre-order walk over a where2 tree, yielding (node, depth) pairs.

    Each node is visited at most once; visits are tracked by id() in
    `seen`, which is shared across the recursion.
    """
    if seen is None:
        seen = []
    if not tree:
        return
    marker = id(tree)
    if marker in seen:
        return
    seen.append(marker)
    yield tree, steps
    for child in tree._kids:
        for descendant, depth in nodes(child, seen, steps + 1):
            yield descendant, depth
"""
### Return nodes that are leaves
"""
def leaves(tree, seen = None, steps = 0):
    """Filter nodes(): yield only the childless (node, depth) pairs."""
    for candidate, depth in nodes(tree, seen, steps):
        if not candidate._kids:
            yield candidate, depth
"""
### Return nodes nearest to furthest
"""
# walk sideways..
def neighbors(leaf, seen = None, steps = -1):
    """Walk the tree from 'leaf' to increasingly distant leaves,
    yielding (leaf, distance) pairs: first the leaves below, then,
    climbing one parent at a time, the leaves of each wider subtree.
    The shared `seen` list prevents re-yielding a leaf on the way up.
    """
    if seen is None: seen = []
    for down, steps1 in leaves(leaf, seen, steps + 1):
        yield down, steps1
    if leaf:
        for up, steps1 in neighbors(leaf._up, seen, steps + 1):
            yield up, steps1
"""
### Return nodes in Groups, Closest to Furthest
"""
def around(leaf, f = lambda x: x):
    """Group neighbors(leaf) by distance: yields (distance, [f(node)...])
    pairs from closest group to furthest, skipping distance 0 (leaf
    itself).  Relies on neighbors() emitting equal distances adjacently.
    """
    tmp, last = [], None
    for node, dist in neighbors(leaf):
        if dist > 0:
            if dist == last:
                tmp += [f(node)]
            else:
                # Distance changed: flush the previous group, start a new one.
                if tmp:
                    yield last, tmp
                tmp = [f(node)]
                last = dist
    if tmp:
        yield last, tmp
"""
## Demo Code
### Code Showing the scores
"""
# @go
def _scores():
    """Demo: print every nasa93 row's score next to its objective values."""
    m = nasa93()
    out = []
    for row in m._rows:
        scores(m, row)
        out += [(row.score, [row.cells[c] for c in m.objectives])]
    for s, x in sorted(out):
        print(s, x)
"""
### Code Showing the Distances
"""
# @go
def _distances(m = nasa93):
    """Demo: for each row, print its score plus its closest and furthest
    neighbours with their distances.
    NOTE(review): reads The.seed, so prepare() (or equivalent) must have
    initialized the global The first -- confirm.
    """
    m = m()
    seed(The.seed)
    for i in m._rows:
        j = closest(m, i, m._rows)
        k = furthest(m, i, m._rows)
        idec = [i.cells[c] for c in m.decisions]
        jdec = [j.cells[c] for c in m.decisions]
        kdec = [k.cells[c] for c in m.decisions]
        print("\n",
              gs(idec), g(scores(m, i)), "\n",
              gs(jdec), "closest ", g(dist(m, i, j)), "\n",
              gs(kdec), "furthest", g(dist(m, i, k)))
"""
### A Demo for Where2.
"""
def prepare(m, settings = None):
    "Prepare the 'The' class"
    # Seeds the RNG and installs global settings; a caller-supplied
    # `settings` object wins over the defaults.
    seed(1)
    global The
    The = settings if settings else defaults().update(verbose = True,
                                                      minSize = len(m._rows) ** 0.5,
                                                      prune = False,
                                                      wriggle = 0.3)
    return The
def _where(m = nasa93):
    """Demo: score all rows, configure The (wriggle scaled by the score
    spread), run where2 and print each leaf's clustered rows."""
    m = m()
    seed(1)
    told = N()  # running stats accumulator (presumably from libWhere -- confirm)
    for r in m._rows:
        s = scores(m, r)
        told += s
    global The
    The = defaults().update(verbose = True,
                            minSize = len(m._rows) ** 0.5,
                            prune = False,
                            wriggle = 0.3 * told.sd())
    tree = where2(m, m._rows)
    n = 0
    for node, _ in leaves(tree):
        ID = id(node) % 1000
        print(node.val)
    # Dead experimentation code kept from the original:
    """
    print(m,' ',end="")
    n += m
    print(id(node) % 1000, ' ',end='')
    for near,dist in neighbors(node):
        print(dist,id(near) % 1000,' ',end='')
    print("")
    print(n)
    filter = lambda z: id(z) % 1000
    for node,_ in leaves(tree):
        print(filter(node),
              [x for x in around(node,filter)])
    """
| ai-se/Tree-Learner | _imports/where2.py | Python | unlicense | 11,309 | 0.02131 |
# Exercise 16: Reading and Writing Files
# (Python 2 tutorial script: erases a file, then writes three lines
# typed by the user into it.)
from sys import argv
script, filename = argv
# Warn before destroying the target file.
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
# Mode 'w' already truncates; the explicit truncate() below is redundant
# but kept for the lesson.
target = open(filename, 'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file."
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print "And finally, we close it."
target.close()
# $ python ex16.py test.txt
| paulcarroty/Learn-Python-The-Hard-Way | ex16.py | Python | gpl-3.0 | 740 | 0 |
import unittest
class StringProcessingTestBase(unittest.TestCase):
    """Shared fixtures and assertion helpers for StringProcessing tests."""
    # The backslash character. Needed since there are limitations when
    # using backslashes at the end of raw-strings in front of the
    # terminating " or '.
    bs = "\\"
    # Basic test strings all StringProcessing functions should test.
    test_strings = [
        r"out1 'escaped-escape: \\ ' out2",
        r"out1 'escaped-quote: \' ' out2",
        r"out1 'escaped-anything: \X ' out2",
        r"out1 'two escaped escapes: \\\\ ' out2",
        r"out1 'escaped-quote at end: \'' out2",
        r"out1 'escaped-escape at end: \\' out2",
        r"out1 'str1' out2 'str2' out2",
        r"out1 \' 'str1' out2 'str2' out2",
        r"out1 \\\' 'str1' out2 'str2' out2",
        r"out1 \\ 'str1' out2 'str2' out2",
        r"out1 \\\\ 'str1' out2 'str2' out2",
        r"out1 \\'str1' out2 'str2' out2",
        r"out1 \\\\'str1' out2 'str2' out2",
        r"out1 'str1''str2''str3' out2",
        r"",
        r"out1 out2 out3",
        bs,
        2 * bs]
    # Test string for multi-pattern tests (since we want to vary the
    # pattern, not the test string).
    multi_pattern_test_string = (r"abcabccba###\\13q4ujsabbc\+'**'ac"
                                 r"###.#.####-ba")
    # Multiple patterns for the multi-pattern tests.
    multi_patterns = [r"abc",
                      r"ab",
                      r"ab|ac",
                      2 * bs,
                      r"#+",
                      r"(a)|(b)|(#.)",
                      r"(?:a(b)*c)+",
                      r"1|\+"]
    # Test strings for the remove_empty_matches feature (alias auto-trim).
    auto_trim_test_pattern = r";"
    auto_trim_test_strings = [r";;;;;;;;;;;;;;;;",
                              r"\\;\\\\\;\\#;\\\';;\;\\\\;+ios;;",
                              r"1;2;3;4;5;6;",
                              r"1;2;3;4;5;6;7",
                              r"",
                              r"Hello world",
                              r"\;",
                              r"\\;",
                              r"abc;a;;;;;asc"]
    # Test strings for search-in-between functions.
    search_in_between_begin_pattern = r"("
    search_in_between_end_pattern = r")"
    search_in_between_test_strings = [
        r"()assk(This is a word)and((in a word) another ) one anyway.",
        r"bcc5(((((((((((((((((((1)2)3)))))))))))))))))",
        r"Let's (do (it ) more ) complicated ) ) ) () (hello.)",
        r"()assk\\(This\ is a word\)and((in a\\\ word\\\\\) another \)) "
        r"one anyway.",
        r"bcc5\(\(\((((((\\\(((((((((((1)2)3))\\\\\)))))))))))))\)\)",
        r"Let's \(do (it ) more ) \\ complicated ) ) ) () (hello.)\\z"]
    @staticmethod
    def _construct_message(func, args, kwargs):
        """
        Constructs the error message for the call result assertions.

        :param func:   The function that was called.
        :param args:   The argument tuple the function was invoked with.
        :param kwargs: The named arguments dict the function was invoked with.
        :return:       The error message.
        """
        args = [repr(x) for x in args]
        kwargs = [str(key) + '=' + repr(value)
                  for key, value in kwargs.items()]
        return "Called {}({}).".format(func.__name__, ", ".join(args + kwargs))
    def assertResultsEqual(self,
                           func,
                           invocation_and_results,
                           postprocess=lambda result: result):
        """
        Tests each given invocation against the given results with the
        specified function.

        :param func:                   The function to test.
        :param invocation_and_results: A dict containing the invocation tuple
                                       as key and the result as value.
        :param postprocess:            A function that shall process the
                                       returned result from the tested
                                       function. The function must accept only
                                       one parameter as postprocessing input.
                                       Performs no postprocessing by default.
        """
        for args, result in invocation_and_results.items():
            self.assertEqual(
                postprocess(func(*args)),
                result,
                self._construct_message(func, args, {}))
    def assertResultsEqualEx(self,
                             func,
                             invocation_and_results,
                             postprocess=lambda result: result):
        """
        Tests each given invocation against the given results with the
        specified function. This is an extended version of
        ``assertResultsEqual()`` that supports also ``**kwargs``.

        :param func:                   The function to test.
        :param invocation_and_results: A dict containing the invocation tuple
                                       as key and the result as value. The
                                       tuple contains (args, kwargs).
        :param postprocess:            A function that shall process the
                                       returned result from the tested
                                       function. The function must accept only
                                       one parameter as postprocessing input.
                                       Performs no postprocessing by default.
        """
        for (args, kwargs), result in invocation_and_results.items():
            self.assertEqual(
                postprocess(func(*args, **kwargs)),
                result,
                self._construct_message(func, args, kwargs))
| yland/coala | tests/parsing/StringProcessing/StringProcessingTestBase.py | Python | agpl-3.0 | 5,848 | 0.000171 |
# -*- coding: utf-8 -*-
import os
import KBEngine
from KBEDebug import *
def onBaseAppReady(isBootstrap):
    """
    KBEngine method.
    The baseapp is ready.
    @param isBootstrap: whether this is the first baseapp started
    @type isBootstrap: BOOL
    """
    INFO_MSG('onBaseAppReady: isBootstrap=%s, appID=%s, bootstrapGroupIndex=%s, bootstrapGlobalIndex=%s' % \
        (isBootstrap, os.getenv("KBE_COMPONENTID"), os.getenv("KBE_BOOTIDX_GROUP"), os.getenv("KBE_BOOTIDX_GLOBAL")))
def onReadyForLogin(isBootstrap):
    """
    KBEngine method.
    Return a value >= 1.0 when initialization is fully complete; otherwise
    return a readiness progress value in 0.0~1.0.  This lets the script
    layer keep logins closed until it has finished initializing.
    @param isBootstrap: whether this is the first baseapp started
    @type isBootstrap: BOOL
    """
    return 1.0
def onReadyForShutDown():
    """
    KBEngine method.
    The process asks the script layer: I am about to shut down -- is the
    script ready?  Returning True lets the process proceed with shutdown;
    any other value makes it ask again after a while.
    Use this callback to persist script-layer data so that work is not
    lost when the process shuts down.
    """
    INFO_MSG('onReadyForShutDown()')
    return True
def onBaseAppShutDown(state):
    """
    KBEngine method.
    Callback invoked before this baseapp shuts down.
    @param state: 0 : before all clients are disconnected
                  1 : before all entities are written to the database
                  2 : after all entities have been written to the database
    @type state: int
    """
    INFO_MSG('onBaseAppShutDown: state=%i' % state)
def onInit(isReload):
    """
    KBEngine method.
    Called once the engine has started and initialized all scripts.
    @param isReload: whether this was triggered by reloading the scripts
    @type isReload: bool
    """
    INFO_MSG('onInit::isReload:%s' % isReload)
def onFini():
    """
    KBEngine method.
    The engine is shutting down for good.
    """
    INFO_MSG('onFini()')
def onCellAppDeath(addr):
    """
    KBEngine method.
    A cellapp has died.
    """
    WARNING_MSG('onCellAppDeath: %s' % (str(addr)))
def onGlobalData(key, value):
    """
    KBEngine method.
    A globalData entry has changed.
    """
    DEBUG_MSG('onGlobalData: %s' % key)
def onGlobalDataDel(key):
    """
    KBEngine method.
    A globalData entry was deleted.
    """
    DEBUG_MSG('onDelGlobalData: %s' % key)
def onGlobalBases(key, value):
    """
    KBEngine method.
    A globalBases entry has changed.
    """
    DEBUG_MSG('onGlobalBases: %s' % key)
def onGlobalBasesDel(key):
    """
    KBEngine method.
    A globalBases entry was deleted.
    """
    DEBUG_MSG('onGlobalBasesDel: %s' % key)
def onLoseChargeCB(ordersID, dbid, success, datas):
    """
    KBEngine method.
    An unrecognized (orphaned) charge order was processed.  This can
    happen when the order record was purged by billing after a timeout
    and the third-party charge callback arrives afterwards.
    """
    DEBUG_MSG('onLoseChargeCB: ordersID=%s, dbid=%i, success=%i, datas=%s' % \
        (ordersID, dbid, success, datas))
| dreamsxin/kbengine | assets/scripts/base/kbemain.py | Python | lgpl-3.0 | 2,796 | 0.051105 |
'''
This module controls the dialog to set filter criteria
'''
from PyQt5 import QtCore, Qt, QtWidgets
from views.filter_dialog import Ui_FilterDialog
class FilterGamesController(QtWidgets.QDialog):
    '''
    Controller object for the filter games dialog.
    '''
    def __init__(self, table, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.user_interface = Ui_FilterDialog()
        self.user_interface.setupUi(self)
        self.table = table
        # Set by ok_clicked/cancel_clicked/closeEvent; the parent reads it
        # to decide whether apply_filtering should take effect.
        self.canceled = False
        # True when no value is filtered out in any model.
        self.filtering_all = True
        self.initialize_ui()
        self.setup_signals()
    def initialize_ui(self):
        '''
        Connects interface's sections with their corresponding models
        '''
        def assign_model(model, list_widget):
            '''
            Private function to populate a specific section in the
            dialog with the values stored in a model
            parameters:
                - model: the model assigned to the dialog section
                - list_widget: the list widget to be populated
            '''
            model_qt = Qt.QStandardItemModel()
            values_list = model.get_list()
            for value in values_list:
                # Each value becomes a checkable, non-editable item;
                # checked means "not filtered out".
                item = Qt.QStandardItem(value)
                item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
                item.setData(QtCore.Qt.Checked, QtCore.Qt.CheckStateRole)
                if model.get_filtered(value):
                    item.setCheckState(QtCore.Qt.Unchecked)
                model_qt.appendRow(item)
            list_widget.setModel(model_qt)
        assign_model(self.table.models['system_list_model'], self.user_interface.listSystem)
        assign_model(self.table.models['status_list_model'], self.user_interface.listStatus)
        assign_model(self.table.models['label_list_model'], self.user_interface.listLabel)
        assign_model(self.table.models['difficulty_list_model'], self.user_interface.listDifficulty)
    def setup_signals(self):
        '''
        Connects interface's widgets signals to the corresponding slots
        '''
        def select_all(list_view):
            '''
            Generic callback for a 'select all' button
            parameters:
                -list_view: the list affected when the user clicks 'select all'
            '''
            model_qt = list_view.model()
            for index in range(model_qt.rowCount()):
                item = model_qt.item(index)
                if item.isCheckable() and item.checkState() == QtCore.Qt.Unchecked:
                    item.setCheckState(QtCore.Qt.Checked)
        def deselect_all(list_view):
            '''
            Generic callback for a 'deselect all' button
            parameters:
                - list_view: the list affected when the user clicks 'deselect all'
            '''
            model_qt = list_view.model()
            for index in range(model_qt.rowCount()):
                item = model_qt.item(index)
                if item.isCheckable() and item.checkState() == QtCore.Qt.Checked:
                    item.setCheckState(QtCore.Qt.Unchecked)
        # One (de)select-all button pair per filter section.
        self.user_interface.pushButtonSelectAllSystem.clicked.connect(
            lambda: select_all(self.user_interface.listSystem))
        self.user_interface.pushButtonDeselectAllSystem.clicked.connect(
            lambda: deselect_all(self.user_interface.listSystem))
        self.user_interface.pushButtonSelectAllStatus.clicked.connect(
            lambda: select_all(self.user_interface.listStatus))
        self.user_interface.pushButtonDeselectAllStatus.clicked.connect(
            lambda: deselect_all(self.user_interface.listStatus))
        self.user_interface.pushButtonSelectAllLabel.clicked.connect(
            lambda: select_all(self.user_interface.listLabel))
        self.user_interface.pushButtonDeselectAllLabel.clicked.connect(
            lambda: deselect_all(self.user_interface.listLabel))
        self.user_interface.pushButtonSelectAllDifficulty.clicked.connect(
            lambda: select_all(self.user_interface.listDifficulty))
        self.user_interface.pushButtonDeselectAllDifficulty.clicked.connect(
            lambda: deselect_all(self.user_interface.listDifficulty))
        self.user_interface.pushButtonOk.clicked.connect(self.ok_clicked)
        self.user_interface.pushButtonCancel.clicked.connect(self.cancel_clicked)
    def ok_clicked(self):
        '''
        Callback for when the user clicks the 'ok' button. The dialog is closed and
        the parent is informed by means of an attribute that the changes have to
        take effect
        '''
        self.canceled = False
        self.hide()
    def cancel_clicked(self):
        '''
        Callback for when the user clicks the 'cancel' button. The dialog is closed
        and the parent is informed by means of an attribute that changes shouldn't
        take effect
        '''
        self.canceled = True
        self.hide()
    def closeEvent(self, event):
        '''
        Overriding the closeEvent from the QDialog class. This tells the main window
        controller to behave as if the Cancel button was pressed.

        parameters:
            - event: the passed event (not used in this overriden version)
        '''
        # pylint: disable=invalid-name
        # pylint: disable=unused-argument
        self.canceled = True
    def apply_filtering(self):
        '''
        Updates the models with information about which values to be filted
        '''
        def apply_filtering_per_type(model, list_widget):
            '''
            Updates a specific model
            parameters:
                - model: the model to be updated
                - list_widget: the list associated to that model
            '''
            # Unchecked items become filtered-out values in the model.
            model_qt = list_widget.model()
            for index in range(model_qt.rowCount()):
                item = model_qt.item(index)
                model.set_filtered(str(item.text()), item.checkState() != QtCore.Qt.Checked)
        if not self.canceled:
            apply_filtering_per_type(
                self.table.models['system_list_model'],
                self.user_interface.listSystem)
            apply_filtering_per_type(
                self.table.models['status_list_model'],
                self.user_interface.listStatus)
            apply_filtering_per_type(
                self.table.models['label_list_model'],
                self.user_interface.listLabel)
            apply_filtering_per_type(
                self.table.models['difficulty_list_model'],
                self.user_interface.listDifficulty)
            self.table.hide_rows()
            # filtering_all is True when no model filters anything out.
            models = [self.table.models['system_list_model'],
                      self.table.models['status_list_model'],
                      self.table.models['label_list_model'],
                      self.table.models['difficulty_list_model']]
            model = 0
            while model < len(models) and not models[model].is_any_filtered():
                model = model + 1
            self.filtering_all = model >= len(models)
| pablosuau/pyBacklogger | controllers/filter_games_controller.py | Python | gpl-2.0 | 7,069 | 0.002405 |
from interface.design.ui_screen import Ui_wnd_gifextract
from PyQt5 import QtWidgets
import sys
import listener
import config
import ffmpeg
import queue
import interface.menus.Frame_CreateGif
import interface.menus.Frame_ExtractFrames
import interface.menus.Frame_Queue
class Screen(QtWidgets.QMainWindow):
    """Main application window: hosts the frame-extraction, gif-creation
    and queue tabs, plus the shared config/ffmpeg/queue objects."""
    def __init__(self, parent=None):
        # Local helpers keep the setup steps named; note the call order
        # below: config must exist before tabs/ffmpeg/queue use it.
        def setupFFMpeg():
            self.ffmpeg = ffmpeg.FFmpeg(self.config)
        def setupConfig():
            self.config = config.Config(self)
        def setupQueue():
            self.queue = queue.JobQueue(self)
        def setupTabs():
            self.tab_video = interface.menus.Frame_ExtractFrames.Frame(self)
            self.ui.tabWidget.addTab(self.tab_video, "Frame Extraction")
            self.tab_gif = interface.menus.Frame_CreateGif.Frame(self)
            self.ui.tabWidget.addTab(self.tab_gif, "Gif Creation")
            self.tab_queue = interface.menus.Frame_Queue.Frame(self)
            self.ui.tabWidget.addTab(self.tab_queue, "Queue")
        # NOTE(review): calls QWidget.__init__ directly rather than
        # super()/QMainWindow.__init__ -- works, but confirm intent.
        QtWidgets.QWidget.__init__(self, parent)
        self.ui = Ui_wnd_gifextract()
        self.ui.setupUi(self)
        self.slots = listener.Slots(self)
        self.createLinks()
        setupConfig()
        setupTabs()
        setupFFMpeg()
        setupQueue()
    def createLinks(self):
        # Menu action -> preferences dialog.
        self.ui.actionPreferences.triggered.connect(self.openOptions)
    def openOptions(self):
        """Open the preferences dialog (imported lazily to avoid a
        circular import at module load time)."""
        import interface.menus.ConfigMenu
        options = interface.menus.ConfigMenu.ConfigMenu(self, self.config)
        options.show()
# Script entry point: build the Qt application and show the main window.
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    program = Screen()
    program.show()
    sys.exit(app.exec_())
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert iCalendar files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ical2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import ical, po
class ical2po:
    """Convert one or two iCalendar files to a single PO file."""
    # Store/unit classes are class attributes so subclasses can swap them.
    SourceStoreClass = ical.icalfile
    TargetStoreClass = po.pofile
    TargetUnitClass = po.pounit
    def __init__(
        self,
        input_file,
        output_file,
        template_file=None,
        blank_msgstr=False,
        duplicate_style="msgctxt",
    ):
        """Initialize the converter.

        With a template, source strings come from the template and
        translations from the input; without one, a monolingual POT-style
        extraction is performed.
        """
        self.blank_msgstr = blank_msgstr
        self.duplicate_style = duplicate_style
        self.extraction_msg = None
        self.output_file = output_file
        self.source_store = self.SourceStoreClass(input_file)
        self.target_store = self.TargetStoreClass()
        self.template_store = None
        if template_file is not None:
            self.template_store = self.SourceStoreClass(template_file)
    def convert_unit(self, unit):
        """Convert a source format unit to a target format unit."""
        target_unit = self.TargetUnitClass(encoding="UTF-8")
        target_unit.addlocation("".join(unit.getlocations()))
        target_unit.addnote(unit.getnotes("developer"), "developer")
        target_unit.source = unit.source
        target_unit.target = ""
        return target_unit
    def convert_store(self):
        """Convert a single source format file to a target format file."""
        self.extraction_msg = "extracted from %s" % self.source_store.filename
        for source_unit in self.source_store.units:
            self.target_store.addunit(self.convert_unit(source_unit))
    def merge_stores(self):
        """Convert two source format files to a target format file."""
        self.extraction_msg = "extracted from {}, {}".format(
            self.template_store.filename,
            self.source_store.filename,
        )
        # Index input units by location so template units can find their
        # translated counterparts.
        self.source_store.makeindex()
        for template_unit in self.template_store.units:
            target_unit = self.convert_unit(template_unit)
            template_unit_name = "".join(template_unit.getlocations())
            add_translation = (
                not self.blank_msgstr
                and template_unit_name in self.source_store.locationindex
            )
            if add_translation:
                source_unit = self.source_store.locationindex[template_unit_name]
                target_unit.target = source_unit.source
            self.target_store.addunit(target_unit)
    def run(self):
        """Run the converter.  Returns 1 if output was written, 0 if the
        result was empty."""
        if self.template_store is None:
            self.convert_store()
        else:
            self.merge_stores()
        if self.extraction_msg:
            self.target_store.header().addnote(self.extraction_msg, "developer")
        self.target_store.removeduplicates(self.duplicate_style)
        if self.target_store.isempty():
            return 0
        self.target_store.serialize(self.output_file)
        return 1
def run_converter(
    input_file, output_file, template_file=None, pot=False, duplicatestyle="msgctxt"
):
    """Wrapper around converter."""
    # Thin adapter matching the signature expected by the convert framework.
    return ical2po(
        input_file,
        output_file,
        template_file,
        blank_msgstr=pot,
        duplicate_style=duplicatestyle,
    ).run()
# Converter registration table for the convert framework: maps the input
# extension (optionally paired with a template extension) to the output
# extension and the conversion callable.
formats = {
    "ics": ("po", run_converter),
    ("ics", "ics"): ("po", run_converter),
}
def main(argv=None):
    """Command-line entry point: build the option parser and run it."""
    parser = convert.ConvertOptionParser(
        formats, usetemplates=True, usepots=True, description=__doc__
    )
    parser.add_duplicates_option()
    parser.passthrough.append("pot")
    parser.run(argv)
# Allow running this converter directly as a script.
if __name__ == "__main__":
    main()
| miurahr/translate | translate/convert/ical2po.py | Python | gpl-2.0 | 4,527 | 0.000884 |