repo_name | path | copies | size | content | license
---|---|---|---|---|---
stringlengths 5-100 | stringlengths 4-375 | stringclasses 991 values | stringlengths 4-7 | stringlengths 666-1M | stringclasses 15 values
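Each row below pairs a source file's metadata with its full text. As a minimal sketch of how such rows could be consumed (the `rows` iterable, output directory, and license filter are assumptions, not part of this dump), the content and path columns can be materialized back into files:

```python
import os

def materialize(rows, out_dir, licenses=("mit", "apache-2.0")):
    """Write each row's `content` to out_dir/<repo_name>/<path>,
    keeping only rows whose `license` is in the allowed set."""
    for row in rows:
        if row["license"] not in licenses:
            continue
        dest = os.path.join(out_dir, row["repo_name"], row["path"])
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with open(dest, "w", encoding="utf-8") as fh:
            fh.write(row["content"])
```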
ianzhengnan/learnpy | task_master.py | 1 | 1106 | import random, time, queue
from multiprocessing.managers import BaseManager
from multiprocessing import freeze_support
task_queue = queue.Queue()
result_queue = queue.Queue()
class QueueManager(BaseManager):
pass
def return_task_queue():
global task_queue
return task_queue
def return_result_queue():
global result_queue
return result_queue
def test():
QueueManager.register('get_task_queue', callable = return_task_queue)
QueueManager.register('get_result_queue', callable = return_result_queue)
manager = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
manager.start()
task = manager.get_task_queue()
result = manager.get_result_queue()
for i in range(10):
n = random.randint(0,10000)
print('Put task %d...' % n)
task.put(n)
print('Try get result...')
for i in range(10):
r = result.get(timeout=10)
print('Result: %s' % r)
manager.shutdown()
print('Master exit.')
if __name__ == '__main__':
freeze_support()
test() | apache-2.0 |
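A sketch of the worker process that would pair with the master above. The queue names, address, and authkey are taken from the file; the squaring computation and the 10-task loop are placeholders (the master only requires that something is put on the result queue):

```python
# task_worker.py -- companion to task_master.py (sketch)
import time, queue
from multiprocessing.managers import BaseManager

class QueueManager(BaseManager):
    pass

# Register the same names the master exposes; no callable is needed on the client side.
QueueManager.register('get_task_queue')
QueueManager.register('get_result_queue')

manager = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
manager.connect()

task = manager.get_task_queue()
result = manager.get_result_queue()

for _ in range(10):
    try:
        n = task.get(timeout=1)
        print('Run task %d * %d...' % (n, n))
        time.sleep(0.5)
        result.put('%d * %d = %d' % (n, n, n * n))
    except queue.Empty:
        print('Task queue is empty.')
print('Worker exit.')
```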
9kopb/django-easy-maps | easy_maps/migrations/0005_auto__add_unique_address_address.py | 3 | 1806 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing index on 'Address', fields ['address']
if db.backend_name != 'sqlite3':
# South forgets indexes when altering tables in sqlite,
# see http://south.aeracode.org/ticket/757 .
# This means delete_index will raise an exception with sqlite
# because the index is 'forgotten' in previous migrations.
db.delete_index('easy_maps_address', ['address'])
# Adding unique constraint on 'Address', fields ['address']
db.create_unique('easy_maps_address', ['address'])
def backwards(self, orm):
# Removing unique constraint on 'Address', fields ['address']
db.delete_unique('easy_maps_address', ['address'])
# Adding index on 'Address', fields ['address']
db.create_index('easy_maps_address', ['address'])
models = {
'easy_maps.address': {
'Meta': {'object_name': 'Address'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'computed_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geocode_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['easy_maps']
| mit |
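For reference, the frozen ORM dict in this migration corresponds to a model roughly like the following. This is a sketch inferred from the migration's field definitions, not a copy of easy_maps/models.py:

```python
from django.db import models

class Address(models.Model):
    # unique constraint on `address` is what migration 0005 adds
    address = models.CharField(max_length=255, unique=True)
    computed_address = models.CharField(max_length=255, null=True, blank=True)
    latitude = models.FloatField(null=True, blank=True)
    longitude = models.FloatField(null=True, blank=True)
    geocode_error = models.BooleanField(default=False)
```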
eddo888/Tools | parser.py | 1 | 15876 | #!/usr/bin/env python2
# $Date$
# $Revision$
# $Author$
# $HeadURL$
# $Id$
import sys, re, os
import xml.parsers.expat
from xml.dom import minidom
from Tools.pretty import *
from Tools.colours import Colours
tokens = [
['&amp;' , '####amp####'],
['&' , '&amp;'],
['<' , '&lt;'],
['>' , '&gt;'],
['\"' , '&quot;'],
#['\'' , '&apos;'],
['####amp####' , '&amp;'],
]
def escapeData(data):
for d in tokens:
data = data.replace(d[0],d[1])
return data
def doParse(
input,
output,
colour=False,
areturn=False,
rformat=False,
html=False,
preserve=False,
comments=False,
fname=None
):
myParser = MyParser(colour=colour, areturn=areturn, rformat=rformat, html=html, output=output, preserve=preserve, comments=comments)
try:
myParser.parser.ParseFile(input)
except:
printer = PrettyPrinter(colour=True, output=sys.stderr)
sys.stderr.write('%s \n'%(fname or 'rendering as text '))
printer.prettify(sys.exc_info())
del printer
if input != sys.stdin:
input.seek(0)
if output != sys.stdout:
output.seek(0)
output.write(input.read())
del myParser
return
def printXML(xml, colour=False, areturn=False, rformat=False,output=sys.stdout):
myParser = MyParser(
colour=colour,
rformat=rformat,
areturn=areturn,
output=output
)
myParser.parser.Parse(xml)
del myParser
return
class MyParser:
indent = 0
stateStartLast = 1
stateEndLast = 2
stateTextLast = 3
stateCDataLast = 4
stateCDataStart = 5
stateCDataEnd = 6
state = stateEndLast
def __init__(self, colour=False, areturn=False, rformat=False, html=False, output=sys.stdout, preserve=False, comments=True):
self.output = output
self.lt='<'
self.gt='>'
self.amp='&'
self.quot='\"'
self.apos='\''
self.lf='\n'
self.indentChar = ' '
self.preserve = preserve
self.colours = Colours(colour=colour, html=html)
if html:
self.lt ='&lt;'
self.gt ='&gt;'
self.amp ='&amp;'
self.quot='&quot;'
self.apos='&apos;'
self.lf ='<br/>'
self.indentChar = ' '
self.areturn = areturn
self.rformat = rformat
self.parser = xml.parsers.expat.ParserCreate()
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.StartCdataSectionHandler = self.startCdataSectionHandler
self.parser.EndCdataSectionHandler = self.endCdataSectionHandler
self.parser.XmlDeclHandler = self.xmlDeclHandler
self.parser.StartDoctypeDeclHandler = self.startDoctypeDeclHandler
self.parser.EndDoctypeDeclHandler = self.endDoctypeDeclHandler
self.parser.ProcessingInstructionHandler = self.processingInstructionHandler
if comments:
self.parser.CommentHandler = self.commentHandler
# Doctype => \&handle_doctype,
# Proc => => \&handle_proc,
self.leader = re.compile('(^\s+)')
self.pattern = re.compile('(^\s+|\s+$)')
self.lfCount = 0
return
##parser.ElementDeclHandler(name, model)
##parser.AttlistDeclHandler(elname, attname, type, default, required)
##parser.UnparsedEntityDeclHandler(entityName, base, systemId, publicId, notationName)
##parser.EntityDeclHandler(entityName, is_parameter_entity, value, base, systemId, publicId, notationName)
##parser.NotationDeclHandler(notationName, base, systemId, publicId)
##parser.StartNamespaceDeclHandler(prefix, uri)
##parser.EndNamespaceDeclHandler(prefix)
##parser.DefaultHandler(data)
##parser.DefaultHandlerExpand(data)
##parser.NotStandaloneHandler()
##parser.ExternalEntityRefHandler(context, base, systemId, publicId)
def close(self):
if self.parser:
self.parser.Parse('',1)
del self.parser
return
def startElementHandler(self, name, attrs):
if self.rformat:
self.areturn = True
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White,
self.gt,
self.colours.Off,
self.lf
]))
self.output.flush()
if self.preserve and self.lfCount > 2 and self.state == self.stateEndLast:
self.output.write(self.lf)
self.lfCount =0
if ':' in name:
(pre,ele) = tuple(name.split(':'))
pre='%s:'%pre
else:
(pre,ele) = ('',name)
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White,
self.lt,
self.colours.Off,
pre,
self.colours.Teal,
ele,
self.colours.Off
]))
self.output.flush()
for attr in sorted(attrs.keys()):
if self.areturn:
self.output.write(''.join([
self.lf,
(self.indent+1) * self.indentChar,
]))
else:
self.output.write(' ')
self.output.write(''.join([
self.colours.Green ,
attr ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.colours.Off ,
self.quot ,
self.colours.Purple ,
escapeData(attrs[attr]) ,
self.colours.Off ,
self.quot ,
]))
self.output.flush()
if len(attrs) > 0 and self.areturn:
self.output.write(''.join([
self.lf,
(self.indent) * self.indentChar,
]))
self.indent += 1
self.state = self.stateStartLast
if self.rformat:
self.rformat = False
self.areturn = False
return
def endElementHandler(self, name):
if ':' in name:
(pre,ele) = tuple(name.split(':'))
pre='%s:'%pre
else:
(pre,ele) = ('',name)
self.indent -= 1
if self.state == self.stateCDataEnd:
if self.lfCount > 1:
self.output.write(self.lf)
self.lfCount = 0
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White ,
'/' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
elif self.state != self.stateTextLast and self.state != self.stateCDataEnd:
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White ,
self.lt ,
self.colours.Off ,
self.colours.White ,
'/' ,
pre,
self.colours.Teal,
ele ,
self.colours.Off ,
self.colours.White ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
else:
self.output.write(''.join([
self.colours.White ,
self.lt ,
self.colours.Off ,
self.colours.White ,
'/' ,
pre,
self.colours.Teal,
ele ,
self.colours.Off ,
self.colours.White ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
self.state = self.stateEndLast
return
def characterDataHandler(self, data):
if not self.state == self.stateCDataStart and not self.state == self.stateCDataLast:
data = escapeData(data)
leader = ''
match = self.leader.match(data)
if match:
leader = match.group(1)
self.lfCount = self.lfCount + data.count('\n')
if not self.state == self.stateTextLast and not self.state == self.stateCDataLast:
data = self.leader.sub('', data)
if len(data) == 0:
return
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White,
self.gt,
self.colours.Off,
]))
if self.lfCount > 1:
self.output.write(leader)
self.output.write(self.lf)
self.output.write(data)
self.state = self.stateTextLast
elif self.state == self.stateCDataStart:
if self.lfCount > 0:
self.output.write(leader)
self.output.write(self.lf)
self.output.write(data)
self.state = self.stateCDataLast
elif self.state == self.stateCDataLast:
self.output.write(data)
elif self.state == self.stateTextLast:
self.output.write(data)
elif self.state != self.stateEndLast:
self.output.write(data)
return
def commentHandler(self, data):
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.Orange ,
self.lt ,
'!--' ,
data ,
'--' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
self.state = self.stateEndLast
return
def startCdataSectionHandler(self):
if not self.state == self.stateStartLast:
self.output.write((self.indent) * self.indentChar)
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White ,
self.gt ,
self.colours.Off ,
]))
self.output.flush()
self.output.write(''.join([
self.colours.White ,
self.lt ,
'![',
self.colours.Green,
'CDATA',
self.colours.White,
'[' ,
self.colours.Off ,
]))
self.output.flush()
self.state = self.stateCDataStart
return
def endCdataSectionHandler(self):
self.output.write(''.join([
self.colours.White ,
']]' ,
self.gt ,
self.colours.Off ,
]))
self.output.flush()
self.state = self.stateCDataEnd
return
def xmlDeclHandler(self, version, encoding, standalone):
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White,
self.lt ,
'?',
self.colours.Orange,
'xml ' ,
self.colours.Off ,
self.colours.Green ,
'version' ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.quot ,
self.colours.Off ,
self.colours.Purple ,
version ,
self.colours.Off ,
self.colours.White ,
self.quot ,
self.colours.Off ,
]))
self.output.flush()
if encoding:
self.output.write(''.join([
self.colours.Green ,
' encoding' ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.colours.Off ,
self.quot ,
self.colours.Purple ,
encoding ,
self.colours.Off ,
self.quot ,
]))
self.output.flush()
self.output.write(''.join([
self.colours.White ,
'?' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
return
def startDoctypeDeclHandler(self, doctypeName, systemId, publicId, has_internal_subset):
self.output.write((self.indent) * self.indentChar)
if not publicId:
self.output.write(''.join([
self.colours.White ,
self.lt ,
'!DOCTYPE ' ,
self.colours.Off ,
self.colours.White ,
doctypeName ,
self.colours.Off ,
self.colours.White ,
' SYSTEM ' ,
self.quot ,
self.colours.Off ,
self.colours.Green ,
systemId ,
self.colours.Off ,
self.colours.White ,
self.quot ,
self.quot ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
else:
self.output.write(''.join([
self.colours.White ,
self.lt ,
'!DOCTYPE ' ,
self.colours.Off ,
self.colours.White ,
doctypeName ,
self.colours.Off ,
self.colours.White ,
' PUBLIC ' ,
self.quot ,
self.colours.Off ,
self.colours.Green ,
publicId ,
self.colours.Off ,
self.quot ,
' ' ,
self.quot ,
self.colours.Green ,
systemId ,
self.colours.Off ,
self.colours.White ,
self.quot,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
return
def endDoctypeDeclHandler(self):
return
def processingInstructionHandler(self, target, data):
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White ,
self.lt ,
'?' ,
target ,
self.colours.Off ,
]))
self.output.flush()
pn = re.compile('\s*(\S+)=[\'"]([^\'"]*)[\'"]\s*')
b = pn.split(data)
while '' in b: b.remove('')
for i in range(len(b)/2):
self.output.write(''.join([
self.colours.Red ,
b[2*i] ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.colours.Off ,
self.quot ,
self.colours.Green ,
b[2*i+1],
self.colours.Off ,
self.quot ,
]))
self.output.flush()
self.output.write(''.join([
self.colours.White ,
'?' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
return
def main():
with open('../scripts/_test/Sample.xml') as input:
doParse(input, sys.stdout, colour=True, rformat=True)
if __name__ == '__main__': main()
| mit |
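Usage sketch for the pretty-printer above. The XML string and input file name are made up; the import path assumes this repo's Tools package is on PYTHONPATH and that the script runs under the Python 2 interpreter the module targets:

```python
import sys
from Tools.parser import printXML, doParse

# Colourized, re-indented output of an XML string on stdout.
printXML('<root a="1"><child>text</child></root>', colour=True)

# Stream a file through the parser; on a parse error doParse falls back
# to copying the raw input to the output.
with open('input.xml') as src:
    doParse(src, sys.stdout, colour=False, fname='input.xml')
```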
chenjun0210/tensorflow | tensorflow/contrib/cudnn_rnn/__init__.py | 54 | 1524 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for fused Cudnn RNN models.
@@CudnnGRU
@@CudnnLSTM
@@CudnnRNNRelu
@@CudnnRNNTanh
@@RNNParamsSaveable
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnGRU
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNRelu
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNTanh
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import RNNParamsSaveable
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"CudnnGRU",
"CudnnLSTM",
"CudnnRNNRelu",
"CudnnRNNTanh",
"RNNParamsSaveable",
]
remove_undocumented(__name__)
| apache-2.0 |
mozilla-b2g/fxos-certsuite | mcts/web-platform-tests/tests/tools/wptserve/tests/functional/base.py | 293 | 1831 | import base64
import logging
import os
import unittest
import urllib
import urllib2
import urlparse
import wptserve
logging.basicConfig()
here = os.path.split(__file__)[0]
doc_root = os.path.join(here, "docroot")
class Request(urllib2.Request):
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self.method = "GET"
def get_method(self):
return self.method
def add_data(self, data):
if hasattr(data, "iteritems"):
data = urllib.urlencode(data)
print data
self.add_header("Content-Length", str(len(data)))
urllib2.Request.add_data(self, data)
class TestUsingServer(unittest.TestCase):
def setUp(self):
self.server = wptserve.server.WebTestHttpd(host="localhost",
port=0,
use_ssl=False,
certificate=None,
doc_root=doc_root)
self.server.start(False)
def tearDown(self):
self.server.stop()
def abs_url(self, path, query=None):
return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))
def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
req = Request(self.abs_url(path, query))
req.method = method
if headers is None:
headers = {}
for name, value in headers.iteritems():
req.add_header(name, value)
if body is not None:
req.add_data(body)
if auth is not None:
req.add_header("Authorization", "Basic %s" % base64.encodestring('%s:%s' % auth))
return urllib2.urlopen(req)
| mpl-2.0 |
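A sketch of how a concrete test module builds on TestUsingServer. The requested path is an assumption (real tests place files or register wptserve handlers under the docroot directory set up above):

```python
import unittest

class TestDocumentExample(TestUsingServer):
    def test_get_document(self):
        # assumes a file named "document.txt" exists under doc_root
        resp = self.request("/document.txt", headers={"X-Test": "1"})
        self.assertEqual(200, resp.getcode())

if __name__ == "__main__":
    unittest.main()
```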
hanwenyan/ud858 | Lesson_4/00_Conference_Central/conference.py | 35 | 3749 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - - - -
# TODO
# registers API
api = endpoints.api_server([ConferenceApi])
| gpl-3.0 |
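The endpoint above assumes definitions in models.py roughly like the following. This is a sketch reconstructed from the field names used in `_copyProfileToForm` and `_getProfileFromUser`, not the actual course file, and the enum values beyond NOT_SPECIFIED are placeholders:

```python
from protorpc import messages
from google.appengine.ext import ndb

class TeeShirtSize(messages.Enum):
    NOT_SPECIFIED = 1
    S = 2
    M = 3
    L = 4

class Profile(ndb.Model):
    displayName = ndb.StringProperty()
    mainEmail = ndb.StringProperty()
    teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')

class ProfileForm(messages.Message):
    displayName = messages.StringField(1)
    mainEmail = messages.StringField(2)
    teeShirtSize = messages.EnumField(TeeShirtSize, 3)

class ProfileMiniForm(messages.Message):
    displayName = messages.StringField(1)
    teeShirtSize = messages.EnumField(TeeShirtSize, 2)
```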
bminchew/PySAR | pysar/insar/sarcorrelation.py | 1 | 4976 | #!/usr/bin/env python
"""
sarcorrelation.py : Calculates interferometric correlation
usage::
$ sarcorrelation.py int_file amp_input [options]
Parameters
----------
int_file : complex interferogram file
amp_input : amplitude file(s); one of:
-a bip_amp (bit-interleaved amplitude file)
-s amp1_file amp2_file
-p power1_file power2_file
Options
-------
-o output_file : name of ouput file [sarcor.out]
-c str_option : output real amplitude (str_option = 'a'), real phase (str_option = 'p'),
in radians or complex (str_option = 'c') correlation ['a']
-n value : data null value (float only) [0]
Notes
-----
* input data is assumed to be single precision
"""
from __future__ import print_function, division
import sys,os
import numpy as np
from pysar.etc.excepts import InputError
np.seterr(divide='ignore')
###==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==###
def main(args):
cor = Correlation(args)
cor.read_data()
cor.calculate()
cor.write_data()
###==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==###
class Correlation():
def __init__(self,args):
self.intfile = args[0]
self.null = 0.
self.ampow = 'a'
self.ampfile = None
self.amp1file, self.amp2file = None, None
self.outfile = 'sarcor.out'
self.outap = 'a'
for i,a in enumerate(args[1:]):
if a == '-a':
self.ampfile = args[2+i] # 2 because I skip the first argument in args
elif a == '-s':
self.amp1file = args[2+i]
self.amp2file = args[3+i]
elif a == '-p':
self.amp1file = args[2+i]
self.amp2file = args[3+i]
self.ampow = 'p'
elif a == '-o':
self.outfile = args[2+i]
elif a == '-c':
self.outap = args[2+i]
elif a == '-n':
try:
self.null = np.float32(args[2+i])
except:
raise InputError('null value must be float; %s given' % args[2+i])
self._check_args()
###--------------------------------------###
def _check_args(self):
if self.ampfile is None:
if self.amp1file is None or self.amp2file is None:
errstr = 'a single bil amplitude file or two real-valued amplitude or power files '
errstr += 'must be provided'
raise InputError(errstr)
if self.outap != 'a' and self.outap != 'p' and self.outap != 'c':
errstr = "unrecognized option %s for output type; " % self.outap
errstr += "must be 'a' for amplitude, 'p' for phase, or 'c' for complex"
raise InputError(errstr)
###--------------------------------------###
def read_data(self):
print('reading')
fid = open(self.intfile,'r')
self.igram = np.fromfile(fid,dtype=np.complex64)
fid.close()
if self.ampfile is None:
fid = open(self.amp1file,'r')
self.amp1 = np.fromfile(fid,dtype=np.float32)
fid.close()
fid = open(self.amp2file,'r')
self.amp2 = np.fromfile(fid,dtype=np.float32)
fid.close()
else:
fid = open(self.ampfile,'r')
amp = np.fromfile(fid,dtype=np.float32)
fid.close()
self.amp1, self.amp2 = amp[::2], amp[1::2]
###--------------------------------------###
def calculate(self):
print('calculating correlation')
redonull, redozero = False, False
teps = 2.*np.finfo(np.float32).eps
nullmask = np.abs(self.igram - self.null) < teps
nullmask += np.abs(self.amp1 - self.null) < teps
nullmask += np.abs(self.amp2 - self.null) < teps
zeromask = self.amp1 < teps
zeromask += self.amp2 < teps
if len(nullmask[nullmask]) > 1:
redonull = True
self.amp1[nullmask], self.amp2[nullmask] = 1., 1.
if len(zeromask[zeromask]) > 1:
redozero = True
self.amp1[zeromask], self.amp2[zeromask] = 1., 1.
if self.ampow == 'a':
self.cor = self.igram/(self.amp1*self.amp2)
else:
self.cor = self.igram/(np.sqrt(self.amp1*self.amp2))
if self.outap == 'a':
self.cor = np.abs(self.cor)
elif self.outap == 'p':
self.cor = np.arctan2(self.cor.imag,self.cor.real)
if redonull:
self.cor[nullmask] = self.null
if redozero:
self.cor[zeromask] = self.null
###--------------------------------------###
def write_data(self):
print('writing')
fid = open(self.outfile,'w')
self.cor.tofile(fid)
fid.close()
###==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==###
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) < 3:
print(__doc__)
sys.exit()
main(args)
| gpl-3.0 |
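A small end-to-end sketch of the binary inputs this script expects (a complex64 interferogram plus two float32 amplitude files) and the command line documented in the docstring. File names, array size, and the 0.8 target coherence are arbitrary, and the call assumes the pysar scripts are installed on PATH:

```python
import subprocess
import numpy as np

n = 1000
a1 = np.random.rand(n).astype(np.float32) + 0.1
a2 = np.random.rand(n).astype(np.float32) + 0.1
phase = np.random.rand(n).astype(np.float32)
igram = (0.8 * a1 * a2 * np.exp(1j * phase)).astype(np.complex64)

igram.tofile('igram.int')
a1.tofile('amp1.r4')
a2.tofile('amp2.r4')

# amplitude correlation written to cor.out as float32
subprocess.call(['sarcorrelation.py', 'igram.int',
                 '-s', 'amp1.r4', 'amp2.r4', '-o', 'cor.out', '-c', 'a'])
cor = np.fromfile('cor.out', dtype=np.float32)
print(cor[:5])  # values near 0.8
```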
liuqr/edx-xiaodun | lms/djangoapps/psychometrics/models.py | 38 | 2026 | #
# db model for psychometrics data
#
# this data is collected in real time
#
from django.db import models
from courseware.models import StudentModule
class PsychometricData(models.Model):
"""
This data is a table linking student, module, and module performance,
including number of attempts, grade, max grade, and time of checks.
Links to instances of StudentModule, but only those for capa problems.
Note that StudentModule.module_state_key is nominally a Location instance (url string).
That means it is of the form {tag}://{org}/{course}/{category}/{name}[@{revision}]
and for capa problems, category = "problem".
checktimes is extracted from tracking logs, or added by capa module via psychometrics callback.
"""
studentmodule = models.ForeignKey(StudentModule, db_index=True, unique=True) # contains student, module_state_key, course_id
done = models.BooleanField(default=False)
attempts = models.IntegerField(default=0) # extracted from studentmodule.state
checktimes = models.TextField(null=True, blank=True) # internally stored as list of datetime objects
# keep in mind
# grade = studentmodule.grade
# max_grade = studentmodule.max_grade
# student = studentmodule.student
# course_id = studentmodule.course_id
# location = studentmodule.module_state_key
def __unicode__(self):
sm = self.studentmodule
return "[PsychometricData] %s url=%s, grade=%s, max=%s, attempts=%s, ct=%s" % (sm.student,
sm.module_state_key,
sm.grade,
sm.max_grade,
self.attempts,
self.checktimes)
| agpl-3.0 |
moksha11/xen-hv | dist/install/usr/lib64/python2.6/site-packages/xen/util/utils.py | 43 | 1937 | import traceback
import sys
import os
def exception_string(e):
(ty,v,tb) = sys.exc_info()
return traceback.format_exception_only(ty,v)
def daemonize(prog, args, stdin_tmpfile=None):
"""Runs a program as a daemon with the list of arguments. Returns the PID
of the daemonized program, or returns 0 on error.
"""
r, w = os.pipe()
pid = os.fork()
if pid == 0:
os.close(r)
w = os.fdopen(w, 'w')
os.setsid()
try:
pid2 = os.fork()
except:
pid2 = None
if pid2 == 0:
os.chdir("/")
null_fd = os.open("/dev/null", os.O_RDWR)
if stdin_tmpfile is not None:
os.dup2(stdin_tmpfile.fileno(), 0)
else:
os.dup2(null_fd, 0)
os.dup2(null_fd, 1)
os.dup2(null_fd, 2)
for fd in range(3, 256):
try:
os.close(fd)
except:
pass
os.execvp(prog, args)
os._exit(1)
else:
w.write(str(pid2 or 0))
w.close()
os._exit(0)
os.close(w)
r = os.fdopen(r)
daemon_pid = int(r.read())
r.close()
os.waitpid(pid, 0)
return daemon_pid
# Global variable to store the sysfs mount point
sysfs_mount_point = None
PROC_MOUNTS_PATH = '/proc/mounts'
def find_sysfs_mount():
global sysfs_mount_point
if not sysfs_mount_point is None:
return sysfs_mount_point
try:
mounts_file = open(PROC_MOUNTS_PATH, 'r')
for line in mounts_file:
sline = line.split()
if len(sline) < 3:
continue
if sline[2] == 'sysfs':
sysfs_mount_point= sline[1]
break
mounts_file.close()
return sysfs_mount_point
except IOError, (errno, strerr):
raise
return None
| gpl-2.0 |
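Usage sketch for the helpers above. The program path is arbitrary; note that `args` follows the os.execvp convention used inside daemonize, so args[0] is the program name again:

```python
from xen.util.utils import daemonize, exception_string, find_sysfs_mount

# Detach a long-running helper; returns the daemon's PID, or 0 on error.
pid = daemonize('/bin/sleep', ['sleep', '600'])
print('daemon pid: %d' % pid)

print('sysfs mounted at: %s' % find_sysfs_mount())

try:
    1 / 0
except Exception as e:
    # exception_string formats the currently handled exception
    print(exception_string(e))
```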
andreafrittoli/testtracker | testtracker/ttracker/models.py | 2 | 2984 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class AlembicVersion(models.Model):
version_num = models.CharField(max_length=32)
class Meta:
managed = False
db_table = 'alembic_version'
class RunMetadata(models.Model):
id = models.CharField(primary_key=True, max_length=36)
key = models.CharField(max_length=255, blank=True)
value = models.CharField(max_length=255, blank=True)
run = models.ForeignKey('Runs')
class Meta:
managed = False
db_table = 'run_metadata'
class Runs(models.Model):
id = models.CharField(primary_key=True, max_length=36)
skips = models.IntegerField(blank=True, null=True)
fails = models.IntegerField(blank=True, null=True)
passes = models.IntegerField(blank=True, null=True)
run_time = models.FloatField(blank=True, null=True)
artifacts = models.TextField(blank=True)
class Meta:
managed = False
db_table = 'runs'
class TestMetadata(models.Model):
id = models.CharField(primary_key=True, max_length=36)
key = models.CharField(max_length=255, blank=True)
value = models.CharField(max_length=255, blank=True)
test_run = models.ForeignKey('Tests')
class Meta:
managed = False
db_table = 'test_metadata'
class TestRunMetadata(models.Model):
id = models.CharField(primary_key=True, max_length=36)
key = models.CharField(max_length=255, blank=True)
value = models.CharField(max_length=255, blank=True)
test_run = models.ForeignKey('TestRuns')
class Meta:
managed = False
db_table = 'test_run_metadata'
class TestRuns(models.Model):
id = models.CharField(primary_key=True, max_length=36)
test = models.ForeignKey('Tests')
run = models.ForeignKey(Runs)
status = models.CharField(max_length=256, blank=True)
start_time = models.DateTimeField(blank=True, null=True)
stop_time = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'test_runs'
class Tests(models.Model):
id = models.CharField(primary_key=True, max_length=36)
test_id = models.CharField(max_length=256)
run_count = models.IntegerField(blank=True, null=True)
success = models.IntegerField(blank=True, null=True)
failure = models.IntegerField(blank=True, null=True)
run_time = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'tests'
| gpl-2.0 |
shdxiang/yunba-smartoffice | tests/get_status.py | 2 | 1501 | #!/usr/bin/env python
import time
import sys
import logging
import argparse
from socketIO_client import SocketIO
from messenger import Messenger
logger = logging.getLogger('get_status')
logging.basicConfig(level=logging.DEBUG)
APPKEY = '5697113d4407a3cd028abead'
#TOPIC = 'yunba_smart_plug'
#ALIAS = 'plc_0'
class Status(Messenger):
def __init__(self, topic, alias, cmd):
self.__logger = logging.getLogger('get_status.Status')
self.__logger.info('init')
Messenger.__init__(self, APPKEY, 'status', 'status')
self.topic = topic
self.alias = alias
self.cmd = cmd
def __del__(self):
self.__logger.info('del')
def on_connack(self, args):
self.__logger.debug('on_connack: %s', args)
self.socketIO.emit('subscribe', {'topic': self.topic})
self.socketIO.emit('set_alias', {'alias': 'status'})
def on_set_alias(self, args):
self.__logger.debug('on_set_alias: %s', args)
self.publish_to_alias(self.alias, '{"cmd": "'+ self.cmd + '", "devid": "' + self.alias + '"}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Status')
parser.add_argument('topic', type=str, help='topic to subscribe')
parser.add_argument('alias', type=str, help='publish to this alias')
parser.add_argument('cmd', type=str, help='cmd')
args = parser.parse_args()
s = Status(args.topic, args.alias, args.cmd)
while True:
s.loop()
time.sleep(0.1)
| mit |
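Invocation sketch for the script above. The topic and alias come from the commented-out defaults in the file; the command string 'plc_status' is a made-up example, since the accepted cmd values are defined by the device firmware:

```python
# equivalent to: python get_status.py yunba_smart_plug plc_0 plc_status
import time
from get_status import Status

s = Status('yunba_smart_plug', 'plc_0', 'plc_status')  # cmd value is hypothetical
while True:
    s.loop()
    time.sleep(0.1)
```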
calfonso/ansible | lib/ansible/modules/web_infrastructure/django_manage.py | 22 | 11134 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all
management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb,
test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run
with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
aliases: [virtual_env]
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
type: bool
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python",
for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage:
command: cleanup
app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
- django_manage:
command: loaddata
app_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
# Run syncdb on the application
- django_manage:
command: syncdb
app_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage:
command: test
app_path: "{{ django_dir }}"
apps: main.SmokeTest
# Create an initial superuser.
- django_manage:
command: "createsuperuser --noinput --username=admin [email protected]"
app_path: "{{ django_dir }}"
"""
import os
import sys
from ansible.module_utils.basic import AnsibleModule
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(venv_param, 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
vcmd = '%s %s' % (virtualenv, venv_param)
vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return line and "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command=dict(default=None, required=True),
app_path=dict(default=None, required=True, type='path'),
settings=dict(default=None, required=False),
pythonpath=dict(default=None, required=False, aliases=['python_path']),
virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
apps=dict(default=None, required=False),
cache_table=dict(default=None, required=False),
clear=dict(default=None, required=False, type='bool'),
database=dict(default=None, required=False),
failfast=dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures=dict(default=None, required=False),
liveserver=dict(default=None, required=False, aliases=['live_server']),
testrunner=dict(default=None, required=False, aliases=['test_runner']),
skip=dict(default=None, required=False, type='bool'),
merge=dict(default=None, required=False, type='bool'),
link=dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = module.params['app_path']
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
rc, out, err = module.run_command(cmd, cwd=app_path)
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
filtered_output = list(filter(filt, lines))
if len(filtered_output):
changed = filtered_output
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
if __name__ == '__main__':
main()
| gpl-3.0 |
ganeshmurthy/qpid-dispatch | python/qpid_dispatch_internal/tools/__init__.py | 7 | 1221 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .display import Display, Header, Sorter, YN, Commas, TimeLong, TimeShort, Sortable, BodyFormat, PlainNum
from .display import NumKMG
__all__ = ["Display", "Header", "Sorter", "YN", "Commas", "TimeLong", "TimeShort", "Sortable", "BodyFormat", "PlainNum",
"NumKMG"]
| apache-2.0 |
dlab-berkeley/collaboratool-archive | bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/debug.py | 2 | 1760 | # Copyright 2012, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
''' Print statements during execution '''
NEEDS_TMPPATH = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
args = {}
if complex_args:
args.update(complex_args)
# attempt to prevent confusing messages when the variable didn't interpolate
module_args = module_args.replace("{{ ","{{").replace(" }}","}}")
kv = utils.parse_kv(module_args)
args.update(kv)
if not 'msg' in args:
args['msg'] = 'Hello world!'
if 'fail' in args and utils.boolean(args['fail']):
result = dict(failed=True, msg=args['msg'])
else:
result = dict(msg=args['msg'])
# force flag to make debug output module always verbose
result['verbose_always'] = True
return ReturnData(conn=conn, result=result)
| apache-2.0 |
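A sketch of how the free-form task arguments reach this plugin: `utils.parse_kv` (the same helper the plugin calls after collapsing `{{ }}` spacing) turns the raw module_args string into the dict that run() inspects, and `utils.boolean` decides whether the result is marked failed. Assuming an ansible 1.x installation is importable:

```python
from ansible import utils

raw = 'msg="deploy done"'
args = utils.parse_kv(raw.replace("{{ ", "{{").replace(" }}", "}}"))
print(args)  # {'msg': 'deploy done'} -> plugin returns {'msg': ..., 'verbose_always': True}

raw = 'msg="oops" fail=yes'
args = utils.parse_kv(raw)
print(utils.boolean(args['fail']))  # True -> plugin returns failed=True with msg 'oops'
```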
embeddedarm/android_external_chromium_org | third_party/android_platform/development/scripts/stack_core.py | 50 | 9531 | #!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""stack symbolizes native crash dumps."""
import re
import symbol
def PrintTraceLines(trace_lines):
"""Print back trace."""
maxlen = max(map(lambda tl: len(tl[1]), trace_lines))
print
print "Stack Trace:"
print " RELADDR " + "FUNCTION".ljust(maxlen) + " FILE:LINE"
for tl in trace_lines:
(addr, symbol_with_offset, location) = tl
print " %8s %s %s" % (addr, symbol_with_offset.ljust(maxlen), location)
return
def PrintValueLines(value_lines):
"""Print stack data values."""
maxlen = max(map(lambda tl: len(tl[2]), value_lines))
print
print "Stack Data:"
print " ADDR VALUE " + "FUNCTION".ljust(maxlen) + " FILE:LINE"
for vl in value_lines:
(addr, value, symbol_with_offset, location) = vl
print " %8s %8s %s %s" % (addr, value, symbol_with_offset.ljust(maxlen), location)
return
UNKNOWN = "<unknown>"
HEAP = "[heap]"
STACK = "[stack]"
def PrintOutput(trace_lines, value_lines, more_info):
if trace_lines:
PrintTraceLines(trace_lines)
if value_lines:
# TODO(cjhopman): it seems that symbol.SymbolInformation always fails to
# find information for addresses in value_lines in chrome libraries, and so
# value_lines have little value to us and merely clutter the output.
# Since information is sometimes contained in these lines (from system
# libraries), don't completely disable them.
if more_info:
PrintValueLines(value_lines)
def PrintDivider():
print
print "-----------------------------------------------------\n"
def ConvertTrace(lines, more_info):
"""Convert strings containing native crash to a stack."""
process_info_line = re.compile("(pid: [0-9]+, tid: [0-9]+.*)")
signal_line = re.compile("(signal [0-9]+ \(.*\).*)")
register_line = re.compile("(([ ]*[0-9a-z]{2} [0-9a-f]{8}){4})")
thread_line = re.compile("(.*)(\-\-\- ){15}\-\-\-")
dalvik_jni_thread_line = re.compile("(\".*\" prio=[0-9]+ tid=[0-9]+ NATIVE.*)")
dalvik_native_thread_line = re.compile("(\".*\" sysTid=[0-9]+ nice=[0-9]+.*)")
# Note that both trace and value line matching allow for variable amounts of
# whitespace (e.g. \t). This is because the we want to allow for the stack
# tool to operate on AndroidFeedback provided system logs. AndroidFeedback
# strips out double spaces that are found in tombsone files and logcat output.
#
# Examples of matched trace lines include lines from tombstone files like:
# #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
# #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so (symbol)
# Or lines from AndroidFeedback crash report system logs like:
# 03-25 00:51:05.520 I/DEBUG ( 65): #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
# Please note the spacing differences.
trace_line = re.compile("(.*)\#(?P<frame>[0-9]+)[ \t]+(..)[ \t]+(0x)?(?P<address>[0-9a-f]{0,8})[ \t]+(?P<lib>[^\r\n \t]*)(?P<symbol_present> \((?P<symbol_name>.*)\))?") # pylint: disable-msg=C6310
# Examples of matched value lines include:
# bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
# bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so (symbol)
# 03-25 00:51:05.530 I/DEBUG ( 65): bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
# Again, note the spacing differences.
value_line = re.compile("(.*)([0-9a-f]{8})[ \t]+([0-9a-f]{8})[ \t]+([^\r\n \t]*)( \((.*)\))?")
# Lines from 'code around' sections of the output will be matched before
# value lines because otheriwse the 'code around' sections will be confused as
# value lines.
#
# Examples include:
# 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
# 03-25 00:51:05.530 I/DEBUG ( 65): 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
code_line = re.compile("(.*)[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[ \r\n]") # pylint: disable-msg=C6310
trace_lines = []
value_lines = []
last_frame = -1
# It is faster to get symbol information with a single call rather than with
# separate calls for each line. Since symbol.SymbolInformation caches results,
# we can extract all the addresses that we will want symbol information for
# from the log and call symbol.SymbolInformation so that the results are
# cached in the following lookups.
code_addresses = {}
for ln in lines:
line = unicode(ln, errors='ignore')
lib, address = None, None
match = trace_line.match(line)
if match:
address, lib = match.group('address', 'lib')
match = value_line.match(line)
if match and not code_line.match(line):
(_0, _1, address, lib, _2, _3) = match.groups()
if lib:
code_addresses.setdefault(lib, set()).add(address)
for lib in code_addresses:
symbol.SymbolInformationForSet(
symbol.TranslateLibPath(lib), code_addresses[lib], more_info)
for ln in lines:
# AndroidFeedback adds zero width spaces into its crash reports. These
# should be removed or the regular expresssions will fail to match.
line = unicode(ln, errors='ignore')
process_header = process_info_line.search(line)
signal_header = signal_line.search(line)
register_header = register_line.search(line)
thread_header = thread_line.search(line)
dalvik_jni_thread_header = dalvik_jni_thread_line.search(line)
dalvik_native_thread_header = dalvik_native_thread_line.search(line)
if process_header or signal_header or register_header or thread_header \
or dalvik_jni_thread_header or dalvik_native_thread_header:
if trace_lines or value_lines:
PrintOutput(trace_lines, value_lines, more_info)
PrintDivider()
trace_lines = []
value_lines = []
last_frame = -1
if process_header:
print process_header.group(1)
if signal_header:
print signal_header.group(1)
if register_header:
print register_header.group(1)
if thread_header:
print thread_header.group(1)
if dalvik_jni_thread_header:
print dalvik_jni_thread_header.group(1)
if dalvik_native_thread_header:
print dalvik_native_thread_header.group(1)
continue
if trace_line.match(line):
match = trace_line.match(line)
frame, code_addr, area, symbol_present, symbol_name = match.group(
'frame', 'address', 'lib', 'symbol_present', 'symbol_name')
if frame <= last_frame and (trace_lines or value_lines):
PrintOutput(trace_lines, value_lines, more_info)
PrintDivider()
trace_lines = []
value_lines = []
last_frame = frame
if area == UNKNOWN or area == HEAP or area == STACK:
trace_lines.append((code_addr, "", area))
else:
# If a calls b which further calls c and c is inlined to b, we want to
# display "a -> b -> c" in the stack trace instead of just "a -> c"
info = symbol.SymbolInformation(area, code_addr, more_info)
nest_count = len(info) - 1
for (source_symbol, source_location, object_symbol_with_offset) in info:
if not source_symbol:
if symbol_present:
source_symbol = symbol.CallCppFilt(symbol_name)
else:
source_symbol = UNKNOWN
if not source_location:
source_location = area
if nest_count > 0:
nest_count = nest_count - 1
trace_lines.append(("v------>", source_symbol, source_location))
else:
if not object_symbol_with_offset:
object_symbol_with_offset = source_symbol
trace_lines.append((code_addr,
object_symbol_with_offset,
source_location))
if code_line.match(line):
# Code lines should be ignored. If this were exluded the 'code around'
# sections would trigger value_line matches.
continue;
if value_line.match(line):
match = value_line.match(line)
(unused_, addr, value, area, symbol_present, symbol_name) = match.groups()
if area == UNKNOWN or area == HEAP or area == STACK or not area:
value_lines.append((addr, value, "", area))
else:
info = symbol.SymbolInformation(area, value, more_info)
(source_symbol, source_location, object_symbol_with_offset) = info.pop()
if not source_symbol:
if symbol_present:
source_symbol = symbol.CallCppFilt(symbol_name)
else:
source_symbol = UNKNOWN
if not source_location:
source_location = area
if not object_symbol_with_offset:
object_symbol_with_offset = source_symbol
value_lines.append((addr,
value,
object_symbol_with_offset,
source_location))
PrintOutput(trace_lines, value_lines, more_info)
| bsd-3-clause |
hfp/tensorflow-xsmm | tensorflow/python/ops/functional_ops.py | 3 | 44679 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import remote_call
# pylint: enable=unused-import
from tensorflow.python.ops.gen_functional_ops import symbolic_gradient
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
@tf_export("foldl")
def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None):
"""foldl on the list of tensors unpacked from `elems` on dimension 0.
This foldl operator repeatedly applies the callable `fn` to a sequence
of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from first
to last.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = tf.constant([1, 2, 3, 4, 5, 6])
sum = foldl(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldl", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (tensor_shape.dimension_value(elems_flat[0].shape[0])
or array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
a = nest.map_structure(lambda elem: elem.read(0), elems_ta)
i = constant_op.constant(1)
else:
a = initializer
i = constant_op.constant(0)
def compute(i, a):
elem_i = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a = fn(a, elem_i)
return [i + 1, a]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i < n, compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("foldr")
def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from last
to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldr", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally and not
# issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (tensor_shape.dimension_value(elems_flat[0].shape[0])
or array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
i = n - 1
a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
else:
i = n
a = initializer
def compute(i, a):
i -= 1
elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a_out = fn(a, elem)
return [i, a_out]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i > 0,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("map_fn")
def map_fn(fn, elems, dtype=None, parallel_iterations=None, back_prop=True,
swap_memory=False, infer_shape=True, name=None):
"""map on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `map_fn` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the
tensors unpacked from `elems`. `dtype` is the data type of the return
value of `fn`. Users must provide `dtype` if it is different from
the data type of `elems`.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[values.shape[0]] + fn(values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Furthermore, `fn` may emit a different structure than its input. For example,
`fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case,
the `dtype` parameter is not optional: `dtype` must be a type or (possibly
nested) tuple of types matching the output of `fn`.
To apply a functional operation to the nonzero elements of a SparseTensor
one of the following methods is recommended. First, if the function is
expressible as TensorFlow ops, use
```python
result = SparseTensor(input.indices, fn(input.values), input.dense_shape)
```
If, however, the function is not expressible as a TensorFlow op, then use
```python
result = SparseTensor(
input.indices, map_fn(fn, input.values), input.dense_shape)
```
instead.
When executing eagerly, map_fn does not execute in parallel even if
`parallel_iterations` is set to a value > 1. You can still get the
performance benefits of running a function in parallel by using the
`tf.contrib.eager.defun` decorator,
```python
# Assume the function being used in map_fn is fn.
# To ensure map_fn calls fn in parallel, use the defun decorator.
@tf.contrib.eager.defun
def func(tensor):
return tf.map_fn(fn, tensor)
```
Note that if you use the defun decorator, any non-TensorFlow Python code
that you may have written in your function won't get executed. See
`tf.contrib.eager.defun` for more details. The recommendation would be to
debug without defun but switch to defun to get performance benefits of
running map_fn in parallel.
Args:
fn: The callable to be performed. It accepts one argument, which will
have the same (possibly nested) structure as `elems`. Its output
must have the same structure as `dtype` if one is provided, otherwise
it must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be applied to `fn`.
dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure
of Tensors differing from the structure of `elems`, then `dtype` is not
optional and must have the same structure as the output of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel. When graph building, the default value is 10. While executing
eagerly, the default value is set to 1.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, from first to last.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `dtype` do not match, or if elems is a SparseTensor.
ValueError: if the lengths of the output of `fn` and `dtype` do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
squares = map_fn(lambda x: x * x, elems)
# squares == [1, 4, 9, 16, 25, 36]
```
```python
elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64)
# alternate == [-1, 2, -3]
```
```python
elems = np.array([1, 2, 3])
alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64))
# alternates[0] == [1, 2, 3]
# alternates[1] == [-1, -2, -3]
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
if isinstance(elems, sparse_tensor.SparseTensor):
raise TypeError(
"To perform a map on the values of a sparse tensor use either "
" SparseTensor(input.indices, fn(input.values), input.dense_shape) or "
" SparseTensor(input.indices, map_fn(fn, input.values), "
"input.dense_shape)")
in_graph_mode = not context.executing_eagerly()
# Set the default number of parallel_iterations depending on graph/eager mode.
if in_graph_mode and not parallel_iterations:
parallel_iterations = 10
elif not in_graph_mode and not parallel_iterations:
parallel_iterations = 1
if not in_graph_mode and parallel_iterations > 1:
logging.log_first_n(logging.WARN, "Setting parallel_iterations > 1 has no "
"effect when executing eagerly. Consider calling map_fn"
" with tf.contrib.eager.defun to execute fn in "
"parallel.", 1)
parallel_iterations = 1
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if dtype is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(dtype)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(dtype, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
with ops.name_scope(name, "map", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
dtype = dtype or input_pack([elem.dtype for elem in elems_flat])
dtype_flat = output_flatten(dtype)
# Convert elems to tensor array. n may be known statically.
static_shape = elems_flat[0].shape
if static_shape.ndims is not None and static_shape.ndims < 1:
if len(elems_flat) == 1:
raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar")
else:
raise ValueError(
"elements in elems must be 1+ dimensional Tensors, not scalars"
)
n = (tensor_shape.dimension_value(static_shape[0])
or array_ops.shape(elems_flat[0])[0])
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
dynamic_size=False,
infer_shape=True)
for elem in elems_flat]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
i = constant_op.constant(0)
accs_ta = [
tensor_array_ops.TensorArray(dtype=dt, size=n,
dynamic_size=False,
infer_shape=infer_shape)
for dt in dtype_flat]
def compute(i, tas):
"""The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if dtype and packed_fn_values structure do not match
ValueType: if dtype and packed_fn_values lengths do not match
"""
packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_fn_values = fn(packed_values)
nest.assert_same_structure(dtype or elems, packed_fn_values)
flat_fn_values = output_flatten(packed_fn_values)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)]
return (i + 1, tas)
_, r_a = control_flow_ops.while_loop(
lambda i, _: i < n, compute, (i, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
results_flat = [r.stack() for r in r_a]
n_static = tensor_shape.Dimension(tensor_shape.dimension_value(
elems_flat[0].get_shape().with_rank_at_least(1)[0]))
for elem in elems_flat[1:]:
n_static.merge_with(tensor_shape.Dimension(tensor_shape.dimension_value(
elem.get_shape().with_rank_at_least(1)[0])))
for r in results_flat:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
@tf_export("scan")
def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, infer_shape=True, reverse=False, name=None):
"""scan on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `scan` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
If `reverse=True`, it's `fn(initializer, values[-1]).shape`.
This method also allows multi-arity `elems` and accumulator. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The second argument of
`fn` must match the structure of `elems`.
If no `initializer` is provided, the output structure and dtypes of `fn`
are assumed to be the same as its input; and in this case, the first
argument of `fn` must match the structure of `elems`.
If an `initializer` is provided, then the output of `fn` must have the same
structure as `initializer`; and the first argument of `fn` must match
this structure.
For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
`[i1, i2]` then an appropriate signature for `fn` in `python2` is:
`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,
`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
one that works in `python3`, is:
`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
Args:
fn: The callable to be performed. It accepts two arguments. The first
will have the same structure as `initializer` if one is provided,
otherwise it will have the same structure as `elems`. The second
will have the same (possibly nested) structure as `elems`. Its output
must have the same structure as `initializer` if one is provided,
otherwise it must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
initial value for the accumulator, and the expected output type of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
reverse: (optional) True scans the tensor last to first (instead of first
to last).
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, and the previous accumulator value(s), from first to last (or
last to first, if `reverse=True`).
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `initializer` do not match.
ValueError: if the lengths of the output of `fn` and `initializer`
do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
sum = scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
sum = scan(lambda a, x: a + x, elems, reverse=True)
# sum == [21, 20, 18, 15, 11, 6]
```
```python
elems = np.array([1, 2, 3, 4, 5, 6])
initializer = np.array(0)
sum_one = scan(
lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
# sum_one == [1, 2, 3, 4, 5, 6]
```
```python
elems = np.array([1, 0, 0, 0, 0, 0])
initializer = (np.array(0), np.array(1))
fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
# fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if initializer is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(initializer)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(initializer, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "scan", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensors.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
# Convert elems to tensor array. n may be known statically.
n = (tensor_shape.dimension_value(elems_flat[0].shape[0])
or array_ops.shape(elems_flat[0])[0])
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
dynamic_size=False,
infer_shape=True)
for elem in elems_flat]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
if initializer is None:
a_flat = [elem.read(n - 1 if reverse else 0) for elem in elems_ta]
i = constant_op.constant(1)
else:
initializer_flat = output_flatten(initializer)
a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]
i = constant_op.constant(0)
# Create a tensor array to store the intermediate values.
accs_ta = [
tensor_array_ops.TensorArray(
dtype=init.dtype, size=n,
element_shape=init.shape if infer_shape else None,
dynamic_size=False,
infer_shape=infer_shape)
for init in a_flat]
if initializer is None:
accs_ta = [acc_ta.write(n - 1 if reverse else 0, a)
for (acc_ta, a) in zip(accs_ta, a_flat)]
def compute(i, a_flat, tas):
"""The loop body of scan.
Args:
i: the loop counter.
a_flat: the accumulator value(s), flattened.
tas: the output accumulator TensorArray(s), flattened.
Returns:
[i + 1, a_flat, tas]: the updated counter + new accumulator values +
updated TensorArrays
Raises:
TypeError: if initializer and fn() output structure do not match
ValueType: if initializer and fn() output lengths do not match
"""
packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_a = output_pack(a_flat)
a_out = fn(packed_a, packed_elems)
nest.assert_same_structure(
elems if initializer is None else initializer, a_out)
flat_a_out = output_flatten(a_out)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]
if reverse:
next_i = i - 1
else:
next_i = i + 1
return (next_i, flat_a_out, tas)
if reverse:
initial_i = n - 1 - i
condition = lambda i, _1, _2: i >= 0
else:
initial_i = i
condition = lambda i, _1, _2: i < n
_, _, r_a = control_flow_ops.while_loop(
condition, compute, (initial_i, a_flat, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop, swap_memory=swap_memory,
maximum_iterations=n)
results_flat = [r.stack() for r in r_a]
n_static = tensor_shape.Dimension(tensor_shape.dimension_value(
elems_flat[0].get_shape().with_rank_at_least(1)[0]))
for elem in elems_flat[1:]:
n_static.merge_with(tensor_shape.Dimension(tensor_shape.dimension_value(
elem.get_shape().with_rank_at_least(1)[0])))
for r in results_flat:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
# pylint: disable=invalid-name
def If(cond, inputs, then_branch, else_branch, name=None):
r"""output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs).
Args:
cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is
converted to a boolean according to the following rule: if the
scalar is a numerical value, non-zero means True and zero means
False; if the scalar is a string, non-empty means True and empty
means False.
inputs: A list of input tensors.
then_branch: A function that takes 'inputs' and returns a list of tensors,
whose types are the same as what else_branch returns.
else_branch: A function that takes 'inputs' and returns a list of tensors,
whose types are the same as what then_branch returns.
name: A name for the operation (optional).
Returns:
A list of tensors returned by either then_branch(inputs)
or else_branch(inputs).
"""
# pylint: disable=protected-access
return gen_functional_ops._if(
cond,
inputs, [_.type for _ in then_branch.definition.signature.output_arg],
then_branch,
else_branch,
name=name)
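# Example (illustrative sketch, not part of the original module): the branch
# arguments to `If` are `function.Defun`-wrapped callables whose output types
# match. The function and variable names below are hypothetical.
#
#   @function.Defun(dtypes.float32)
#   def TwoTimes(x):
#     return x * 2.0
#
#   @function.Defun(dtypes.float32)
#   def ThreeTimes(x):
#     return x * 3.0
#
#   x = constant_op.constant(1.0)
#   # Returns [2.0] because the condition x > 0 is true.
#   out = If(math_ops.greater(x, 0.0), [x], TwoTimes, ThreeTimes)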
def Gradient(inputs, f, name=None):
r"""Computes the gradient function for function f via backpropagation.
Args:
inputs: A list of tensors of size N + M.
f: The function we want to compute the gradient for.
The function 'f' must be a numerical function which takes N inputs and
produces M outputs. Its gradient function 'g' is a function that takes
N + M inputs and produces N outputs.
I.e. if we have
(y1, y2, ..., yM) = f(x1, x2, ..., xN),
then, g is
(dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN,
dL/dy1, dL/dy2, ..., dL/dyM),
where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
loss function). dL/dxi is the partial derivative of L with respect
to xi.
name: A name for the operation (optional).
Returns:
A list of tensors of size N.
"""
# TODO(zhifengc): Pretty-print the above spec in latex.
# TODO(zhifengc): Needs a math expert to state the comment above more clearly.
tlist = [_.type for _ in f.definition.signature.input_arg]
return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name)
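# Example (illustrative sketch, not part of the original module): for a
# Defun'd forward function y = f(x), `Gradient` is given the N forward inputs
# followed by the M upstream gradients dL/dy. Names below are hypothetical.
#
#   @function.Defun(dtypes.float32)
#   def Square(x):
#     return x * x
#
#   x = constant_op.constant(3.0)
#   dy = constant_op.constant(1.0)
#   dx = Gradient([x, dy], Square)[0]  # dL/dx == 2 * x * dy == 6.0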
def _LoopBodyCaptureWrapper(func):
"""Returns a wrapper for `func` that handles loop-carried captured inputs."""
@function.Defun(
*func.declared_input_types, func_name="%s_Wrapper" % func.name)
def Wrapper(*args):
"""A wrapper that handles loop-carried captured inputs."""
result = func(*args)
extra_args = tuple(function.get_extra_args())
# Nullary functions return an Operation. Normal functions can't do this
# because their return values are converted to Tensors.
if isinstance(result, ops.Operation):
return extra_args
# Unary functions return a single Tensor value.
elif not isinstance(result, tuple):
return (result,) + extra_args
# N-ary functions return a tuple of Tensors.
else:
return result + extra_args
return Wrapper
# pylint: disable=invalid-name,protected-access
def While(input_, cond, body, name=None, hostmem=None):
r"""output = input; While (Cond(output)) { output = Body(output) }.
Args:
input_: A list of `Tensor` objects.
A list of input tensors whose types are T.
cond: A function that takes 'input' and returns a tensor. If the tensor is
a scalar of non-boolean type, the scalar is converted to a boolean
according to the following rule: if the scalar is a numerical
value, non-zero means True and zero means False; if the scalar is
a string, non-empty means True and empty means False. If the
tensor is not a scalar, non-emptiness means True and False
otherwise.
body: A function that takes a list of tensors and returns another
list of tensors. Both lists have the same types as specified
by T.
name: A name for the operation (optional).
hostmem: A list of integers. If i is in the list, input[i] is a
host memory tensor.
Raises:
ValueError: if `cond` has implicitly captured inputs or if `cond` and `body`
have different signatures.
Returns:
A list of `Tensor` objects. Has the same type as `input`.
A list of output tensors whose types are T.
"""
if cond.captured_inputs:
raise ValueError("While op 'cond' argument must be a function "
"without implicitly captured inputs.")
if cond.declared_input_types != body.declared_input_types:
raise ValueError(
"While op 'cond' and 'body' signatures do not match. %r vs %r" %
(cond.declared_input_types, body.declared_input_types))
if body.captured_inputs:
cond_dtypes = list(
body.declared_input_types) + [t.dtype for t in body.captured_inputs]
@function.Defun(*cond_dtypes, func_name="%s_Wrapper" % cond.name)
def CondWrapper(*args):
"""A wrapper that handles loop-carried captured inputs."""
return cond(*args[:len(body.declared_input_types)])
ret = gen_functional_ops._while(
input_ + body.captured_inputs,
CondWrapper,
_LoopBodyCaptureWrapper(body),
name=name)
# Slice off the loop-carried captured inputs.
ret = ret[:-len(body.captured_inputs)]
else:
ret = gen_functional_ops._while(input_, cond, body, name=name)
if hostmem:
input_attr = attr_value_pb2.AttrValue()
input_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access
output_attr = attr_value_pb2.AttrValue()
output_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access
return ret
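# Example (illustrative sketch, not part of the original module): `cond` and
# `body` must be Defun'd with identical signatures, and `body` returns updated
# values of the same types. Names below are hypothetical.
#
#   @function.Defun(dtypes.int32, dtypes.int32)
#   def Cond(i, unused_acc):
#     return i < 10
#
#   @function.Defun(dtypes.int32, dtypes.int32)
#   def Body(i, acc):
#     return i + 1, acc + i
#
#   # Counts i up to 10 while accumulating 0 + 1 + ... + 9 == 45.
#   final_i, final_acc = While([0, 0], Cond, Body)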
# b/36459430
#
# Ideally, we would not need to rewrite a For loop into a While loop.
# However, today, if a While runs on GPU and the condition returns a
# boolean, the While kernel crashes. Even if we fix the crash, the
# bool needs to be copied between GPU and CPU. So, a For loop is much
# preferred when running on GPU.
#
# On the other hand, the For op has no direct XLA kernel. So, when we run
# a for loop under XLA, we need to rewrite it using a While op.
#
# It should be possible, and probably better, to write an XLA C++ kernel
# implementing the logic in _ForUsingWhile.
def _ForUsingWhile(start,
limit,
delta,
inputs,
forbody,
name=None,
hostmem=None):
"""Helper to implement a For loop using a While."""
# To support negative delta (e.g., range(100, 0, -3)), we iterate
# over the range(n) and use iter * delta + start as the real
# iteration index. (e.g., for i in range(34): iter = i * (-3) +
# 100).
d = math_ops.abs(delta)
# XLA on TPUs doesn't support integer division
n = math_ops.cast(
math_ops.cast((math_ops.abs(limit - start) + d - 1), dtypes.float32) /
math_ops.cast(d, dtypes.float32), dtypes.int32)
# Carried loop variables ("extra_args") are implicitly added to the input list
# of the WhileBody function. WhileCond does not call forbody, and so does not
# depend on any of forbody's extra_args. Since WhileCond and WhileBody
# must have identical inputs, we have to augment the cond signature to take
# the same types as the carried loop variables.
body_sig = [dtypes.int32] * 4 + list(forbody.declared_input_types)[1:]
cond_name = "%s_Cond" % forbody.name
@function.Defun(*body_sig, func_name=cond_name)
def WhileCond(i, n, *args):
del args
return i < n
body_name = "%s_Body" % forbody.name
@function.Defun(*body_sig, func_name=body_name)
def WhileBody(i, n, start, delta, *args):
"""A While wrapper for forbody that handles loop-carried captured inputs."""
for_result = forbody(start + i * delta, *args)
# Nullary functions return an Operation. Normal functions can't do this
# because their return values are converted to Tensors.
if isinstance(for_result, ops.Operation):
for_result = ()
# Unary functions return a single Tensor value.
elif isinstance(for_result, ops.Tensor):
for_result = (for_result,)
return (i + 1, n, start, delta) + tuple(for_result)
if hostmem is not None:
hostmem = [0, 1, 2, 3] + [(4 + _) for _ in hostmem]
else:
hostmem = [0, 1, 2, 3]
results = While(
input_=[0, n, start, delta] + inputs,
cond=WhileCond,
body=WhileBody,
name=name,
hostmem=hostmem)
# Slice off the loop-carried captured inputs.
return list(results[4:len(results)])
def For(start,
limit,
delta,
inputs,
body,
name=None,
hostmem=None,
rewrite_with_while=None):
r"""out = input; for i in range(start, limit, delta) out = body(i, out).
Args:
start: A `Tensor` of type `int32`.
limit: A `Tensor` of type `int32`.
delta: A `Tensor` of type `int32`.
inputs: A list of `Tensor` objects.
A list of input tensors whose types are T.
body: A function that takes a list of tensors and returns another
list of tensors. Both lists have the same types as (int32, T...).
name: A name for the operation (optional).
hostmem: A list of integers. If i is in the list, inputs[i] is a
host memory tensor. In other words, the (i+1)-th argument of the body
function expects a host memory tensor.
rewrite_with_while: If True, using While op to implement the For.
Returns:
A list of `Tensor` objects. Has the same type as `input`.
A list of output tensors whose types are T.
"""
if rewrite_with_while:
return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)
if body.captured_inputs:
ret = gen_functional_ops._for(
start,
limit,
delta,
inputs + body.captured_inputs,
_LoopBodyCaptureWrapper(body),
name=name)
# Slice off the loop-carried captured inputs.
ret = ret[:-len(body.captured_inputs)]
else:
ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)
if hostmem:
num_for_params = 3 # start/limit/delta
input_attr = attr_value_pb2.AttrValue()
input_attr.list.i.extend([num_for_params + i for i in hostmem])
ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access
output_attr = attr_value_pb2.AttrValue()
output_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access
return ret
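# Example (illustrative sketch, not part of the original module): the body of
# a `For` receives the loop index followed by the loop-carried values. Names
# below are hypothetical.
#
#   @function.Defun(dtypes.int32, dtypes.float32)
#   def Body(i, acc):
#     return acc + math_ops.cast(i, dtypes.float32)
#
#   total = For(0, 10, 1, [0.0], Body)[0]  # 0 + 1 + ... + 9 == 45.0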
# pylint: enable=invalid-name,protected-access
_rewriter_config_optimizer_disabled = None
def _get_disabled_rewriter_config():
global _rewriter_config_optimizer_disabled
if _rewriter_config_optimizer_disabled is None:
config = config_pb2.ConfigProto()
rewriter_config = config.graph_options.rewrite_options
rewriter_config.disable_meta_optimizer = True
_rewriter_config_optimizer_disabled = config.SerializeToString()
return _rewriter_config_optimizer_disabled
def partitioned_call(args, f, tout=None, executing_eagerly=None, config=None,
executor_type=None):
"""Executes a function while respecting device annotations.
Currently, only those functions that execute within the same address space
can be executed.
Args:
args: The arguments of the function, including captured inputs.
f: The function to execute; an instance of `_DefinedFunction` or
`_EagerDefinedFunction`.
tout: a list containing the output dtype enums; if `None`, inferred from
the signature of `f`.
executing_eagerly: (Optional) A boolean indicating whether the context is
executing eagerly. If `None`, fetched from the global context.
config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If
`None`, all optimizations are disabled. Currently only handled for eager
defined functions.
executor_type: (Optional) A string for the name of the executor to be used
in the function call. If not set, or set to an empty string, the default
tensorflow executor will be used.
Returns:
The list of `Tensor`s returned by invoking `f(args)`. If the function does
not return anything, then returns `None` if eager execution is enabled, or
the `Operation` if not.
"""
if tout is None:
tout = tuple(x.type for x in f.definition.signature.output_arg)
if executing_eagerly is None:
executing_eagerly = context.executing_eagerly()
if config is None:
config = _get_disabled_rewriter_config()
if executor_type is None:
executor_type = ""
if executing_eagerly or len(tout):
if f.stateful_ops:
outputs = gen_functional_ops.stateful_partitioned_call(
args=args, Tout=tout, f=f, config_proto=config,
executor_type=executor_type)
else:
outputs = gen_functional_ops.partitioned_call(
args=args, Tout=tout, f=f, config_proto=config,
executor_type=executor_type)
return outputs if outputs else None
# The generated binding returns an empty list for functions that don't
# return any Tensors, hence the need to use `create_op` directly.
args = [ops.internal_convert_to_tensor(x) for x in args]
tin_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
type=[x.dtype.as_datatype_enum for x in args]))
tout_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=tout))
func_attr = attr_value_pb2.AttrValue(
func=attr_value_pb2.NameAttrList(name=f.name))
executor_type_attr = attr_value_pb2.AttrValue(
s=compat.as_bytes(executor_type))
# When running in graph mode, the graph and function graphs are optimized
# (i.e. run through grappler) per the session options, so we can disable any
# eager-specific rewriting.
config_proto = attr_value_pb2.AttrValue(s=_get_disabled_rewriter_config())
graph = ops.get_default_graph()
f.add_to_graph(graph)
op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
op = graph.create_op(
op_name,
args,
tout,
compute_shapes=False,
name="PartitionedFunctionCall",
attrs={
"Tin": tin_attr,
"Tout": tout_attr,
"f": func_attr,
"config_proto": config_proto,
"executor_type": executor_type_attr,
})
outputs = op.outputs
return outputs if outputs else op
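# Example (illustrative sketch, not part of the original module): calling a
# Defun'd function through `partitioned_call`. The function name below is
# hypothetical.
#
#   @function.Defun(dtypes.float32)
#   def PlusOne(x):
#     return x + 1.0
#
#   outputs = partitioned_call(args=[constant_op.constant(1.0)], f=PlusOne)
#   # outputs is a list holding the single result tensor, i.e. [2.0].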
| apache-2.0 |
bluecoiner/bluecoin-new | qa/rpc-tests/disablewallet.py | 102 | 1820 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise API with -disablewallet.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
self.is_network_split = False
self.sync_all()
def run_test (self):
# Check regression: https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet
try:
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
except JSONRPCException as e:
assert("Invalid address" not in e.error['message'])
assert("ProcessNewBlock, block not accepted" not in e.error['message'])
assert("Couldn't create new block" not in e.error['message'])
try:
self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
raise AssertionError("Must not mine to invalid address!")
except JSONRPCException as e:
assert("Invalid address" in e.error['message'])
if __name__ == '__main__':
DisableWalletTest ().main ()
| mit |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/email/test/test_email_torture.py | 85 | 3657 | # Copyright (C) 2002-2004 Python Software Foundation
#
# A torture test of the email package. This should not be run as part of the
# standard Python test suite since it requires several meg of email messages
# collected in the wild. These source messages are not checked into the
# Python distro, but are available as part of the standalone email package at
# http://sf.net/projects/mimelib
import sys
import os
import unittest
from io import StringIO
from email.test.test_email import TestEmailBase
from test.support import run_unittest
import email
from email import __file__ as testfile
from email.iterators import _structure
def openfile(filename):
from os.path import join, dirname, abspath
path = abspath(join(dirname(testfile), os.pardir, 'moredata', filename))
return open(path, 'r')
# Prevent this test from running in the Python distro
try:
openfile('crispin-torture.txt')
except IOError:
raise unittest.SkipTest("crispin-torture.txt test data is not available")
class TortureBase(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
class TestCrispinTorture(TortureBase):
# Mark Crispin's torture test from the SquirrelMail project
def test_mondo_message(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = self._msgobj('crispin-torture.txt')
payload = msg.get_payload()
eq(type(payload), list)
eq(len(payload), 12)
eq(msg.preamble, None)
eq(msg.epilogue, '\n')
# Probably the best way to verify the message is parsed correctly is to
# dump its structure and compare it against the known structure.
fp = StringIO()
_structure(msg, fp=fp)
neq(fp.getvalue(), """\
multipart/mixed
text/plain
message/rfc822
multipart/alternative
text/plain
multipart/mixed
text/richtext
application/andrew-inset
message/rfc822
audio/basic
audio/basic
image/pbm
message/rfc822
multipart/mixed
multipart/mixed
text/plain
audio/x-sun
multipart/mixed
image/gif
image/gif
application/x-be2
application/atomicmail
audio/x-sun
message/rfc822
multipart/mixed
text/plain
image/pgm
text/plain
message/rfc822
multipart/mixed
text/plain
image/pbm
message/rfc822
application/postscript
image/gif
message/rfc822
multipart/mixed
audio/basic
audio/basic
message/rfc822
multipart/mixed
application/postscript
text/plain
message/rfc822
multipart/mixed
text/plain
multipart/parallel
image/gif
audio/basic
application/atomicmail
message/rfc822
audio/x-sun
""")
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| apache-2.0 |
ep1cman/workload-automation | wlauto/tests/test_instrumentation.py | 5 | 7244 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0231,W0613,E0611,W0603,R0201
from unittest import TestCase
from nose.tools import assert_equal, raises, assert_true, assert_false
from wlauto import Instrument
from wlauto.core import signal, instrumentation
from wlauto.instrumentation import instrument_is_installed, instrument_is_enabled, clear_instrumentation
class MockInstrument(Instrument):
name = 'mock'
def __init__(self):
Instrument.__init__(self, None)
self.before = 0
self.after = 0
def before_workload_execution(self, context):
self.before += 1
def after_workload_execution(self, context):
self.after += 1
class MockInstrument2(Instrument):
name = 'mock_2'
def __init__(self):
Instrument.__init__(self, None)
self.before = 0
self.after = 0
self.result = 0
def before_workload_execution(self, context):
self.before += 1
def after_workload_execution(self, context):
self.after += 1
def after_workload_result_update(self, context):
self.result += 1
class MockInstrument3(Instrument):
name = 'mock_3'
def __init__(self):
Instrument.__init__(self, None)
def slow_before_workload_execution(self, context):
global counter
counter += 1
class MockInstrument4(Instrument):
name = 'mock_4'
def __init__(self):
Instrument.__init__(self, None)
def slow_before_first_iteration_boot(self, context):
global counter
counter = 4
class MockInstrument5(Instrument):
name = 'mock_5'
def __init__(self):
Instrument.__init__(self, None)
def fast_before_first_iteration_boot(self, context):
global counter
counter += 2
class MockInstrument6(Instrument):
name = 'mock_6'
def __init__(self):
Instrument.__init__(self, None)
def before_first_iteration_boot(self, context):
global counter
counter *= 10
class BadInstrument(Instrument):
name = 'bad'
def __init__(self):
pass
# Not specifying the context argument.
def teardown(self):
pass
counter = 0
class InstrumentationTest(TestCase):
def tearDown(self):
clear_instrumentation()
def test_install(self):
instrument = _instantiate(MockInstrument)
instrument2 = _instantiate(MockInstrument2)
instrumentation.install(instrument)
instrumentation.install(instrument2)
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 1)
assert_equal(instrument.after, 1)
assert_equal(instrument2.before, 1)
assert_equal(instrument2.after, 1)
assert_equal(instrument2.result, 1)
def test_enable_disable(self):
instrument = _instantiate(MockInstrument)
instrument2 = _instantiate(MockInstrument2)
instrumentation.install(instrument)
instrumentation.install(instrument2)
instrumentation.disable_all()
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 0)
assert_equal(instrument.after, 0)
assert_equal(instrument2.before, 0)
assert_equal(instrument2.after, 0)
assert_equal(instrument2.result, 0)
instrumentation.enable(instrument)
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 1)
assert_equal(instrument.after, 1)
assert_equal(instrument2.before, 0)
assert_equal(instrument2.after, 0)
assert_equal(instrument2.result, 0)
instrumentation.enable_all()
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 2)
assert_equal(instrument.after, 2)
assert_equal(instrument2.before, 1)
assert_equal(instrument2.after, 1)
assert_equal(instrument2.result, 1)
def test_check_enabled(self):
instrument = _instantiate(MockInstrument)
instrumentation.install(instrument)
instrumentation.enable(instrument)
assert_true(instrument_is_enabled(instrument))
assert_true(instrument_is_enabled(instrument.name))
instrumentation.disable(instrument)
assert_false(instrument_is_enabled(instrument))
assert_false(instrument_is_enabled(instrument.name))
def test_local_instrument(self):
global counter
counter = 0
self.install_local_instrument()
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
assert_equal(counter, 1)
def test_priority_prefix_instrument(self):
global counter
counter = 0
instrument1 = _instantiate(MockInstrument4)
instrument2 = _instantiate(MockInstrument5)
instrument3 = _instantiate(MockInstrument6)
instrumentation.install(instrument1)
instrumentation.install(instrument2)
instrumentation.install(instrument3)
signal.send(signal.BEFORE_FIRST_ITERATION_BOOT, self, context=None)
assert_equal(counter, 42)
@raises(ValueError)
def test_bad_argspec(self):
instrument = _instantiate(BadInstrument)
instrumentation.install(instrument)
def test_check_installed(self):
instrumentation.install(_instantiate(MockInstrument))
assert_true(instrument_is_installed('mock'))
assert_true(instrument_is_installed(MockInstrument))
assert_false(instrument_is_installed(MockInstrument2))
def install_local_instrument(self):
instrument = _instantiate(MockInstrument3)
instrumentation.install(instrument)
@raises(ValueError)
def test_duplicate_install(self):
instrument = _instantiate(MockInstrument)
instrument2 = _instantiate(MockInstrument)
instrumentation.install(instrument)
instrumentation.install(instrument2)
def _instantiate(cls):
# Needed to get around Extension's __init__ checks
return cls()
| apache-2.0 |
salamer/django | django/contrib/gis/geoip/prototypes.py | 535 | 3943 | from ctypes import POINTER, Structure, c_char_p, c_float, c_int, string_at
from django.contrib.gis.geoip.libgeoip import free, lgeoip
# #### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
# to a `ctypes.Union` to accommodate this in the future, when
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
geoip_encodings = {
0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure):
pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# #### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if result:
# Checking the pointer to the C structure, if valid pull out elements
# into a dictionary.
rec = result.contents
record = {fld: getattr(rec, fld) for fld, ctype in rec._fields_}
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = ''
return s.decode(GEOIP_DEFAULT_ENCODING)
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
def _err_check(result, func, cargs):
if result:
return result.decode(GEOIP_DEFAULT_ENCODING)
return result
func.restype = c_char_p
func.errcheck = _err_check
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
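# Example (illustrative sketch, not part of the original module; the database
# path and hostname are assumptions): these prototypes are normally driven by
# django.contrib.gis.geoip.GeoIP, roughly as follows.
#
#   db = GeoIP_open(b'/usr/share/GeoIP/GeoIP.dat', 0)
#   code = GeoIP_country_code_by_name(db, b'example.com')
#   GeoIP_delete(db)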
| bsd-3-clause |
damienmg/bazel | third_party/def_parser/def_parser_test.py | 17 | 4087 | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from src.test.py.bazel import test_base
class DEFParserTest(test_base.TestBase):
def createAndBuildProjectFiles(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('BUILD', ['cc_library(name="hello", srcs=["x.cc"])'])
self.ScratchFile('x.cc', [
'#include <stdio.h>',
'int hello_data;',
'void hello_world() {',
' printf("hello world\\n");',
'}',
])
exit_code, _, stderr = self.RunBazel(['build', '//:hello'])
self.AssertExitCode(exit_code, 0, stderr)
def testParseDefFileFromObjectFile(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.createAndBuildProjectFiles()
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = stdout[0]
objfile = os.path.join(bazel_bin, '_objs', 'hello', 'x.o')
self.assertTrue(os.path.isfile(objfile))
output_def = self.Path('x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), output_def, 'my_x.dll', objfile])
self.assertTrue(os.path.isfile(output_def))
with open(output_def, 'r') as def_file:
def_content = def_file.read()
self.assertIn('LIBRARY my_x.dll', def_content)
self.assertIn('hello_data', def_content)
self.assertIn('hello_world', def_content)
def testParseDefFileFromObjectFileWithParamFile(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.createAndBuildProjectFiles()
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = stdout[0]
objfile = os.path.join(bazel_bin, '_objs', 'hello', 'x.o')
self.assertTrue(os.path.isfile(objfile))
objfilelist = self.ScratchFile('objfilelist', [objfile])
output_def = self.Path('x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), output_def, 'my_x.dll', '@' + objfilelist])
self.assertTrue(os.path.isfile(output_def))
with open(output_def, 'r') as def_file:
def_content = def_file.read()
self.assertIn('LIBRARY my_x.dll', def_content)
self.assertIn('hello_data', def_content)
self.assertIn('hello_world', def_content)
def testParseDefFileFromAnotherDefFile(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.createAndBuildProjectFiles()
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = stdout[0]
objfile = os.path.join(bazel_bin, '_objs', 'hello', 'x.o')
self.assertTrue(os.path.isfile(objfile))
output_def = self.Path('x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), output_def, 'my_x.dll', objfile])
self.assertTrue(os.path.isfile(output_def))
new_output_def = self.Path('new_x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), new_output_def, 'my_x.dll', output_def])
self.assertTrue(os.path.isfile(new_output_def))
with open(new_output_def, 'r') as def_file:
def_content = def_file.read()
self.assertIn('LIBRARY my_x.dll', def_content)
self.assertIn('hello_data', def_content)
self.assertIn('hello_world', def_content)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/boto/services/result.py | 153 | 5596 | #!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
class ResultProcessor(object):
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
self.sd = sd
self.batch = batch_name
self.log_fp = None
self.num_files = 0
self.total_time = 0
self.min_time = timedelta.max
self.max_time = timedelta.min
self.earliest_time = datetime.max
self.latest_time = datetime.min
self.queue = self.sd.get_obj('output_queue')
self.domain = self.sd.get_obj('output_domain')
def calculate_stats(self, msg):
start_time = parse_ts(msg['Service-Read'])
end_time = parse_ts(msg['Service-Write'])
elapsed_time = end_time - start_time
if elapsed_time > self.max_time:
self.max_time = elapsed_time
if elapsed_time < self.min_time:
self.min_time = elapsed_time
self.total_time += elapsed_time.seconds
if start_time < self.earliest_time:
self.earliest_time = start_time
if end_time > self.latest_time:
self.latest_time = end_time
def log_message(self, msg, path):
keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
self.log_fp.write(line+'\n')
values = []
for key in keys:
value = msg[key]
if value.find(',') > 0:
value = '"%s"' % value
values.append(value)
line = ','.join(values)
self.log_fp.write(line+'\n')
def process_record(self, record, path, get_file=True):
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
for output in outputs:
if get_file:
key_name = output.split(';')[0]
key = bucket.lookup(key_name)
file_name = os.path.join(path, key_name)
print('retrieving file: %s to %s' % (key_name, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
m = self.queue.read()
def get_results_from_domain(self, path, get_file=True):
rs = self.domain.query("['Batch'='%s']" % self.batch)
for item in rs:
self.process_record(item, path, get_file)
def get_results_from_bucket(self, path):
bucket = self.sd.get_obj('output_bucket')
if bucket:
print('No output queue or domain, just retrieving files from output_bucket')
for key in bucket:
                file_name = os.path.join(path, key.name)
                print('retrieving file: %s to %s' % (key.name, file_name))
key.get_contents_to_filename(file_name)
                self.num_files += 1
def get_results(self, path, get_file=True, delete_msg=True):
if not os.path.isdir(path):
os.mkdir(path)
if self.queue:
self.get_results_from_queue(path, get_file)
elif self.domain:
self.get_results_from_domain(path, get_file)
else:
self.get_results_from_bucket(path)
if self.log_fp:
self.log_fp.close()
print('%d results successfully retrieved.' % self.num_files)
if self.num_files > 0:
self.avg_time = float(self.total_time)/self.num_files
print('Minimum Processing Time: %d' % self.min_time.seconds)
print('Maximum Processing Time: %d' % self.max_time.seconds)
print('Average Processing Time: %f' % self.avg_time)
self.elapsed_time = self.latest_time-self.earliest_time
print('Elapsed Time: %d' % self.elapsed_time.seconds)
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print('Throughput: %f transactions / minute' % tput)
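# A minimal, illustrative driver (not part of the original module) showing how
# ResultProcessor is meant to be used; the ServiceDef config path, batch name
# and output directory are placeholders supplied by the caller.
def _example_get_batch_results(config_file, batch_name, path):
    from boto.services.servicedef import ServiceDef
    sd = ServiceDef(config_file)
    processor = ResultProcessor(batch_name, sd)
    processor.get_results(path, get_file=True, delete_msg=True)
    return processor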
| mit |
psychotechnik/mycv | mycv/apps/projects/migrations/0013_auto__add_field_skill_applicant__add_field_client_applicant__add_field.py | 1 | 12221 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Skill.applicant'
db.add_column(u'projects_skill', 'applicant',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='skills', null=True, to=orm['accounts.MyCVUser']),
keep_default=False)
# Adding field 'Client.applicant'
db.add_column(u'projects_client', 'applicant',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='clients', null=True, to=orm['accounts.MyCVUser']),
keep_default=False)
# Adding field 'StackItem.applicant'
db.add_column(u'projects_stackitem', 'applicant',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='stack_items', null=True, to=orm['accounts.MyCVUser']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Skill.applicant'
db.delete_column(u'projects_skill', 'applicant_id')
# Deleting field 'Client.applicant'
db.delete_column(u'projects_client', 'applicant_id')
# Deleting field 'StackItem.applicant'
db.delete_column(u'projects_stackitem', 'applicant_id')
models = {
u'accounts.mycvuser': {
'Meta': {'object_name': 'MyCVUser'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Address']", 'null': 'True', 'blank': 'True'}),
'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'categories.category': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alternate_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'alternate_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'meta_extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['categories.Category']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.address': {
'Meta': {'object_name': 'Address'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'street_address2': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
u'projects.client': {
'Meta': {'ordering': "('order_index', '-end_date')", 'object_name': 'Client'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'clients'", 'null': 'True', 'to': u"orm['accounts.MyCVUser']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.IntegerField', [], {'max_length': '2'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'projects.clientobjective': {
'Meta': {'object_name': 'ClientObjective'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'objectives'", 'to': u"orm['projects.Client']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'projects.project': {
'Meta': {'ordering': "('order_index',)", 'object_name': 'Project'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'projects'", 'null': 'True', 'to': u"orm['projects.Client']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'source_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'stack_items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['projects.StackItem']", 'symmetrical': 'False'})
},
u'projects.projectfeature': {
'Meta': {'object_name': 'ProjectFeature'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'features'", 'to': u"orm['projects.Project']"})
},
u'projects.skill': {
'Meta': {'object_name': 'Skill'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'skills'", 'null': 'True', 'to': u"orm['accounts.MyCVUser']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'projects.stackitem': {
'Meta': {'object_name': 'StackItem'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stack_items'", 'null': 'True', 'to': u"orm['accounts.MyCVUser']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['projects'] | gpl-2.0 |
2013Commons/hue | desktop/core/ext-py/Django-1.4.5/docs/_ext/applyxrefs.py | 143 | 2148 | """Adds xref targets to the top of files."""
import sys
import os
testing = False
DONT_TOUCH = (
'./index.txt',
)
def target_name(fn):
if fn.endswith('.txt'):
fn = fn[:-4]
return '_' + fn.lstrip('./').replace('/', '-')
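# For example (illustrative): target_name('./topics/http/index.txt')
# returns '_topics-http-index'.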
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
f = open(fn, 'w')
except IOError:
print("Can't open %s for writing. Not touching it." % fn)
return
try:
f.writelines(lines)
except IOError:
print("Can't write to %s. Not touching it." % fn)
finally:
f.close()
def has_target(fn):
try:
f = open(fn, 'r')
except IOError:
print("Can't open %s. Not touching it." % fn)
return (True, None)
readok = True
try:
lines = f.readlines()
except IOError:
print("Can't read %s. Not touching it." % fn)
readok = False
finally:
f.close()
if not readok:
return (True, None)
#print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
        argv.append('.')
files = []
for root in argv[1:]:
for (dirpath, dirnames, filenames) in os.walk(root):
files.extend([(dirpath, f) for f in filenames])
files.sort()
files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
#print files
for fn in files:
if fn in DONT_TOUCH:
print("Skipping blacklisted file %s." % fn)
continue
target_found, lines = has_target(fn)
if not target_found:
if testing:
print '%s: %s' % (fn, lines[0]),
else:
print "Adding xref to %s" % fn
process_file(fn, lines)
else:
print "Skipping %s: already has a xref" % fn
if __name__ == '__main__':
sys.exit(main()) | apache-2.0 |
petersanchez/django-allauth | allauth/socialaccount/views.py | 8 | 3507 | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from ..account.views import (CloseableSignupMixin,
RedirectAuthenticatedUserMixin)
from ..account.adapter import get_adapter as get_account_adapter
from ..utils import get_form_class, get_current_site
from .adapter import get_adapter
from .models import SocialLogin
from .forms import DisconnectForm, SignupForm
from . import helpers
from . import app_settings
class SignupView(RedirectAuthenticatedUserMixin, CloseableSignupMixin,
FormView):
form_class = SignupForm
template_name = 'socialaccount/signup.html'
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'signup',
self.form_class)
def dispatch(self, request, *args, **kwargs):
self.sociallogin = None
data = request.session.get('socialaccount_sociallogin')
if data:
self.sociallogin = SocialLogin.deserialize(data)
if not self.sociallogin:
return HttpResponseRedirect(reverse('account_login'))
return super(SignupView, self).dispatch(request, *args, **kwargs)
def is_open(self):
return get_adapter().is_open_for_signup(self.request,
self.sociallogin)
def get_form_kwargs(self):
ret = super(SignupView, self).get_form_kwargs()
ret['sociallogin'] = self.sociallogin
return ret
def form_valid(self, form):
form.save(self.request)
return helpers.complete_social_signup(self.request,
self.sociallogin)
def get_context_data(self, **kwargs):
ret = super(SignupView, self).get_context_data(**kwargs)
ret.update(dict(site=get_current_site(self.request),
account=self.sociallogin.account))
return ret
def get_authenticated_redirect_url(self):
return reverse(connections)
signup = SignupView.as_view()
class LoginCancelledView(TemplateView):
template_name = "socialaccount/login_cancelled.html"
login_cancelled = LoginCancelledView.as_view()
class LoginErrorView(TemplateView):
template_name = "socialaccount/authentication_error.html"
login_error = LoginErrorView.as_view()
class ConnectionsView(FormView):
template_name = "socialaccount/connections.html"
form_class = DisconnectForm
success_url = reverse_lazy("socialaccount_connections")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'disconnect',
self.form_class)
def get_form_kwargs(self):
kwargs = super(ConnectionsView, self).get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def form_valid(self, form):
get_account_adapter().add_message(self.request,
messages.INFO,
'socialaccount/messages/'
'account_disconnected.txt')
form.save()
return super(ConnectionsView, self).form_valid(form)
connections = login_required(ConnectionsView.as_view())
| mit |
davidzchen/tensorflow | tensorflow/python/eager/def_function_test.py | 1 | 28978 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import pickle
import re
import sys
import weakref
from absl.testing import parameterized
from six.moves import range
from tensorflow.python.autograph.core import converter
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save_context
from tensorflow.python.saved_model import save_options
def undecorated_function(x):
return x * 3.
class _HasDecoratedMethod(object):
@def_function.function
def f(self, x):
return x * 3.
class DefFunctionTest(test.TestCase, parameterized.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testRange(self):
@def_function.function
def f(unused_x):
return 1.0
self.assertAllEqual(f(range(5)), 1.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionMultipleVariableInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
state.append(variables.Variable(lambda: 5.0))
return state[0] * x, state[1] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), [2.0, 5.0])
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertLen(state, 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaisesRegex(lift_to_graph.UnliftableError,
r'transitively.* mul .* x'):
fn(constant_op.constant(3.0))
@test_util.disable_tfrt('Variable argument is not supported')
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_functools_partial(self):
self.assertAllClose(
3.,
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
func = def_function.function(functools.partial(f, y=6))
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
@test_util.disable_tfrt('Partial is not supported')
def test_complicated_partial_with_defaults(self):
def identity(*args):
return args
def dynamic_unroll(core_fn,
input_sequence,
initial_state,
sequence_length=None,
parallel_iterations=1,
swap_memory=False):
del core_fn
self.assertIs(None, sequence_length)
self.assertEqual(1, parallel_iterations)
self.assertTrue(swap_memory)
return input_sequence, initial_state
input_sequence = random_ops.random_uniform([1, 1, 1])
initial_state = random_ops.random_uniform([1, 1])
func = def_function.function(
functools.partial(dynamic_unroll, identity, swap_memory=True))
func(input_sequence, initial_state)
def test_unspecified_default_argument(self):
wrapped = def_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
signature_args, _ = concrete.structured_input_signature
self.assertEqual(signature_args,
(tensor_spec.TensorSpec(
None, dtypes.float32, name='x'),))
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_in_graph_and_eager_modes
def test_variable_naming(self):
class HasVars(module.Module):
def __init__(self):
self.x = None
self.y = None
self.z = None
@def_function.function
def make_x(self):
if self.x is None:
self.x = variables.Variable(1., name='v')
def make_y(self):
if self.y is None:
self.y = variables.Variable(1., name='v')
def make_z(self):
if self.z is None:
with ops.name_scope('z_scope', skip_on_eager=False):
self.z = variables.Variable(1., name='z')
root = HasVars()
root.make_x()
root.make_y()
root.make_z()
self.assertEqual('v:0', root.x.name)
self.assertEqual('z_scope/z:0', root.z.name)
def test_concrete_function_keyword_arguments(self):
@def_function.function
def f(x):
return x
conc = f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32, 'y'))
conc(y=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('x', signature_args[0].name)
@def_function.function
def g(x):
return x[0]
conc = g.get_concrete_function(
[tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2])
conc(z=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0][0].name)
def test_error_inner_capture(self):
@def_function.function
def f(inputs):
num_steps, _ = inputs.shape[:2]
outputs = []
for t in math_ops.range(num_steps):
outputs.append(inputs[t])
return outputs
with self.assertRaisesRegex(errors.InaccessibleTensorError,
'defined in another function or code block'):
f(array_ops.zeros(shape=(8, 42, 3)))
@test_util.disable_tfrt('Control flow is not supported')
def testRuntimeErrorNotSticky(self):
@def_function.function
def fail(i):
control_flow_ops.Assert(math_ops.equal(i, 0), ['ick'])
fail(constant_op.constant(0)) # OK
with self.assertRaises(errors.InvalidArgumentError):
fail(constant_op.constant(1)) # InvalidArgument: "ick"
fail(constant_op.constant(0)) # OK
def testUnderscoreName(self):
@def_function.function
def f(_):
return _ + _
self.assertAllEqual(2.0, f(constant_op.constant(1.0)))
def test_serialization_signature_cache(self):
@def_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
signatures_args = set()
concrete_functions = f._list_all_concrete_functions_for_serialization()
for concrete_function in concrete_functions:
args, kwargs = concrete_function.structured_input_signature
signatures_args.add(args)
self.assertEqual(dict(), kwargs)
self.assertEqual(
signatures_args,
set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'),
tensor_spec.TensorSpec([1], dtypes.float32, name='y')),
(tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'),
tensor_spec.TensorSpec([1], dtypes.int32, name='y')))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = def_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
has_decorated_method = _HasDecoratedMethod()
has_decorated_method.f(constant_op.constant(5.))
weak_fn = weakref.ref(has_decorated_method.f)
del has_decorated_method
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_new_pyobjects_executing_eagerly
def testErrorMessageWhenGraphTensorIsPassedToEager(self):
@def_function.function
def failing_function():
a = constant_op.constant(1.)
with ops.init_scope():
_ = a + a
with self.assertRaisesRegex(
TypeError,
re.compile('An op outside of the function.*passed.*Const', re.DOTALL)):
failing_function()
def testNonUniqueNamesGetConcreteFunction(self):
@def_function.function
def non_unique_arg_names(x, **kwargs):
a, b, c = x
d = kwargs['d']
return a + b + c + d
concrete = non_unique_arg_names.get_concrete_function(
(tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)),
d=tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(
10.,
concrete(x=constant_op.constant(1.),
x_1=constant_op.constant(2.),
x_2=constant_op.constant(3.),
d=constant_op.constant(4.)))
self.assertAllClose(
10.,
concrete(constant_op.constant(1.),
constant_op.constant(2.),
constant_op.constant(3.),
constant_op.constant(4.)))
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@def_function.function
def f():
if not created_variables:
created_variables.append(variables.Variable(1.))
return created_variables[0] + 1.
def capture_creator(next_creator, **kwargs):
created = next_creator(**kwargs)
captured_variables.append(created)
return created
with variable_scope.variable_creator_scope(capture_creator):
f()
self.assertEqual(created_variables, captured_variables)
@test_util.disable_tfrt('Variable argument is not supported')
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
@test_util.disable_tfrt('Variable argument is not supported')
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@def_function.function
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableUpdate(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(2.0)
v3 = variables.Variable(4, dtype=dtypes.int32)
trace_count = [0]
@def_function.function
def double_variable(x):
trace_count[0] += 1
x.assign_add(x.read_value())
self.assertEqual(trace_count[0], 0)
double_variable(v1)
self.assertEqual(trace_count[0], 1)
self.assertEqual(self.evaluate(v1), 2.0)
double_variable(v2)
self.assertEqual(trace_count[0], 2)
self.assertEqual(self.evaluate(v2), 4.0)
double_variable(v3)
self.assertEqual(trace_count[0], 3)
self.assertEqual(self.evaluate(v3), 8)
def testShapeCache(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
func_b = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
self.assertIs(func_a, func_b)
def testCacheWithinSaveContext(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(constant_op.constant(2.))
func_b = func.get_concrete_function(constant_op.constant(2.))
self.assertIs(func_a, func_b)
with save_context.save_context(save_options.SaveOptions()):
func_c = func.get_concrete_function(constant_op.constant(2.))
self.assertIs(func_a, func_c)
@test_util.disable_tfrt('Nested function is not supported')
def testInitializationInNestedCall(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
@def_function.function
def wrapper(x):
return add_var(x)
self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@def_function.function()
def create_variable():
with ops.init_scope():
initial_value = random_ops.random_uniform(
(2, 2), maxval=1000000, dtype=dtypes.int64)
if not a:
with ops.device('CPU:0'):
a.append(resource_variable_ops.ResourceVariable(initial_value))
return a[0].read_value()
create_variable()
self.assertRegex(a[0].device, 'CPU')
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_gpu_only
def testDeviceAnnotationForInitializerRespected(self):
a = []
initial_value = []
def initial_value_fn():
initial_value.append(random_ops.random_uniform((2, 3)))
return initial_value[0]
@def_function.function()
def create_variable():
with ops.init_scope():
if not a:
a.append(variables.Variable(initial_value_fn))
with ops.device('CPU:0'):
create_variable()
self.assertRegex(a[0].device, 'CPU')
self.assertRegex(initial_value[0].device, 'CPU')
def testDecorate(self):
func = def_function.function(lambda: 1)
def decorator(f):
return lambda: 1 + f()
func._decorate(decorator)
self.assertEqual(func().numpy(), 2)
@parameterized.parameters(*itertools.product(
(None, (tensor_spec.TensorSpec([]),)), # input_signature
(True, False), # autograph
(None, converter.Feature.ALL), # autograph_options
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
(True, False), # compile
(True, False), # override_function
))
  @test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
                          'for function.')
def testClone(self, input_signature, autograph, autograph_options, implements,
relax_shapes, compile_, override_function):
original_py_function = lambda x: x
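    # XLA JIT support is not linked into this test build, so the parameterized
    # compile_ value is deliberately forced to False (see the related comment
    # near the output check at the end of this test).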
compile_ = False
func = def_function.function(
func=original_py_function,
input_signature=input_signature,
autograph=autograph,
experimental_implements=implements,
experimental_autograph_options=autograph_options,
experimental_relax_shapes=relax_shapes,
experimental_compile=compile_)
if override_function:
cloned_py_function = lambda x: x + 1
else:
cloned_py_function = original_py_function
cloned = func._clone(python_function=cloned_py_function)
self.assertEqual(cloned_py_function, cloned._python_function)
self.assertEqual(func._name, cloned._name)
self.assertEqual(input_signature, cloned._input_signature)
self.assertEqual(autograph, cloned._autograph)
self.assertEqual(implements, cloned._implements)
self.assertEqual(autograph_options, cloned._experimental_autograph_options)
self.assertEqual(relax_shapes, cloned._experimental_relax_shapes)
self.assertEqual(compile_, cloned._experimental_compile)
# This test does not run with XLA JIT support linked in so we can only check
# the output of the function if compile is disabled.
if not compile_:
x = array_ops.zeros([])
self.assertEqual(self.evaluate(cloned(x)),
self.evaluate(cloned_py_function(x)))
@test_util.disable_tfrt('Variable argument is not supported')
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@def_function.function
def use_variable():
if not var_list:
initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)
v = variables.Variable(initial_value)
var_list.append(v)
return var_list[0] + 1.
var_plus_one = use_variable()
with self.session() as session:
init_op = var_list[0].initializer
session.run(init_op, feed_dict={init_op.inputs[1]: 2.})
self.assertEqual(3., session.run(var_plus_one))
def testDecorate_rejectedAfterTrace(self):
func = def_function.function(lambda: 1)
self.assertEqual(func().numpy(), 1)
msg = 'Functions cannot be decorated after they have been traced.'
with self.assertRaisesRegex(ValueError, msg):
func._decorate(lambda f: f)
def testGetConcreteFunctionGraphLifetime(self):
@def_function.function
def func():
pass
graph = func.get_concrete_function().graph
del func
# If the graph is deleted, then an exception is raised on reading `captures`
self.assertEmpty(graph.captures)
@parameterized.parameters(*itertools.product(
(None, (tensor_spec.TensorSpec([]),)), # input_signature
(True, False), # autograph
(None, converter.Feature.ALL), # autograph_options
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
))
  @test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
                          'for function.')
def test_pickle(self, input_signature, autograph, autograph_options,
implements, relax_shapes):
"""@function objects can be pickled and unpickled."""
original_py_function = undecorated_function
func = def_function.function(
func=original_py_function,
input_signature=input_signature,
autograph=autograph,
experimental_implements=implements,
experimental_autograph_options=autograph_options,
experimental_relax_shapes=relax_shapes,
)
cloned = pickle.loads(pickle.dumps(func))
self.assertEqual(func._name, cloned._name)
self.assertEqual(input_signature, cloned._input_signature)
self.assertEqual(autograph, cloned._autograph)
self.assertEqual(implements, cloned._implements)
self.assertEqual(autograph_options, cloned._experimental_autograph_options)
self.assertEqual(relax_shapes, cloned._experimental_relax_shapes)
x = array_ops.ones([])
self.assertEqual(self.evaluate(cloned(x)), self.evaluate(func(x)))
def test_frequent_retracing_warning(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@def_function.function
def f(x):
return x
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
self.assertEmpty(logs.output)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_lambda(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
f = def_function.function(lambda x: x)
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_method(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
class Foo(object):
@def_function.function
def f(self, x):
return x
f = Foo().f
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_two_independent_tf_functions(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@def_function.function
def f(x):
return x
@def_function.function
def g(x):
return x
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
g(1)
g(2)
g(3)
g(4)
g(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
@test_util.disable_tfrt('Nested function is not supported')
def test_frequent_retracing_warning_nested(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@def_function.function
def inner(x):
return x + 1
@def_function.function
def outer1(x):
return inner(x) * 2
@def_function.function
def outer2(x):
return inner(x) * 3
with self.assertLogs(level='WARN') as logs:
inner(1)
inner(2)
inner(3)
inner(4)
outer1(5)
outer1(6)
outer1(7)
outer1(8)
outer2(9)
outer2(10)
outer2(11)
outer2(12)
self.assertEmpty(logs.output)
outer2(13)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_on_reinstantiation(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
with self.assertLogs(level='WARN') as logs:
for i in range(5):
@def_function.function
def f(x):
return x
f(i)
if i < 4:
self.assertEmpty(logs.output)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 |
meabsence/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/mathtls.py | 273 | 11647 | """Miscellaneous helper functions."""
from utils.compat import *
from utils.cryptomath import *
import hmac
import md5
import sha
# 1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups
goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\
(2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\
(2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\
(2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)]
def P_hash(hashModule, secret, seed, length):
bytes = createByteArrayZeros(length)
secret = bytesToString(secret)
seed = bytesToString(seed)
A = seed
index = 0
while 1:
A = hmac.HMAC(secret, A, hashModule).digest()
output = hmac.HMAC(secret, A+seed, hashModule).digest()
for c in output:
if index >= length:
return bytes
bytes[index] = ord(c)
index += 1
return bytes
def PRF(secret, label, seed, length):
#Split the secret into left and right halves
S1 = secret[ : int(math.ceil(len(secret)/2.0))]
S2 = secret[ int(math.floor(len(secret)/2.0)) : ]
#Run the left half through P_MD5 and the right half through P_SHA1
p_md5 = P_hash(md5, S1, concatArrays(stringToBytes(label), seed), length)
p_sha1 = P_hash(sha, S2, concatArrays(stringToBytes(label), seed), length)
#XOR the output values and return the result
for x in range(length):
p_md5[x] ^= p_sha1[x]
return p_md5
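# A minimal sketch (not part of the original module): deriving the 48-byte
# TLS 1.0/1.1 master secret with PRF() above; all three arguments are the
# array-of-bytes values used throughout this module.
def _example_master_secret(premaster_secret, client_random, server_random):
    return PRF(premaster_secret, "master secret",
               concatArrays(client_random, server_random), 48)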
def PRF_SSL(secret, seed, length):
secretStr = bytesToString(secret)
seedStr = bytesToString(seed)
bytes = createByteArrayZeros(length)
index = 0
for x in range(26):
A = chr(ord('A')+x) * (x+1) # 'A', 'BB', 'CCC', etc..
input = secretStr + sha.sha(A + secretStr + seedStr).digest()
output = md5.md5(input).digest()
for c in output:
if index >= length:
return bytes
bytes[index] = ord(c)
index += 1
return bytes
def makeX(salt, username, password):
if len(username)>=256:
raise ValueError("username too long")
if len(salt)>=256:
raise ValueError("salt too long")
return stringToNumber(sha.sha(salt + sha.sha(username + ":" + password)\
.digest()).digest())
#This function is used by VerifierDB.makeVerifier
def makeVerifier(username, password, bits):
bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits]
g,N = goodGroupParameters[bitsIndex]
salt = bytesToString(getRandomBytes(16))
x = makeX(salt, username, password)
verifier = powMod(g, x, N)
return N, g, salt, verifier
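# For example (illustrative values only): makeVerifier("alice", "password", 2048)
# returns the 2048-bit group's (N, g), a random 16-byte salt string, and the
# verifier powMod(g, x, N) derived from that username/password pair.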
def PAD(n, x):
nLength = len(numberToString(n))
s = numberToString(x)
if len(s) < nLength:
s = ("\0" * (nLength-len(s))) + s
return s
def makeU(N, A, B):
return stringToNumber(sha.sha(PAD(N, A) + PAD(N, B)).digest())
def makeK(N, g):
return stringToNumber(sha.sha(numberToString(N) + PAD(N, g)).digest())
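# A minimal sketch (not part of the original module) of how the helpers above
# combine on the SRP server side; 'verifier' comes from makeVerifier() and 'b'
# is the server's ephemeral private value, both supplied by the caller.
def _example_srp_server_B(verifier, b, bits=2048):
    bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits]
    g, N = goodGroupParameters[bitsIndex]
    k = makeK(N, g)
    return (k*verifier + powMod(g, b, N)) % N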
"""
MAC_SSL
Modified from Python HMAC by Trevor
"""
class MAC_SSL:
"""MAC_SSL class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new MAC_SSL object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. Defaults to the md5 module.
"""
if digestmod is None:
import md5
digestmod = md5
        if key is None: #TREVNEW - for faster copying
return #TREVNEW
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
self.digest_size = digestmod.digest_size
ipad = "\x36" * 40
opad = "\x5C" * 40
self.inner.update(key)
self.inner.update(ipad)
self.outer.update(key)
self.outer.update(opad)
if msg is not None:
self.update(msg)
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = MAC_SSL(None) #TREVNEW - for faster copying
other.digest_size = self.digest_size #TREVNEW
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([hex(ord(x))[2:].zfill(2)
for x in tuple(self.digest())])
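# A minimal usage sketch (not part of the original file): MAC_SSL mirrors the
# hmac.HMAC API above, so it is keyed at construction and fed data via update().
def _example_mac_ssl(key, message):
    mac = MAC_SSL(key, digestmod=md5)
    mac.update(message)
    return mac.hexdigest()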
| apache-2.0 |
blooparksystems/odoo | addons/website_slides/models/slides.py | 2 | 25969 | # -*- coding: utf-8 -*-
import datetime
import io
import json
from PIL import Image
import re
from urllib import urlencode
import urllib2
from urlparse import urlparse
from openerp import api, fields, models, SUPERUSER_ID, _
from openerp.tools import image
from openerp.exceptions import Warning
from openerp.addons.website.models.website import slug
class Channel(models.Model):
""" A channel is a container of slides. It has group-based access configuration
allowing to configure slide upload and access. Slides can be promoted in
channels. """
_name = 'slide.channel'
_description = 'Channel for Slides'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_order = 'sequence, id'
_order_by_strategy = {
'most_viewed': 'total_views desc',
'most_voted': 'likes desc',
'latest': 'date_published desc',
}
name = fields.Char('Name', translate=True, required=True)
description = fields.Html('Description', translate=True)
sequence = fields.Integer(default=10, help='Display order')
category_ids = fields.One2many('slide.category', 'channel_id', string="Categories")
slide_ids = fields.One2many('slide.slide', 'channel_id', string="Slides")
promote_strategy = fields.Selection([
('none', 'No Featured Presentation'),
('latest', 'Latest Published'),
('most_voted', 'Most Voted'),
('most_viewed', 'Most Viewed'),
('custom', 'Featured Presentation')],
string="Featuring Policy", default='most_voted', required=True)
custom_slide_id = fields.Many2one('slide.slide', string='Slide to Promote')
promoted_slide_id = fields.Many2one('slide.slide', string='Featured Slide', compute='_compute_promoted_slide_id', store=True)
@api.depends('custom_slide_id', 'promote_strategy', 'slide_ids.likes',
'slide_ids.total_views', "slide_ids.date_published")
def _compute_promoted_slide_id(self):
for record in self:
if record.promote_strategy == 'none':
record.promoted_slide_id = False
elif record.promote_strategy == 'custom':
record.promoted_slide_id = record.custom_slide_id
elif record.promote_strategy:
slides = self.env['slide.slide'].search(
[('website_published', '=', True), ('channel_id', '=', record.id)],
limit=1, order=self._order_by_strategy[record.promote_strategy])
record.promoted_slide_id = slides and slides[0] or False
nbr_presentations = fields.Integer('Number of Presentations', compute='_count_presentations', store=True)
nbr_documents = fields.Integer('Number of Documents', compute='_count_presentations', store=True)
nbr_videos = fields.Integer('Number of Videos', compute='_count_presentations', store=True)
nbr_infographics = fields.Integer('Number of Infographics', compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
        # each channel needs its own counter dict; dict.fromkeys() would make
        # every id share a single dict and mix the counts across channels
        result = dict((channel_id, dict()) for channel_id in self.ids)
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('channel_id', 'in', self.ids)],
['channel_id', 'slide_type'], ['channel_id', 'slide_type'],
lazy=False)
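        # Illustrative shape of the rows returned by read_group() above (all
        # values invented): [{'channel_id': (3, u'Tutorials'), 'slide_type':
        # 'video', '__count': 7, '__domain': [...]}, ...]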
for res_group in res:
result[res_group['channel_id'][0]][res_group['slide_type']] = result[res_group['channel_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
publish_template_id = fields.Many2one(
'mail.template', string='Published Template',
help="Email template to send slide publication through email",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_published'))
share_template_id = fields.Many2one(
'mail.template', string='Shared Template',
help="Email template used when sharing a slide",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_shared'))
visibility = fields.Selection([
('public', 'Public'),
('private', 'Private'),
('partial', 'Show channel but restrict presentations')],
default='public', required=True)
group_ids = fields.Many2many(
'res.groups', 'rel_channel_groups', 'channel_id', 'group_id',
string='Channel Groups', help="Groups allowed to see presentations in this channel")
access_error_msg = fields.Html(
'Error Message', help="Message to display when not accessible due to access rights",
default="<p>This channel is private and its content is restricted to some users.</p>", translate=True)
upload_group_ids = fields.Many2many(
'res.groups', 'rel_upload_groups', 'channel_id', 'group_id',
        string='Upload Groups', help="Groups allowed to upload presentations in this channel. If empty, every user can upload.")
# not stored access fields, depending on each user
can_see = fields.Boolean('Can See', compute='_compute_access')
can_see_full = fields.Boolean('Full Access', compute='_compute_access')
can_upload = fields.Boolean('Can Upload', compute='_compute_access')
@api.one
@api.depends('visibility', 'group_ids', 'upload_group_ids')
def _compute_access(self):
self.can_see = self.visibility in ['public', 'private'] or bool(self.group_ids & self.env.user.groups_id)
self.can_see_full = self.visibility == 'public' or bool(self.group_ids & self.env.user.groups_id)
self.can_upload = self.can_see and (not self.upload_group_ids or bool(self.upload_group_ids & self.env.user.groups_id))
@api.multi
@api.depends('name')
def _website_url(self, name, arg):
res = super(Channel, self)._website_url(name, arg)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
res.update({(channel.id, '%s/slides/%s' % (base_url, slug(channel))) for channel in self})
return res
@api.onchange('visibility')
def change_visibility(self):
if self.visibility == 'public':
self.group_ids = False
class Category(models.Model):
""" Channel contain various categories to manage its slides """
_name = 'slide.category'
_description = "Slides Category"
_order = "sequence, id"
name = fields.Char('Name', translate=True, required=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True, ondelete='cascade')
sequence = fields.Integer(default=10, help='Display order')
slide_ids = fields.One2many('slide.slide', 'category_id', string="Slides")
nbr_presentations = fields.Integer("Number of Presentations", compute='_count_presentations', store=True)
nbr_documents = fields.Integer("Number of Documents", compute='_count_presentations', store=True)
nbr_videos = fields.Integer("Number of Videos", compute='_count_presentations', store=True)
nbr_infographics = fields.Integer("Number of Infographics", compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
        # one fresh dict per record: dict.fromkeys(self.ids, dict()) would make every id share the same dict
        result = {category_id: dict() for category_id in self.ids}
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('category_id', 'in', self.ids)],
['category_id', 'slide_type'], ['category_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['category_id'][0]][res_group['slide_type']] = result[res_group['category_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
class EmbeddedSlide(models.Model):
""" Embedding in third party websites. Track view count, generate statistics. """
_name = 'slide.embed'
_description = 'Embedded Slides View Counter'
_rec_name = 'slide_id'
slide_id = fields.Many2one('slide.slide', string="Presentation", required=True, select=1)
url = fields.Char('Third Party Website URL', required=True)
count_views = fields.Integer('# Views', default=1)
def add_embed_url(self, slide_id, url):
schema = urlparse(url)
baseurl = schema.netloc
embeds = self.search([('url', '=', baseurl), ('slide_id', '=', int(slide_id))], limit=1)
if embeds:
embeds.count_views += 1
else:
embeds = self.create({
'slide_id': slide_id,
'url': baseurl,
})
return embeds.count_views
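    # Illustrative usage (hypothetical values, not part of the original module):
    #     env['slide.embed'].add_embed_url(42, 'https://example.com/blog/post-1')
    #     env['slide.embed'].add_embed_url(42, 'https://example.com/blog/post-2')
    # Both URLs share the netloc 'example.com', so a single slide.embed record is reused
    # and its count_views ends up at 2.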
class SlideTag(models.Model):
""" Tag to search slides accross channels. """
_name = 'slide.tag'
_description = 'Slide Tag'
name = fields.Char('Name', required=True)
_sql_constraints = [
('slide_tag_unique', 'UNIQUE(name)', 'A tag must be unique!'),
]
class Slide(models.Model):
""" This model represents actual presentations. Those must be one of four
types:
- Presentation
- Document
- Infographic
- Video
    Slide has various statistics like view count, embed count, likes, dislikes """
_name = 'slide.slide'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_description = 'Slides'
_PROMOTIONAL_FIELDS = [
'__last_update', 'name', 'image_thumb', 'image_medium', 'slide_type', 'total_views', 'category_id',
'channel_id', 'description', 'tag_ids', 'write_date', 'create_date',
'website_published', 'website_url', 'website_meta_title', 'website_meta_description', 'website_meta_keywords']
_sql_constraints = [
('name_uniq', 'UNIQUE(channel_id, name)', 'The slide name must be unique within a channel')
]
# description
name = fields.Char('Title', required=True, translate=True)
description = fields.Text('Description', translate=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True)
category_id = fields.Many2one('slide.category', string="Category", domain="[('channel_id', '=', channel_id)]")
tag_ids = fields.Many2many('slide.tag', 'rel_slide_tag', 'slide_id', 'tag_id', string='Tags')
download_security = fields.Selection(
        [('none', 'No One'), ('user', 'Authenticated Users Only'), ('public', 'Everyone')],
string='Download Security',
required=True, default='user')
image = fields.Binary('Image')
image_medium = fields.Binary('Medium', compute="_get_image", store=True)
image_thumb = fields.Binary('Thumbnail', compute="_get_image", store=True)
@api.depends('image')
def _get_image(self):
for record in self:
if record.image:
record.image_medium = image.crop_image(record.image, type='top', ratio=(4, 3), thumbnail_ratio=4)
record.image_thumb = image.crop_image(record.image, type='top', ratio=(4, 3), thumbnail_ratio=6)
else:
record.image_medium = False
                record.image_thumb = False
# content
slide_type = fields.Selection([
('infographic', 'Infographic'),
('presentation', 'Presentation'),
('document', 'Document'),
('video', 'Video')],
string='Type', required=True,
default='document',
help="Document type will be set automatically depending on file type, height and width.")
index_content = fields.Text('Transcript')
datas = fields.Binary('Content')
url = fields.Char('Document URL', help="Youtube or Google Document URL")
document_id = fields.Char('Document ID', help="Youtube or Google Document ID")
mime_type = fields.Char('Mime-type')
@api.onchange('url')
def on_change_url(self):
self.ensure_one()
if self.url:
res = self._parse_document_url(self.url)
if res.get('error'):
raise Warning(_('Could not fetch data from url. Document or access right not available:\n%s') % res['error'])
values = res['values']
if not values.get('document_id'):
raise Warning(_('Please enter valid Youtube or Google Doc URL'))
for key, value in values.iteritems():
setattr(self, key, value)
# website
date_published = fields.Datetime('Publish Date')
website_message_ids = fields.One2many(
'mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name), ('message_type', '=', 'comment')],
string='Website Messages', help="Website communication history")
likes = fields.Integer('Likes')
dislikes = fields.Integer('Dislikes')
# views
embedcount_ids = fields.One2many('slide.embed', 'slide_id', string="Embed Count")
slide_views = fields.Integer('# of Website Views')
embed_views = fields.Integer('# of Embedded Views')
total_views = fields.Integer("Total # Views", default="0", compute='_compute_total', store=True)
@api.depends('slide_views', 'embed_views')
def _compute_total(self):
for record in self:
record.total_views = record.slide_views + record.embed_views
embed_code = fields.Text('Embed Code', readonly=True, compute='_get_embed_code')
def _get_embed_code(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for record in self:
if record.datas and not record.document_id:
record.embed_code = '<iframe src="%s/slides/embed/%s?page=1" allowFullScreen="true" height="%s" width="%s" frameborder="0"></iframe>' % (base_url, record.id, 315, 420)
elif record.slide_type == 'video' and record.document_id:
if not record.mime_type:
# embed youtube video
record.embed_code = '<iframe src="//www.youtube.com/embed/%s?theme=light" allowFullScreen="true" frameborder="0"></iframe>' % (record.document_id)
else:
# embed google doc video
record.embed_code = '<embed src="https://video.google.com/get_player?ps=docs&partnerid=30&docid=%s" type="application/x-shockwave-flash"></embed>' % (record.document_id)
else:
record.embed_code = False
@api.multi
@api.depends('name')
def _website_url(self, name, arg):
res = super(Slide, self)._website_url(name, arg)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
#link_tracker is not in dependencies, so use it to shorten url only if installed.
if self.env.registry.get('link.tracker'):
LinkTracker = self.env['link.tracker']
res.update({(slide.id, LinkTracker.sudo().create({'url': '%s/slides/slide/%s' % (base_url, slug(slide))}).short_url) for slide in self})
else:
res.update({(slide.id, '%s/slides/slide/%s' % (base_url, slug(slide))) for slide in self})
return res
@api.model
def create(self, values):
if not values.get('index_content'):
values['index_content'] = values.get('description')
if values.get('slide_type') == 'infographic' and not values.get('image'):
values['image'] = values['datas']
if values.get('website_published') and not values.get('date_published'):
values['date_published'] = datetime.datetime.now()
if values.get('url'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.iteritems():
values.setdefault(key, value)
# Do not publish slide if user has not publisher rights
if not self.user_has_groups('base.group_website_publisher'):
values['website_published'] = False
slide = super(Slide, self).create(values)
slide.channel_id.message_subscribe_users()
slide._post_publication()
return slide
@api.multi
def write(self, values):
if values.get('url'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.iteritems():
values.setdefault(key, value)
res = super(Slide, self).write(values)
if values.get('website_published'):
self.date_published = datetime.datetime.now()
self._post_publication()
return res
@api.model
def check_field_access_rights(self, operation, fields):
""" As per channel access configuration (visibility)
- public ==> no restriction on slides access
- private ==> restrict all slides of channel based on access group defined on channel group_ids field
- partial ==> show channel, but presentations based on groups means any user can see channel but not slide's content.
For private: implement using record rule
For partial: user can see channel, but channel gridview have slide detail so we have to implement
partial field access mechanism for public user so he can have access of promotional field (name, view_count) of slides,
but not all fields like data (actual pdf content)
all fields should be accessible only for user group defined on channel group_ids
"""
if self.env.uid == SUPERUSER_ID:
return fields or list(self._fields)
fields = super(Slide, self).check_field_access_rights(operation, fields)
        # the read has not been performed yet, so we cannot access self.channel_id directly
if self.ids:
self.env.cr.execute('SELECT DISTINCT channel_id FROM ' + self._table + ' WHERE id IN %s', (tuple(self.ids),))
channel_ids = [x[0] for x in self.env.cr.fetchall()]
channels = self.env['slide.channel'].sudo().browse(channel_ids)
limited_access = all(channel.visibility == 'partial' and
not len(channel.group_ids & self.env.user.groups_id)
for channel in channels)
if limited_access:
fields = [field for field in fields if field in self._PROMOTIONAL_FIELDS]
return fields
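    # Illustrative consequence (assuming a typical setup): a public visitor browsing a
    # channel with visibility 'partial' and no matching groups only gets the fields listed
    # in _PROMOTIONAL_FIELDS back (name, thumbnails, view counts, ...), while fields such
    # as datas (the actual document content) are filtered out of the returned field list.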
def get_related_slides(self, limit=20):
domain = [('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)]
if self.category_id:
domain += [('category_id', '=', self.category_id.id)]
for record in self.search(domain, limit=limit):
yield record
def get_most_viewed_slides(self, limit=20):
for record in self.search([('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)], limit=limit, order='total_views desc'):
yield record
def _post_publication(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for slide in self.filtered(lambda slide: slide.website_published):
publish_template = slide.channel_id.publish_template_id
html_body = publish_template.with_context({'base_url': base_url}).render_template(publish_template.body_html, 'slide.slide', slide.id)
slide.channel_id.message_post(body=html_body, subtype='website_slides.mt_channel_slide_published')
return True
@api.one
def send_share_email(self, email):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
return self.channel_id.share_template_id.with_context({'email': email, 'base_url': base_url}).send_mail(self.id)
# --------------------------------------------------
# Parsing methods
# --------------------------------------------------
@api.model
def _fetch_data(self, base_url, data, content_type=False):
result = {'values': dict()}
try:
if data:
base_url = base_url + '?%s' % urlencode(data)
req = urllib2.Request(base_url)
content = urllib2.urlopen(req).read()
if content_type == 'json':
result['values'] = json.loads(content)
elif content_type in ('image', 'pdf'):
result['values'] = content.encode('base64')
else:
result['values'] = content
except urllib2.HTTPError as e:
result['error'] = e.read()
e.close()
except urllib2.URLError as e:
result['error'] = e.reason
return result
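    # Illustrative call (hypothetical URL and key, not taken from the original code):
    #     self._fetch_data('https://www.googleapis.com/drive/v2/files/FILE_ID',
    #                      {'key': 'API_KEY'}, 'json')
    # returns {'values': <decoded JSON dict>} on success; on an HTTP or URL error the
    # 'values' entry stays an empty dict and an 'error' entry is added instead.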
def _find_document_data_from_url(self, url):
expr = re.compile(r'^.*((youtu.be/)|(v/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#\&\?]*).*')
arg = expr.match(url)
document_id = arg and arg.group(7) or False
if document_id:
return ('youtube', document_id)
expr = re.compile(r'(^https:\/\/docs.google.com|^https:\/\/drive.google.com).*\/d\/([^\/]*)')
arg = expr.match(url)
document_id = arg and arg.group(2) or False
if document_id:
return ('google', document_id)
return (None, False)
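    # Illustrative matches (assumed example URLs, not taken from the original code):
    #     'https://youtu.be/dQw4w9WgXcQ'                          -> ('youtube', 'dQw4w9WgXcQ')
    #     'https://docs.google.com/presentation/d/1AbC-dEf/edit'  -> ('google', '1AbC-dEf')
    # Any URL matching neither pattern falls through to (None, False).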
def _parse_document_url(self, url, only_preview_fields=False):
document_source, document_id = self._find_document_data_from_url(url)
if document_source and hasattr(self, '_parse_%s_document' % document_source):
return getattr(self, '_parse_%s_document' % document_source)(document_id, only_preview_fields)
return {'error': _('Unknown document')}
def _parse_youtube_document(self, document_id, only_preview_fields):
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/youtube/v3/videos', {'id': document_id, 'key': key, 'part': 'snippet', 'fields': 'items(id,snippet)'}, 'json')
if fetch_res.get('error'):
return fetch_res
values = {'slide_type': 'video', 'document_id': document_id}
youtube_values = fetch_res['values'].get('items', list(dict()))[0]
if youtube_values.get('snippet'):
snippet = youtube_values['snippet']
if only_preview_fields:
values.update({
'url_src': snippet['thumbnails']['high']['url'],
'title': snippet['title'],
'description': snippet['description']
})
return values
values.update({
'name': snippet['title'],
'image': self._fetch_data(snippet['thumbnails']['high']['url'], {}, 'image')['values'],
'description': snippet['description'],
})
return {'values': values}
@api.model
def _parse_google_document(self, document_id, only_preview_fields):
def get_slide_type(vals):
# TDE FIXME: WTF ??
image = Image.open(io.BytesIO(vals['image'].decode('base64')))
width, height = image.size
if height > width:
return 'document'
else:
return 'presentation'
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/drive/v2/files/%s' % document_id, {'projection': 'BASIC', 'key': key}, "json")
if fetch_res.get('error'):
return fetch_res
google_values = fetch_res['values']
if only_preview_fields:
return {
'url_src': google_values['thumbnailLink'],
'title': google_values['title'],
}
values = {
'name': google_values['title'],
'image': self._fetch_data(google_values['thumbnailLink'].replace('=s220', ''), {}, 'image')['values'],
'mime_type': google_values['mimeType'],
'document_id': document_id,
}
if google_values['mimeType'].startswith('video/'):
values['slide_type'] = 'video'
elif google_values['mimeType'].startswith('image/'):
values['datas'] = values['image']
values['slide_type'] = 'infographic'
elif google_values['mimeType'].startswith('application/vnd.google-apps'):
values['datas'] = self._fetch_data(google_values['exportLinks']['application/pdf'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
if google_values['exportLinks'].get('text/plain'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/plain'], {})['values']
if google_values['exportLinks'].get('text/csv'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/csv'], {})['values']
elif google_values['mimeType'] == 'application/pdf':
# TODO: Google Drive PDF document doesn't provide plain text transcript
values['datas'] = self._fetch_data(google_values['webContentLink'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
return {'values': values}
| gpl-3.0 |
Namita26/ycmd | cpp/ycm/tests/gmock/test/gmock_output_test.py | 986 | 5999 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Mocking Framework.
SYNOPSIS
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
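# Illustrative example (assumed line of test output): both
#   'gmock/test/gmock_output_test_.cc:42: Failure' and
#   'gmock\test\gmock_output_test_.cc(42): Failure'
# normalize to 'FILE:#: Failure', which keeps the golden file platform independent.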
def NormalizeErrorMarker(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = NormalizeErrorMarker(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def testOutput(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read()
golden_file.close()
# The normalized output should match the golden file.
self.assertEquals(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
gmock_test_utils.Main()
| gpl-3.0 |
Ayub-Khan/edx-platform | lms/djangoapps/verify_student/tests/test_models.py | 27 | 36071 | # -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import ddt
import json
import mock
import requests.exceptions
import pytz
from django.conf import settings
from django.db import IntegrityError
from django.test import TestCase
from mock import patch
from nose.tools import assert_is_none, assert_equals, assert_raises, assert_true, assert_false # pylint: disable=no-name-in-module
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.verify_student.models import (
SoftwareSecurePhotoVerification,
VerificationException, VerificationCheckpoint,
VerificationStatus, SkippedReverification,
VerificationDeadline
)
FAKE_SETTINGS = {
"SOFTWARE_SECURE": {
"FACE_IMAGE_AES_KEY": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
"RSA_PUBLIC_KEY": """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu2fUn20ZQtDpa1TKeCA/
rDA2cEeFARjEr41AP6jqP/k3O7TeqFX6DgCBkxcjojRCs5IfE8TimBHtv/bcSx9o
7PANTq/62ZLM9xAMpfCcU6aAd4+CVqQkXSYjj5TUqamzDFBkp67US8IPmw7I2Gaa
tX8ErZ9D7ieOJ8/0hEiphHpCZh4TTgGuHgjon6vMV8THtq3AQMaAQ/y5R3V7Lezw
dyZCM9pBcvcH+60ma+nNg8GVGBAW/oLxILBtg+T3PuXSUvcu/r6lUFMHk55pU94d
9A/T8ySJm379qU24ligMEetPk1o9CUasdaI96xfXVDyFhrzrntAmdD+HYCSPOQHz
iwIDAQAB
-----END PUBLIC KEY-----""",
"API_URL": "http://localhost/verify_student/fake_endpoint",
"AWS_ACCESS_KEY": "FAKEACCESSKEY",
"AWS_SECRET_KEY": "FAKESECRETKEY",
"S3_BUCKET": "fake-bucket"
}
}
class MockKey(object):
"""
Mocking a boto S3 Key object. It's a really dumb mock because once we
write data to S3, we never read it again. We simply generate a link to it
and pass that to Software Secure. Because of that, we don't even implement
the ability to pull back previously written content in this mock.
Testing that the encryption/decryption roundtrip on the data works is in
test_ssencrypt.py
"""
def __init__(self, bucket):
self.bucket = bucket
def set_contents_from_string(self, contents):
self.contents = contents
def generate_url(self, duration):
return "http://fake-edx-s3.edx.org/"
class MockBucket(object):
"""Mocking a boto S3 Bucket object."""
def __init__(self, name):
self.name = name
class MockS3Connection(object):
"""Mocking a boto S3 Connection"""
def __init__(self, access_key, secret_key):
pass
def get_bucket(self, bucket_name):
return MockBucket(bucket_name)
def mock_software_secure_post(url, headers=None, data=None, **kwargs):
"""
Mocks our interface when we post to Software Secure. Does basic assertions
on the fields we send over to make sure we're not missing headers or giving
total garbage.
"""
data_dict = json.loads(data)
# Basic sanity checking on the keys
EXPECTED_KEYS = [
"EdX-ID", "ExpectedName", "PhotoID", "PhotoIDKey", "SendResponseTo",
"UserPhoto", "UserPhotoKey",
]
for key in EXPECTED_KEYS:
assert_true(
data_dict.get(key),
"'{}' must be present and not blank in JSON submitted to Software Secure".format(key)
)
# The keys should be stored as Base64 strings, i.e. this should not explode
photo_id_key = data_dict["PhotoIDKey"].decode("base64")
user_photo_key = data_dict["UserPhotoKey"].decode("base64")
response = requests.Response()
response.status_code = 200
return response
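# Descriptive note: this mock and the *_error / *_unavailable variants below cover the
# success, HTTP-rejection and connection-failure paths; the tests patch requests.post
# with one of them to drive an attempt into the 'submitted' or 'must_retry' state.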
def mock_software_secure_post_error(url, headers=None, data=None, **kwargs):
"""
Simulates what happens if our post to Software Secure is rejected, for
whatever reason.
"""
response = requests.Response()
response.status_code = 400
return response
def mock_software_secure_post_unavailable(url, headers=None, data=None, **kwargs):
"""Simulates a connection failure when we try to submit to Software Secure."""
raise requests.exceptions.ConnectionError
# Lots of patching to stub in our own settings, S3 substitutes, and HTTP posting
@patch.dict(settings.VERIFY_STUDENT, FAKE_SETTINGS)
@patch('lms.djangoapps.verify_student.models.S3Connection', new=MockS3Connection)
@patch('lms.djangoapps.verify_student.models.Key', new=MockKey)
@patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post)
@ddt.ddt
class TestPhotoVerification(ModuleStoreTestCase):
def test_state_transitions(self):
"""
Make sure we can't make unexpected status transitions.
The status transitions we expect are::
→ → → must_retry
↑ ↑ ↓
created → ready → submitted → approved
↓ ↑ ↓
↓ → → denied
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
assert_equals(attempt.status, "created")
# These should all fail because we're in the wrong starting state.
assert_raises(VerificationException, attempt.submit)
assert_raises(VerificationException, attempt.approve)
assert_raises(VerificationException, attempt.deny)
# Now let's fill in some values so that we can pass the mark_ready() call
attempt.mark_ready()
assert_equals(attempt.status, "ready")
# ready (can't approve or deny unless it's "submitted")
assert_raises(VerificationException, attempt.approve)
assert_raises(VerificationException, attempt.deny)
DENY_ERROR_MSG = '[{"photoIdReasons": ["Not provided"]}]'
# must_retry
attempt.status = "must_retry"
attempt.system_error("System error")
attempt.approve()
attempt.status = "must_retry"
attempt.deny(DENY_ERROR_MSG)
# submitted
attempt.status = "submitted"
attempt.deny(DENY_ERROR_MSG)
attempt.status = "submitted"
attempt.approve()
# approved
assert_raises(VerificationException, attempt.submit)
attempt.approve() # no-op
attempt.system_error("System error") # no-op, something processed it without error
attempt.deny(DENY_ERROR_MSG)
# denied
assert_raises(VerificationException, attempt.submit)
attempt.deny(DENY_ERROR_MSG) # no-op
attempt.system_error("System error") # no-op, something processed it without error
attempt.approve()
def test_name_freezing(self):
"""
You can change your name prior to marking a verification attempt ready,
        but changing your name afterwards should not affect the value in the
        attempt record. Basically, we want to always know what your name
was when you submitted it.
"""
user = UserFactory.create()
user.profile.name = u"Jack \u01B4" # gratuious non-ASCII char to test encodings
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = u"Clyde \u01B4"
attempt.mark_ready()
user.profile.name = u"Rusty \u01B4"
assert_equals(u"Clyde \u01B4", attempt.name)
def create_and_submit(self):
"""Helper method to create a generic submission and send it."""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = u"Rust\u01B4"
attempt.upload_face_image("Just pretend this is image data")
attempt.upload_photo_id_image("Hey, we're a photo ID")
attempt.mark_ready()
attempt.submit()
return attempt
def test_submissions(self):
"""Test that we set our status correctly after a submission."""
# Basic case, things go well.
attempt = self.create_and_submit()
assert_equals(attempt.status, "submitted")
# We post, but Software Secure doesn't like what we send for some reason
with patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post_error):
attempt = self.create_and_submit()
assert_equals(attempt.status, "must_retry")
        # We try to post, but run into an error (in this case a network connection error)
with patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post_unavailable):
attempt = self.create_and_submit()
assert_equals(attempt.status, "must_retry")
@mock.patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_submission_while_testing_flag_is_true(self):
""" Test that a fake value is set for field 'photo_id_key' of user's
initial verification when the feature flag 'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'
is enabled.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = "test-user"
attempt.upload_photo_id_image("Image data")
attempt.mark_ready()
attempt.submit()
self.assertEqual(attempt.photo_id_key, "fake-photo-id-key")
def test_active_for_user(self):
"""
        Make sure we can retrieve a user's active (in progress) verification
attempt.
"""
user = UserFactory.create()
        # This user has no active attempt at the moment...
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))
# Create an attempt and mark it ready...
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.mark_ready()
assert_equals(attempt, SoftwareSecurePhotoVerification.active_for_user(user))
# A new user won't see this...
user2 = UserFactory.create()
user2.save()
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user2))
# If it's got a different status, it doesn't count
for status in ["submitted", "must_retry", "approved", "denied"]:
attempt.status = status
attempt.save()
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))
# But if we create yet another one and mark it ready, it passes again.
attempt_2 = SoftwareSecurePhotoVerification(user=user)
attempt_2.mark_ready()
assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))
# And if we add yet another one with a later created time, we get that
# one instead. We always want the most recent attempt marked ready()
attempt_3 = SoftwareSecurePhotoVerification(
user=user,
created_at=attempt_2.created_at + timedelta(days=1)
)
attempt_3.save()
# We haven't marked attempt_3 ready yet, so attempt_2 still wins
assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))
# Now we mark attempt_3 ready and expect it to come back
attempt_3.mark_ready()
assert_equals(attempt_3, SoftwareSecurePhotoVerification.active_for_user(user))
def test_user_is_verified(self):
"""
Test to make sure we correctly answer whether a user has been verified.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.save()
# If it's any of these, they're not verified...
for status in ["created", "ready", "denied", "submitted", "must_retry"]:
attempt.status = status
attempt.save()
assert_false(SoftwareSecurePhotoVerification.user_is_verified(user), status)
attempt.status = "approved"
attempt.save()
assert_true(SoftwareSecurePhotoVerification.user_is_verified(user), attempt.status)
def test_user_has_valid_or_pending(self):
"""
Determine whether we have to prompt this user to verify, or if they've
already at least initiated a verification submission.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
# If it's any of these statuses, they don't have anything outstanding
for status in ["created", "ready", "denied"]:
attempt.status = status
attempt.save()
assert_false(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)
# Any of these, and we are. Note the benefit of the doubt we're giving
# -- must_retry, and submitted both count until we hear otherwise
for status in ["submitted", "must_retry", "approved"]:
attempt.status = status
attempt.save()
assert_true(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)
def test_user_status(self):
# test for correct status when no error returned
user = UserFactory.create()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('none', ''))
# test for when one has been created
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'approved'
attempt.save()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('approved', ''))
# create another one for the same user, make sure the right one is
# returned
attempt2 = SoftwareSecurePhotoVerification(user=user)
attempt2.status = 'denied'
attempt2.error_msg = '[{"photoIdReasons": ["Not provided"]}]'
attempt2.save()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('approved', ''))
# now delete the first one and verify that the denial is being handled
# properly
attempt.delete()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('must_reverify', "No photo ID was provided."))
def test_parse_error_msg_success(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'denied'
attempt.error_msg = '[{"photoIdReasons": ["Not provided"]}]'
parsed_error_msg = attempt.parsed_error_msg()
self.assertEquals("No photo ID was provided.", parsed_error_msg)
def test_parse_error_msg_failure(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'denied'
# when we can't parse into json
bad_messages = {
'Not Provided',
'[{"IdReasons": ["Not provided"]}]',
'{"IdReasons": ["Not provided"]}',
u'[{"ïḋṚëäṡöṅṡ": ["Ⓝⓞⓣ ⓟⓡⓞⓥⓘⓓⓔⓓ "]}]',
}
for msg in bad_messages:
attempt.error_msg = msg
parsed_error_msg = attempt.parsed_error_msg()
self.assertEquals(parsed_error_msg, "There was an error verifying your ID photos.")
def test_active_at_datetime(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Not active before the created date
before = attempt.created_at - timedelta(seconds=1)
self.assertFalse(attempt.active_at_datetime(before))
# Active immediately after created date
after_created = attempt.created_at + timedelta(seconds=1)
self.assertTrue(attempt.active_at_datetime(after_created))
# Active immediately before expiration date
expiration = attempt.created_at + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
self.assertTrue(attempt.active_at_datetime(before_expiration))
# Not active after the expiration date
after = expiration + timedelta(seconds=1)
self.assertFalse(attempt.active_at_datetime(after))
def test_verification_for_datetime(self):
user = UserFactory.create()
now = datetime.now(pytz.UTC)
# No attempts in the query set, so should return None
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(now, query)
self.assertIs(result, None)
# Should also return None if no deadline specified
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(None, query)
self.assertIs(result, None)
# Make an attempt
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Before the created date, should get no results
before = attempt.created_at - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(before, query)
self.assertIs(result, None)
# Immediately after the created date, should get the attempt
after_created = attempt.created_at + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(after_created, query)
self.assertEqual(result, attempt)
# If no deadline specified, should return first available
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(None, query)
self.assertEqual(result, attempt)
# Immediately before the expiration date, should get the attempt
expiration = attempt.created_at + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(before_expiration, query)
self.assertEqual(result, attempt)
# Immediately after the expiration date, should not get the attempt
after = expiration + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(after, query)
self.assertIs(result, None)
# Create a second attempt in the same window
second_attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Now we should get the newer attempt
deadline = second_attempt.created_at + timedelta(days=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(deadline, query)
self.assertEqual(result, second_attempt)
@ddt.unpack
@ddt.data(
{'enrollment_mode': 'honor', 'status': None, 'output': 'N/A'},
{'enrollment_mode': 'audit', 'status': None, 'output': 'N/A'},
{'enrollment_mode': 'verified', 'status': False, 'output': 'Not ID Verified'},
{'enrollment_mode': 'verified', 'status': True, 'output': 'ID Verified'},
)
def test_verification_status_for_user(self, enrollment_mode, status, output):
"""
Verify verification_status_for_user returns correct status.
"""
user = UserFactory.create()
course = CourseFactory.create()
with patch(
'lms.djangoapps.verify_student.models.SoftwareSecurePhotoVerification.user_is_verified'
) as mock_verification:
mock_verification.return_value = status
status = SoftwareSecurePhotoVerification.verification_status_for_user(user, course.id, enrollment_mode)
self.assertEqual(status, output)
def test_initial_verification_for_user(self):
"""Test that method 'get_initial_verification' of model
'SoftwareSecurePhotoVerification' always returns the initial
verification with field 'photo_id_key' set against a user.
"""
user = UserFactory.create()
# No initial verification for the user
result = SoftwareSecurePhotoVerification.get_initial_verification(user=user)
self.assertIs(result, None)
# Make an initial verification with 'photo_id_key'
attempt = SoftwareSecurePhotoVerification(user=user, photo_id_key="dummy_photo_id_key")
attempt.status = 'approved'
attempt.save()
# Check that method 'get_initial_verification' returns the correct
# initial verification attempt
first_result = SoftwareSecurePhotoVerification.get_initial_verification(user=user)
self.assertIsNotNone(first_result)
# Now create a second verification without 'photo_id_key'
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'submitted'
attempt.save()
# Test method 'get_initial_verification' still returns the correct
# initial verification attempt which have 'photo_id_key' set
second_result = SoftwareSecurePhotoVerification.get_initial_verification(user=user)
self.assertIsNotNone(second_result)
self.assertEqual(second_result, first_result)
@ddt.ddt
class VerificationCheckpointTest(ModuleStoreTestCase):
"""Tests for the VerificationCheckpoint model. """
def setUp(self):
super(VerificationCheckpointTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.checkpoint_midterm = u'i4x://{org}/{course}/edx-reverification-block/midterm_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
self.checkpoint_final = u'i4x://{org}/{course}/edx-reverification-block/final_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
@ddt.data('midterm', 'final')
def test_get_or_create_verification_checkpoint(self, checkpoint):
"""
Test that a reverification checkpoint is created properly.
"""
checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/{checkpoint}'.format(
org=self.course.id.org, course=self.course.id.course, checkpoint=checkpoint
)
# create the 'VerificationCheckpoint' checkpoint
verification_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=checkpoint_location
)
self.assertEqual(
VerificationCheckpoint.get_or_create_verification_checkpoint(self.course.id, checkpoint_location),
verification_checkpoint
)
def test_get_or_create_verification_checkpoint_for_not_existing_values(self):
# Retrieving a checkpoint that doesn't yet exist will create it
location = u'i4x://edX/DemoX/edx-reverification-block/invalid_location'
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(self.course.id, location)
self.assertIsNot(checkpoint, None)
self.assertEqual(checkpoint.course_id, self.course.id)
self.assertEqual(checkpoint.checkpoint_location, location)
def test_get_or_create_integrity_error(self):
# Create the checkpoint
VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.checkpoint_midterm,
)
# Simulate that the get-or-create operation raises an IntegrityError.
# This can happen when two processes both try to get-or-create at the same time
# when the database is set to REPEATABLE READ.
# To avoid IntegrityError situations when calling this method, set the view to
# use a READ COMMITTED transaction instead.
with patch.object(VerificationCheckpoint.objects, "get_or_create") as mock_get_or_create:
mock_get_or_create.side_effect = IntegrityError
with self.assertRaises(IntegrityError):
_ = VerificationCheckpoint.get_or_create_verification_checkpoint(
self.course.id,
self.checkpoint_midterm
)
def test_unique_together_constraint(self):
"""
Test the unique together constraint.
"""
# create the VerificationCheckpoint checkpoint
VerificationCheckpoint.objects.create(course_id=self.course.id, checkpoint_location=self.checkpoint_midterm)
# test creating the VerificationCheckpoint checkpoint with same course
# id and checkpoint name
with self.assertRaises(IntegrityError):
VerificationCheckpoint.objects.create(course_id=self.course.id, checkpoint_location=self.checkpoint_midterm)
def test_add_verification_attempt_software_secure(self):
"""
Test adding Software Secure photo verification attempts for the
reverification checkpoints.
"""
# adding two check points.
first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.checkpoint_midterm
)
second_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.checkpoint_final
)
# make an attempt for the 'first_checkpoint'
first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.assertEqual(first_checkpoint.photo_verification.count(), 1)
# make another attempt for the 'first_checkpoint'
first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.assertEqual(first_checkpoint.photo_verification.count(), 2)
# make new attempt for the 'second_checkpoint'
attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
second_checkpoint.add_verification_attempt(attempt)
self.assertEqual(second_checkpoint.photo_verification.count(), 1)
# remove the attempt from 'second_checkpoint'
second_checkpoint.photo_verification.remove(attempt)
self.assertEqual(second_checkpoint.photo_verification.count(), 0)
@ddt.ddt
class VerificationStatusTest(ModuleStoreTestCase):
""" Tests for the VerificationStatus model. """
def setUp(self):
super(VerificationStatusTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.first_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/first_checkpoint_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
self.first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.first_checkpoint_location
)
self.second_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/second_checkpoint_uuid'.\
format(org=self.course.id.org, course=self.course.id.course)
self.second_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.second_checkpoint_location
)
@ddt.data('submitted', "approved", "denied", "error")
def test_add_verification_status(self, status):
""" Adding verification status using the class method. """
# adding verification status
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status=status
)
# test the status from database
result = VerificationStatus.objects.filter(checkpoint=self.first_checkpoint)[0]
self.assertEqual(result.status, status)
self.assertEqual(result.user, self.user)
@ddt.data("approved", "denied", "error")
def test_add_status_from_checkpoints(self, status):
"""Test verification status for reverification checkpoints after
submitting software secure photo verification.
"""
# add initial verification status for checkpoints
initial_status = "submitted"
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status=initial_status
)
VerificationStatus.add_verification_status(
checkpoint=self.second_checkpoint,
user=self.user,
status=initial_status
)
# now add verification status for multiple checkpoint points
VerificationStatus.add_status_from_checkpoints(
checkpoints=[self.first_checkpoint, self.second_checkpoint], user=self.user, status=status
)
# test that verification status entries with new status have been added
# for both checkpoints
result = VerificationStatus.objects.filter(user=self.user, checkpoint=self.first_checkpoint)
self.assertEqual(len(result), len(self.first_checkpoint.checkpoint_status.all()))
self.assertEqual(
list(result.values_list('checkpoint__checkpoint_location', flat=True)),
list(self.first_checkpoint.checkpoint_status.values_list('checkpoint__checkpoint_location', flat=True))
)
result = VerificationStatus.objects.filter(user=self.user, checkpoint=self.second_checkpoint)
self.assertEqual(len(result), len(self.second_checkpoint.checkpoint_status.all()))
self.assertEqual(
list(result.values_list('checkpoint__checkpoint_location', flat=True)),
list(self.second_checkpoint.checkpoint_status.values_list('checkpoint__checkpoint_location', flat=True))
)
def test_get_location_id(self):
"""
Getting location id for a specific checkpoint.
"""
# creating software secure attempt against checkpoint
self.first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
# add initial verification status for checkpoint
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status='submitted',
)
attempt = SoftwareSecurePhotoVerification.objects.filter(user=self.user)
self.assertIsNotNone(VerificationStatus.get_location_id(attempt))
self.assertEqual(VerificationStatus.get_location_id(None), '')
def test_get_user_attempts(self):
"""
        Test retrieving the number of verification attempts of a user at a checkpoint.
"""
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status='submitted'
)
actual_attempts = VerificationStatus.get_user_attempts(
self.user.id,
self.course.id,
self.first_checkpoint_location
)
self.assertEqual(actual_attempts, 1)
class SkippedReverificationTest(ModuleStoreTestCase):
"""
Tests for the SkippedReverification model.
"""
def setUp(self):
super(SkippedReverificationTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
dummy_checkpoint_location = u'i4x://edX/DemoX/edx-reverification-block/midterm_uuid'
self.checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=dummy_checkpoint_location
)
def test_add_skipped_attempts(self):
"""
Test 'add_skipped_reverification_attempt' method.
"""
# add verification status
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
# test the status of skipped reverification from database
result = SkippedReverification.objects.filter(course_id=self.course.id)[0]
self.assertEqual(result.checkpoint, self.checkpoint)
self.assertEqual(result.user, self.user)
self.assertEqual(result.course_id, self.course.id)
def test_unique_constraint(self):
"""Test that adding skipped re-verification with same user and course
id will raise 'IntegrityError' exception.
"""
# add verification object
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
with self.assertRaises(IntegrityError):
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
# create skipped attempt for different user
user2 = UserFactory.create()
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=user2.id, course_id=unicode(self.course.id)
)
# test the status of skipped reverification from database
result = SkippedReverification.objects.filter(user=user2)[0]
self.assertEqual(result.checkpoint, self.checkpoint)
self.assertEqual(result.user, user2)
self.assertEqual(result.course_id, self.course.id)
def test_check_user_skipped_reverification_exists(self):
"""
Test the 'check_user_skipped_reverification_exists' method's response.
"""
# add verification status
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
self.assertTrue(
SkippedReverification.check_user_skipped_reverification_exists(
user_id=self.user.id,
course_id=self.course.id
)
)
user2 = UserFactory.create()
self.assertFalse(
SkippedReverification.check_user_skipped_reverification_exists(
user_id=user2.id,
course_id=self.course.id
)
)
class VerificationDeadlineTest(TestCase):
"""
Tests for the VerificationDeadline model.
"""
def test_caching(self):
deadlines = {
CourseKey.from_string("edX/DemoX/Fall"): datetime.now(pytz.UTC),
CourseKey.from_string("edX/DemoX/Spring"): datetime.now(pytz.UTC) + timedelta(days=1)
}
course_keys = deadlines.keys()
# Initially, no deadlines are set
with self.assertNumQueries(1):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, {})
# Create the deadlines
for course_key, deadline in deadlines.iteritems():
VerificationDeadline.objects.create(
course_key=course_key,
deadline=deadline,
)
# Warm the cache
with self.assertNumQueries(1):
VerificationDeadline.deadlines_for_courses(course_keys)
# Load the deadlines from the cache
with self.assertNumQueries(0):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, deadlines)
# Delete the deadlines
VerificationDeadline.objects.all().delete()
# Verify that the deadlines are updated correctly
with self.assertNumQueries(1):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, {})
| agpl-3.0 |
40223226/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | 66279 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
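# Illustrative sketch (defined for illustration only, never called at import):
# a minimal example of how addLevelName()/getLevelName() map between numbers
# and names. The 'TRACE' name and the value 5 are arbitrary assumptions, not
# predefined levels.
def _example_level_names():
    addLevelName(5, 'TRACE')
    assert getLevelName(5) == 'TRACE'        # number -> name
    assert getLevelName('TRACE') == 5        # name -> number
    assert getLevelName(42) == 'Level 42'    # unknown levels fall back to "Level %s"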
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
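# Illustrative sketch (never called): shows the dictionary-as-sole-argument
# behaviour described in LogRecord.__init__ above. The logger name, line
# number and message are arbitrary assumptions.
def _example_logrecord_dict_args():
    record = LogRecord('demo', INFO, __file__, 0,
                       'a %(a)d b %(b)s', ({'a': 1, 'b': 2},), None)
    # The single dict in args replaces the args tuple, so %(key)s formatting works.
    assert record.getMessage() == 'a 1 b 2'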
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
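# Illustrative sketch (never called): the three style classes formatting the
# same record. The record contents and format strings are arbitrary
# assumptions; record.message is set by hand because Formatter.format()
# normally does that step.
def _example_format_styles():
    record = makeLogRecord({'name': 'demo', 'levelname': 'INFO',
                            'msg': 'disk %s is full', 'args': ('sda1',)})
    record.message = record.getMessage()
    expected = 'INFO:disk sda1 is full'
    assert PercentStyle('%(levelname)s:%(message)s').format(record) == expected
    assert StrFormatStyle('{levelname}:{message}').format(record) == expected
    assert StringTemplateStyle('${levelname}:${message}').format(record) == expected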
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
        .. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
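# Illustrative sketch (never called): formatting one record end to end with a
# custom format string. The logger name, line number and message are arbitrary
# assumptions chosen only to make the expected output obvious.
def _example_formatter():
    formatter = Formatter('%(levelname)s %(name)s: %(message)s')
    record = LogRecord('demo', INFO, __file__, 42, 'loaded %d items', (3,), None)
    assert formatter.format(record) == 'INFO demo: loaded 3 items'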
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
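# Illustrative sketch (never called): the hierarchy rule described in the
# Filter docstring above, using arbitrary logger names.
def _example_filter():
    allow_ab = Filter('A.B')
    assert allow_ab.filter(makeLogRecord({'name': 'A.B'}))       # exact match
    assert allow_ab.filter(makeLogRecord({'name': 'A.B.C'}))     # child logger
    assert not allow_ab.filter(makeLogRecord({'name': 'A.BB'}))  # not a child
    assert not allow_ab.filter(makeLogRecord({'name': 'B.A.B'}))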
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
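# Illustrative sketch (never called): a FileHandler with delay=True opens its
# file only when the first record is emitted. The POSIX-style path, the format
# string and the record contents are arbitrary assumptions.
def _example_file_handler(path='/tmp/logging_example.log'):
    handler = FileHandler(path, mode='w', delay=True)
    handler.setFormatter(Formatter('%(levelname)s:%(name)s:%(message)s'))
    record = LogRecord('demo', WARNING, __file__, 0, 'low disk space', (), None)
    handler.handle(record)   # lazily opens the file and writes one line
    handler.close()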
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
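# Illustrative sketch (never called): wrapping a logger in a LoggerAdapter so
# that contextual data travels with every record. The logger name and the
# 'connid' key are arbitrary assumptions.
def _example_logger_adapter():
    base = Logger.manager.getLogger('myapp.net')
    base.setLevel(DEBUG)
    base.addHandler(StreamHandler())
    adapter = LoggerAdapter(base, {'connid': 'abc123'})
    # process() injects the extra dict into each record, so a Formatter using
    # '%(connid)s' can include it in the output.
    adapter.info('connection opened')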
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
        ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
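# Illustrative sketch (never called): one-shot configuration of the root
# logger with a FileHandler. The file path, mode and format string are
# arbitrary assumptions; the call only takes effect if the root logger has no
# handlers yet.
def _example_basic_config(path='/tmp/app.log'):
    basicConfig(filename=path, filemode='w', level=DEBUG,
                format='%(asctime)s %(levelname)s %(name)s %(message)s')
    getLogger('demo').debug('one-shot configuration is in place')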
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
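# Illustrative sketch (never called): the dotted-name hierarchy and getChild().
# The logger names are arbitrary assumptions, and the last assertion assumes
# the root logger is still at its default WARNING level.
def _example_logger_hierarchy():
    app = getLogger('myapp')
    pool = getLogger('myapp.db.pool')
    assert app.getChild('db.pool') is pool       # same object, resolved by name
    assert pool.parent is app                    # 'myapp.db' is only a placeholder
    assert pool.getEffectiveLevel() == WARNING   # inherited from the root logger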
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
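# Illustrative sketch (never called): the library-side setup described in the
# NullHandler docstring. The library logger name is an arbitrary assumption.
def _example_library_logging():
    library_logger = getLogger('mylibrary')
    library_logger.addHandler(NullHandler())
    # No "no handlers could be found" warning is produced; output only appears
    # if the application configures handlers of its own.
    library_logger.warning('handled quietly by NullHandler')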
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning() which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
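# Illustrative sketch (never called): routing the warnings module through
# logging and back. The warning text is an arbitrary assumption.
def _example_capture_warnings():
    captureWarnings(True)    # warnings.warn() output now goes to 'py.warnings'
    warnings.warn('this message is logged at WARNING level')
    captureWarnings(False)   # restore the original warnings machinery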
| gpl-3.0 |
magenta/magenta | magenta/pipelines/dag_pipeline_test.py | 1 | 29773 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dag_pipeline."""
import collections
from absl.testing import absltest
from magenta.pipelines import dag_pipeline
from magenta.pipelines import pipeline
from magenta.pipelines import statistics
Type0 = collections.namedtuple('Type0', ['x', 'y', 'z'])
Type1 = collections.namedtuple('Type1', ['x', 'y'])
Type2 = collections.namedtuple('Type2', ['z'])
Type3 = collections.namedtuple('Type3', ['s', 't'])
Type4 = collections.namedtuple('Type4', ['s', 't', 'z'])
Type5 = collections.namedtuple('Type5', ['a', 'b', 'c', 'd', 'z'])
# pylint:disable=missing-class-docstring
class UnitA(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'t1': Type1, 't2': Type2})
def transform(self, input_object):
t1 = Type1(x=input_object.x, y=input_object.y)
t2 = Type2(z=input_object.z)
return {'t1': [t1], 't2': [t2]}
class UnitB(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type3)
def transform(self, input_object):
t3 = Type3(s=input_object.x * 1000, t=input_object.y - 100)
return [t3]
class UnitC(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(
self,
{'A_data': Type2, 'B_data': Type3},
{'regular_data': Type4, 'special_data': Type4})
def transform(self, input_object):
s = input_object['B_data'].s
t = input_object['B_data'].t
z = input_object['A_data'].z
regular = Type4(s=s, t=t, z=0)
special = Type4(s=s + z * 100, t=t - z * 100, z=z)
return {'regular_data': [regular], 'special_data': [special]}
class UnitD(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(
self, {'0': Type4, '1': Type3, '2': Type4}, Type5)
def transform(self, input_object):
assert input_object['1'].s == input_object['0'].s
assert input_object['1'].t == input_object['0'].t
t5 = Type5(
a=input_object['0'].s, b=input_object['0'].t,
c=input_object['2'].s, d=input_object['2'].t, z=input_object['2'].z)
return [t5]
class DAGPipelineTest(absltest.TestCase):
def testDAGPipelineInputAndOutputType(self):
# Tests that the DAGPipeline has the correct `input_type` and
# `output_type` values based on the DAG given to it.
a, b, c, d = UnitA(), UnitB(), UnitC(), UnitD()
dag = {a: dag_pipeline.DagInput(Type0),
b: a['t1'],
c: {'A_data': a['t2'], 'B_data': b},
d: {'0': c['regular_data'], '1': b, '2': c['special_data']},
dag_pipeline.DagOutput('abcdz'): d}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.input_type, Type0)
self.assertEqual(dag_pipe_obj.output_type, {'abcdz': Type5})
dag = {a: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput('t1'): a['t1'],
dag_pipeline.DagOutput('t2'): a['t2']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.input_type, Type0)
self.assertEqual(dag_pipe_obj.output_type, {'t1': Type1, 't2': Type2})
def testSingleOutputs(self):
# Tests single object and dictionaries in the DAG.
a, b, c, d = UnitA(), UnitB(), UnitC(), UnitD()
dag = {a: dag_pipeline.DagInput(Type0),
b: a['t1'],
c: {'A_data': a['t2'], 'B_data': b},
d: {'0': c['regular_data'], '1': b, '2': c['special_data']},
dag_pipeline.DagOutput('abcdz'): d}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
inputs = [Type0(1, 2, 3), Type0(-1, -2, -3), Type0(3, -3, 2)]
for input_object in inputs:
x, y, z = input_object.x, input_object.y, input_object.z
output_dict = dag_pipe_obj.transform(input_object)
self.assertEqual(list(output_dict.keys()), ['abcdz'])
results = output_dict['abcdz']
self.assertLen(results, 1)
result = results[0]
# The following outputs are the result of passing the values in
# `input_object` through the transform functions of UnitA, UnitB, UnitC,
# and UnitD (all defined at the top of this file), connected in the way
# defined by `dag`.
self.assertEqual(result.a, x * 1000)
self.assertEqual(result.b, y - 100)
self.assertEqual(result.c, x * 1000 + z * 100)
self.assertEqual(result.d, y - 100 - z * 100)
self.assertEqual(result.z, z)
def testMultiOutput(self):
# Tests a pipeline.Pipeline that maps a single input to multiple outputs.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'t1': Type1, 't2': Type2})
def transform(self, input_object):
t1 = [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)]
t2 = [Type2(z=input_object.z)]
return {'t1': t1, 't2': t2}
q, b, c = UnitQ(), UnitB(), UnitC()
dag = {q: dag_pipeline.DagInput(Type0),
b: q['t1'],
c: {'A_data': q['t2'], 'B_data': b},
dag_pipeline.DagOutput('outputs'): c['regular_data']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = 1, 2, 3
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(list(output_dict.keys()), ['outputs'])
results = output_dict['outputs']
self.assertLen(results, 3)
expected_results = [Type4((x + i) * 1000, (y + i) - 100, 0)
for i in range(z)]
self.assertEqual(set(results), set(expected_results))
def testUnequalOutputCounts(self):
# Tests dictionary output type where each output list has a different size.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)]
class Partitioner(pipeline.Pipeline):
def __init__(self, input_type, training_set_name, test_set_name):
self.training_set_name = training_set_name
self.test_set_name = test_set_name
pipeline.Pipeline.__init__(
self,
input_type,
{training_set_name: input_type, test_set_name: input_type})
def transform(self, input_object):
if input_object.x < 0:
return {self.training_set_name: [],
self.test_set_name: [input_object]}
return {self.training_set_name: [input_object], self.test_set_name: []}
q = UnitQ()
partition = Partitioner(q.output_type, 'training_set', 'test_set')
dag = {q: dag_pipeline.DagInput(q.input_type),
partition: q,
dag_pipeline.DagOutput('training_set'): partition['training_set'],
dag_pipeline.DagOutput('test_set'): partition['test_set']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(set(output_dict.keys()), set(['training_set', 'test_set']))
training_results = output_dict['training_set']
test_results = output_dict['test_set']
expected_training_results = [Type1(x + i, y + i) for i in range(-x, z)]
expected_test_results = [Type1(x + i, y + i) for i in range(0, -x)]
self.assertEqual(set(training_results), set(expected_training_results))
self.assertEqual(set(test_results), set(expected_test_results))
def testIntermediateUnequalOutputCounts(self):
# Tests that intermediate output lists which are not the same length are
# handled correctly.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)],
'z': [Type2(z=i) for i in [-input_object.z, input_object.z]]}
class Partitioner(pipeline.Pipeline):
def __init__(self, input_type, training_set_name, test_set_name):
self.training_set_name = training_set_name
self.test_set_name = test_set_name
pipeline.Pipeline.__init__(
self,
input_type,
{training_set_name: Type0, test_set_name: Type0})
def transform(self, input_object):
input_dict = input_object
input_object = Type0(input_dict['xy'].x,
input_dict['xy'].y,
input_dict['z'].z)
if input_object.x < 0:
return {self.training_set_name: [],
self.test_set_name: [input_object]}
return {self.training_set_name: [input_object], self.test_set_name: []}
q = UnitQ()
partition = Partitioner(q.output_type, 'training_set', 'test_set')
dag = {q: dag_pipeline.DagInput(q.input_type),
partition: {'xy': q['xy'], 'z': q['z']},
dag_pipeline.DagOutput('training_set'): partition['training_set'],
dag_pipeline.DagOutput('test_set'): partition['test_set']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(set(output_dict.keys()), set(['training_set', 'test_set']))
training_results = output_dict['training_set']
test_results = output_dict['test_set']
all_expected_results = [Type0(x + i, y + i, zed)
for i in range(0, z) for zed in [-z, z]]
expected_training_results = [sample for sample in all_expected_results
if sample.x >= 0]
expected_test_results = [sample for sample in all_expected_results
if sample.x < 0]
self.assertEqual(set(training_results), set(expected_training_results))
self.assertEqual(set(test_results), set(expected_test_results))
def testDirectConnection(self):
# Tests a direct dict to dict connection in the DAG.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(x=input_object.x, y=input_object.y)],
'z': [Type2(z=input_object.z)]}
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'xy': Type1, 'z': Type2}, Type4)
def transform(self, input_object):
input_dict = input_object
return [Type4(input_dict['xy'].x,
input_dict['xy'].y,
input_dict['z'].z)]
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
r: q,
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(output_dict, {'output': [Type4(x, y, z)]})
def testOutputConnectedToDict(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(x=input_object.x, y=input_object.y)],
'z': [Type2(z=input_object.z)]}
q = UnitQ()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): q}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.output_type, {'xy': Type1, 'z': Type2})
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(output_dict, {'xy': [Type1(x, y)], 'z': [Type2(z)]})
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): {'xy': q['xy'], 'z': q['z']}}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.output_type, {'xy': Type1, 'z': Type2})
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(output_dict, {'xy': [Type1(x, y)], 'z': [Type2(z)]})
def testNoOutputs(self):
# Test that empty lists or dicts as intermediate or final outputs don't
# break anything.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [], 'z': []}
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'xy': Type1, 'z': Type2}, Type4)
def transform(self, input_object):
input_dict = input_object
return [Type4(input_dict['xy'].x,
input_dict['xy'].y,
input_dict['z'].z)]
class UnitS(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, unused_input_dict):
return []
q, r, s = UnitQ(), UnitR(), UnitS()
dag = {q: dag_pipeline.DagInput(Type0),
r: q,
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.transform(Type0(1, 2, 3)), {'output': []})
dag = {q: dag_pipeline.DagInput(Type0),
s: dag_pipeline.DagInput(Type0),
r: {'xy': s, 'z': q['z']},
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.transform(Type0(1, 2, 3)), {'output': []})
dag = {s: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput('output'): s}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.transform(Type0(1, 2, 3)), {'output': []})
dag = {q: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput(): q}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(
dag_pipe_obj.transform(Type0(1, 2, 3)),
{'xy': [], 'z': []})
def testNoPipelines(self):
dag = {dag_pipeline.DagOutput('output'): dag_pipeline.DagInput(Type0)}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(
dag_pipe_obj.transform(Type0(1, 2, 3)),
{'output': [Type0(1, 2, 3)]})
def testStatistics(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('output_count', input_object.z)])
return [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)]
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type1)
def transform(self, input_object):
self._set_stats([statistics.Counter('input_count', 1)])
return [input_object]
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
r: q,
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag, 'DAGPipelineName')
for x, y, z in [(-3, 0, 8), (1, 2, 3), (5, -5, 5)]:
dag_pipe_obj.transform(Type0(x, y, z))
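    # Stats reported by each unit are prefixed with the pipeline name and the
    # unit name, e.g. 'DAGPipelineName_UnitQ_output_count', as asserted below.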
stats_1 = dag_pipe_obj.get_stats()
stats_2 = dag_pipe_obj.get_stats()
self.assertEqual(stats_1, stats_2)
for stat in stats_1:
self.assertIsInstance(stat, statistics.Counter)
names = sorted([stat.name for stat in stats_1])
self.assertEqual(
names,
(['DAGPipelineName_UnitQ_output_count'] +
['DAGPipelineName_UnitR_input_count'] * z))
for stat in stats_1:
if stat.name == 'DAGPipelineName_UnitQ_output_count':
self.assertEqual(stat.count, z)
else:
self.assertEqual(stat.count, 1)
def testInvalidDAGError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'a': Type1, 'b': Type2})
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type2)
def transform(self, input_object):
pass
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(Type0),
UnitR: q,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
'r': q,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: UnitQ,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: 123,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {dag_pipeline.DagInput(Type0): q,
dag_pipeline.DagOutput(): q}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
q: dag_pipeline.DagOutput('output')}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: {'abc': q['a'], 'def': 123},
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: {123: q['a']},
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
def testTypeMismatchError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, {'a': Type2, 'b': Type3})
def transform(self, input_object):
pass
class UnitS(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'x': Type2, 'y': Type3}, Type4)
def transform(self, input_object):
pass
class UnitT(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'x': Type2, 'y': Type5}, Type4)
def transform(self, input_object):
pass
q, r, s, t = UnitQ(), UnitR(), UnitS(), UnitT()
dag = {q: dag_pipeline.DagInput(Type1),
r: q,
s: r,
dag_pipeline.DagOutput('output'): s}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
q2 = UnitQ()
dag = {q: dag_pipeline.DagInput(Type0),
q2: q,
dag_pipeline.DagOutput('output'): q2}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: q,
s: {'x': r['b'], 'y': r['a']},
dag_pipeline.DagOutput('output'): s}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: q,
t: r,
dag_pipeline.DagOutput('output'): t}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
def testDependencyLoops(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type0)
def transform(self, input_object):
pass
class UnitS(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'a': Type1, 'b': Type0}, Type1)
def transform(self, input_object):
pass
class UnitT(pipeline.Pipeline):
def __init__(self, name='UnitT'):
pipeline.Pipeline.__init__(self, Type0, Type0, name)
def transform(self, input_object):
pass
q, r, s, t = UnitQ(), UnitR(), UnitS(), UnitT()
dag = {q: dag_pipeline.DagInput(q.input_type),
s: {'a': q, 'b': r},
r: s,
dag_pipeline.DagOutput('output'): r,
dag_pipeline.DagOutput('output_2'): s}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
dag = {s: {'a': dag_pipeline.DagInput(Type1), 'b': r},
r: s,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
dag = {dag_pipeline.DagOutput('output'): dag_pipeline.DagInput(Type0),
t: t}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
t2 = UnitT('UnitT2')
dag = {dag_pipeline.DagOutput('output'): dag_pipeline.DagInput(Type0),
t2: t,
t: t2}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
def testDisjointGraph(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, {'a': Type2, 'b': Type3})
def transform(self, input_object):
pass
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): r}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): {'a': q, 'b': r['b']}}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
# Pipelines that do not output to anywhere are not allowed.
dag = {dag_pipeline.DagOutput('output'):
dag_pipeline.DagInput(q.input_type),
q: dag_pipeline.DagInput(q.input_type),
r: q}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
# Pipelines which need to be executed but don't have inputs are not allowed.
dag = {dag_pipeline.DagOutput('output'):
dag_pipeline.DagInput(q.input_type),
r: q,
dag_pipeline.DagOutput(): r}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
def testBadInputOrOutputError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self, name='UnitQ'):
pipeline.Pipeline.__init__(self, Type0, Type1, name)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type0)
def transform(self, input_object):
pass
# Missing Input.
q, r = UnitQ(), UnitR()
dag = {r: q,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.BadInputOrOutputError):
dag_pipeline.DAGPipeline(dag)
# Missing Output.
dag = {q: dag_pipeline.DagInput(Type0),
r: q}
with self.assertRaises(dag_pipeline.BadInputOrOutputError):
dag_pipeline.DAGPipeline(dag)
# Multiple instances of Input with the same type IS allowed.
q2 = UnitQ('UnitQ2')
dag = {q: dag_pipeline.DagInput(Type0),
q2: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput(): {'q': q, 'q2': q2}}
_ = dag_pipeline.DAGPipeline(dag)
# Multiple instances with different types is not allowed.
dag = {q: dag_pipeline.DagInput(Type0),
r: dag_pipeline.DagInput(Type1),
dag_pipeline.DagOutput(): {'q': q, 'r': r}}
with self.assertRaises(dag_pipeline.BadInputOrOutputError):
dag_pipeline.DAGPipeline(dag)
def testDuplicateNameError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self, name='UnitQ'):
pipeline.Pipeline.__init__(self, Type0, Type1, name)
def transform(self, input_object):
pass
q, q2 = UnitQ(), UnitQ()
dag = {q: dag_pipeline.DagInput(Type0),
q2: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput(): {'q': q, 'q2': q2}}
with self.assertRaises(dag_pipeline.DuplicateNameError):
dag_pipeline.DAGPipeline(dag)
def testInvalidDictionaryOutputError(self):
b = UnitB()
dag = {b: dag_pipeline.DagInput(b.input_type),
dag_pipeline.DagOutput(): b}
with self.assertRaises(dag_pipeline.InvalidDictionaryOutputError):
dag_pipeline.DAGPipeline(dag)
a = UnitA()
dag = {a: dag_pipeline.DagInput(b.input_type),
dag_pipeline.DagOutput('output'): a}
with self.assertRaises(dag_pipeline.InvalidDictionaryOutputError):
dag_pipeline.DAGPipeline(dag)
a2 = UnitA()
dag = {a: dag_pipeline.DagInput(a.input_type),
a2: dag_pipeline.DagInput(a2.input_type),
dag_pipeline.DagOutput('output'): {'t1': a['t1'], 't2': a2['t2']}}
with self.assertRaises(dag_pipeline.InvalidDictionaryOutputError):
dag_pipeline.DAGPipeline(dag)
def testInvalidTransformOutputError(self):
# This happens when the output of a pipeline's `transform` method does not
# match the type signature given by the pipeline's `output_type`.
class UnitQ1(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return [Type2(1)]
class UnitQ2(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return [Type1(1, 2), Type2(1)]
class UnitQ3(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return Type1(1, 2)
class UnitR1(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(1, 2)], 'z': [Type1(1, 2)]}
class UnitR2(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(1, 2)]}
class UnitR3(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return [{'xy': [Type1(1, 2)], 'z': Type2(1)}]
class UnitR4(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return [{'xy': [Type1(1, 2), Type2(1)], 'z': [Type2(1)]}]
class UnitR5(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return [{'xy': [Type1(1, 2), Type1(1, 3)], 'z': [Type2(1)], 'q': []}]
for pipeline_class in [UnitQ1, UnitQ2, UnitQ3,
UnitR1, UnitR2, UnitR3, UnitR4, UnitR5]:
pipe = pipeline_class()
if pipeline_class.__name__.startswith('UnitR'):
output = dag_pipeline.DagOutput()
else:
output = dag_pipeline.DagOutput('output')
dag = {pipe: dag_pipeline.DagInput(pipe.input_type),
output: pipe}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
with self.assertRaises(dag_pipeline.InvalidTransformOutputError):
dag_pipe_obj.transform(Type0(1, 2, 3))
def testInvalidStatisticsError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, str, str)
def transform(self, input_object):
self._set_stats([statistics.Counter('stat_1', 5), 1234])
return [input_object]
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, str, str)
def transform(self, input_object):
self._set_stats(statistics.Counter('stat_1', 5))
return [input_object]
q = UnitQ()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput('output'): q}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
with self.assertRaises(pipeline.InvalidStatisticsError):
dag_pipe_obj.transform('hello world')
r = UnitR()
dag = {r: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
with self.assertRaises(pipeline.InvalidStatisticsError):
dag_pipe_obj.transform('hello world')
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
jgoclawski/django | tests/messages_tests/base.py | 319 | 14243 | from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import modify_settings, override_settings
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
        This method prevents the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
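        # The DEBUG message is below the default INFO level, so only 5 of the
        # 6 messages added by add_level_messages are recorded.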
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags,
['info', '', 'debug', 'warning', 'error',
'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| bsd-3-clause |
ArcticWarriors/scouting-app-2016 | ScoutingWebsite/Scouting2017/model/get_team_metrics.py | 2 | 1861 | '''
Created on Mar 5, 2017
@author: PJ
'''
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import Case, When
def get_team_metrics(team, regional_code):
metrics = team.scoreresult_set.filter(competition__code=regional_code).aggregate(
Avg("auto_fuel_high_score"),
Avg("auto_gears"),
Avg("tele_fuel_high_score"),
Avg("tele_gears"),
Sum("foul"),
Sum("tech_foul"),
Sum("yellow_card"),
Sum("red_card"),
rope__avg=Avg(Case(When(rope=True, then=1), When(rope=False, then=0))),
baseline__avg=Avg(Case(When(auto_baseline=True, then=1), When(auto_baseline=False, then=0))),
)
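    # rope__avg and baseline__avg use Case/When to map booleans to 1/0, so
    # Avg() gives the fraction of matches in which the condition was true.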
# Format all of the numbers. If we haven't scouted the team, None will be returned. Turn that into NA
for key in metrics:
if metrics[key] == None:
metrics[key] = "NA"
elif "__avg" in key:
metrics[key] = "{:10.2f}".format(metrics[key])
if metrics['tele_fuel_high_score__avg'] != "NA":
metrics['auto_fuel_high_misses__avg'] = float(metrics['auto_fuel_high_shots__avg']) - float(metrics['auto_fuel_high_score__avg'])
metrics['tele_fuel_high_misses__avg'] = float(metrics['tele_fuel_high_shots__avg']) - float(metrics['tele_fuel_high_score__avg'])
else:
metrics['auto_fuel_high_misses__avg'] = "NA"
metrics['tele_fuel_high_misses__avg'] = "NA"
return metrics
| mit |
bringingheavendown/numpy | numpy/distutils/fcompiler/g95.py | 229 | 1379 | # http://g95.sourceforge.net/
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['G95FCompiler']
class G95FCompiler(FCompiler):
compiler_type = 'g95'
description = 'G95 Fortran Compiler'
# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95!) May 22 2006)
version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["g95", "-ffixed-form"],
'compiler_fix' : ["g95", "-ffixed-form"],
'compiler_f90' : ["g95"],
'linker_so' : ["<F90>", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-fmod='
module_include_switch = '-I'
def get_flags(self):
return ['-fno-second-underscore']
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = G95FCompiler()
compiler.customize()
print(compiler.get_version())
| bsd-3-clause |
liyitest/rr | openstack_dashboard/api/ceilometer.py | 13 | 49091 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
# TODO(lsmola) The flavors can be set per project,
# so it should show only valid ones.
try:
flavors = nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def is_iterable(var):
"""Return True if the given is list or tuple."""
return (isinstance(var, (list, tuple)) or
issubclass(var.__class__, (list, tuple)))
def make_query(user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None):
"""Returns query built from given parameters.
This query can be then used for querying resources, meters and
statistics.
:Parameters:
- `user_id`: user_id, has a priority over list of ids
- `tenant_id`: tenant_id, has a priority over list of ids
- `resource_id`: resource_id, has a priority over list of ids
- `user_ids`: list of user_ids
- `tenant_ids`: list of tenant_ids
- `resource_ids`: list of resource_ids
"""
user_ids = user_ids or []
tenant_ids = tenant_ids or []
resource_ids = resource_ids or []
query = []
if user_id:
user_ids = [user_id]
for u_id in user_ids:
query.append({"field": "user_id", "op": "eq", "value": u_id})
if tenant_id:
tenant_ids = [tenant_id]
for t_id in tenant_ids:
query.append({"field": "project_id", "op": "eq", "value": t_id})
if resource_id:
resource_ids = [resource_id]
for r_id in resource_ids:
query.append({"field": "resource_id", "op": "eq", "value": r_id})
return query
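# Illustrative example (not part of the original module): a query restricted to
# one tenant and two resources could be built as
#     make_query(tenant_id="tenant-1", resource_ids=["res-1", "res-2"])
# which yields a list of {"field", "op", "value"} dicts:
#     [{"field": "project_id", "op": "eq", "value": "tenant-1"},
#      {"field": "resource_id", "op": "eq", "value": "res-1"},
#      {"field": "resource_id", "op": "eq", "value": "res-2"}]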
class Meter(base.APIResourceWrapper):
"""Represents one Ceilometer meter."""
_attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id']
def __init__(self, apiresource):
super(Meter, self).__init__(apiresource)
self._label = self.name
self._description = ""
def augment(self, label=None, description=None):
if label:
self._label = label
if description:
self._description = description
@property
def description(self):
return self._description
@property
def label(self):
return self._label
class Resource(base.APIResourceWrapper):
"""Represents one Ceilometer resource."""
_attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
'links']
def __init__(self, apiresource, ceilometer_usage=None):
super(Resource, self).__init__(apiresource)
# Save empty strings to IDs rather than None, so it gets
# serialized correctly. We don't want 'None' strings.
self.project_id = self.project_id or ""
self.user_id = self.user_id or ""
self.resource_id = self.resource_id or ""
self._id = "%s__%s__%s" % (self.project_id,
self.user_id,
self.resource_id)
# Meters with statistics data
self._meters = {}
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and self.project_id:
self._tenant = ceilometer_usage.get_tenant(self.project_id)
else:
self._tenant = None
if ceilometer_usage and self.user_id:
self._user = ceilometer_usage.get_user(self.user_id)
else:
self._user = None
self._query = make_query(tenant_id=self.project_id,
user_id=self.user_id,
resource_id=self.resource_id)
@property
def name(self):
name = self.metadata.get("name", None)
display_name = self.metadata.get("display_name", None)
return name or display_name or ""
@property
def id(self):
return self._id
@property
def tenant(self):
return self._tenant
@property
def user(self):
return self._user
@property
def resource(self):
return self.resource_id
@property
def query(self):
return self._query
@property
def meters(self):
return self._meters
def get_meter(self, meter_name):
return self._meters.get(meter_name, None)
def set_meter(self, meter_name, value):
self._meters[meter_name] = value
class ResourceAggregate(Resource):
"""Represents aggregate of more resources together.
Aggregate of resources can be obtained by specifying
multiple ids in one parameter or by not specifying
one parameter.
It can also be specified by query directly.
Example:
We can obtain an aggregate of resources by specifying
multiple resource_ids in resource_id parameter in init.
Or we can specify only tenant_id, which will return
all resources of that tenant.
"""
def __init__(self, tenant_id=None, user_id=None, resource_id=None,
tenant_ids=None, user_ids=None, resource_ids=None,
ceilometer_usage=None, query=None, identifier=None):
self._id = identifier
self.tenant_id = None
self.user_id = None
self.resource_id = None
# Meters with statistics data
self._meters = {}
if query:
self._query = query
else:
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and tenant_id:
self.tenant_id = tenant_id
self._tenant = ceilometer_usage.get_tenant(tenant_id)
else:
self._tenant = None
if ceilometer_usage and user_id:
self.user_id = user_id
self._user = ceilometer_usage.get_user(user_id)
else:
self._user = None
if resource_id:
self.resource_id = resource_id
self._query = make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id,
tenant_ids=tenant_ids,
user_ids=user_ids,
resource_ids=resource_ids)
@property
def id(self):
return self._id
class Sample(base.APIResourceWrapper):
"""Represents one Ceilometer sample."""
_attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
'resource_metadata', 'source', 'counter_unit', 'counter_volume',
              'project_id', 'counter_type']
@property
def instance(self):
display_name = self.resource_metadata.get('display_name', None)
instance_id = self.resource_metadata.get('instance_id', None)
return display_name or instance_id
@property
def name(self):
name = self.resource_metadata.get("name", None)
display_name = self.resource_metadata.get("display_name", None)
return name or display_name or ""
class Statistic(base.APIResourceWrapper):
"""Represents one Ceilometer statistic."""
_attrs = ['period', 'period_start', 'period_end',
'count', 'min', 'max', 'sum', 'avg',
'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
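    # The token is passed as a callable so its value is looked up lazily from
    # the request when the client needs it, instead of being captured once here.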
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
"""List the resources."""
resources = ceilometerclient(request).resources.list(q=query)
return [Resource(r, ceilometer_usage_object) for r in resources]
def sample_list(request, meter_name, query=None):
"""List the samples for this meters."""
samples = ceilometerclient(request).samples.list(meter_name=meter_name,
q=query)
return [Sample(s) for s in samples]
def meter_list(request, query=None):
"""List the user's meters."""
meters = ceilometerclient(request).meters.list(query)
return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
"""List of statistics."""
statistics = ceilometerclient(request).\
statistics.list(meter_name=meter_name, q=query, period=period)
return [Statistic(s) for s in statistics]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
"""Multithread wrapper for update_with_statistics method of
resource_usage.
A join logic is placed in process_list class method. All resources
will have its statistics attribute filled in separate threads.
The resource_usage object is shared between threads. Each thread is
updating one Resource.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `resources`: List of Resource or ResourceAggregate object,
that will be filled by statistic data.
- `resource_usage`: Wrapping resource usage object, that holds
all statistics data.
- `meter_names`: List of meter names of the statistics we want.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the attribute name of the stats.
E.g. (avg, max, min...) If None is given, whole
statistic object is returned,
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
# TODO(lsmola) Can be removed once Ceilometer supports sample-api
# and group-by, so all of this optimization will not be necessary.
# It is planned somewhere to I.
def __init__(self, resource_usage, resource, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
super(ThreadedUpdateResourceWithStatistics, self).__init__()
self.resource_usage = resource_usage
self.resource = resource
self.meter_names = meter_names
self.period = period
self.stats_attr = stats_attr
self.additional_query = additional_query
def run(self):
# Run the job
self.resource_usage.update_with_statistics(
self.resource,
meter_names=self.meter_names, period=self.period,
stats_attr=self.stats_attr, additional_query=self.additional_query)
@classmethod
def process_list(cls, resource_usage, resources, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
threads = []
for resource in resources:
# add statistics data into resource
thread = cls(resource_usage, resource, meter_names=meter_names,
period=period, stats_attr=stats_attr,
additional_query=additional_query)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
class CeilometerUsage(object):
"""Represents wrapper of any Ceilometer queries.
One instance of this class should be shared between resources
as this class provides a place where users and tenants are
cached. So there are no duplicate queries to API.
This class also wraps Ceilometer API calls and provides parallel
HTTP calls to API.
    This class should also serve as a reasonable abstraction layer that can
    absorb future optimizations of the Ceilometer service without changing
    the interface.
"""
def __init__(self, request):
self._request = request
# Cached users and tenants.
self._users = {}
self._tenants = {}
def get_user(self, user_id):
"""Returns user fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
user = self._users.get(user_id, None)
if not user:
user = keystone.user_get(self._request, user_id)
# caching the user, for later use
self._users[user_id] = user
return user
def preload_all_users(self):
"""Preloads all users into dictionary.
It's more effective to preload all users, rather than fetching many
users by separate API get calls.
"""
users = keystone.user_list(self._request)
# Cache all users on right indexes, this is more effective than to
# obtain large number of users one by one by keystone.user_get
for u in users:
self._users[u.id] = u
def get_tenant(self, tenant_id):
"""Returns tenant fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
tenant = self._tenants.get(tenant_id, None)
if not tenant:
tenant = keystone.tenant_get(self._request, tenant_id)
# caching the tenant for later use
self._tenants[tenant_id] = tenant
return tenant
def preload_all_tenants(self):
"""Preloads all tenants into dictionary.
It's more effective to preload all tenants, rather than fetching each
tenant by separate API get calls.
"""
tenants, more = keystone.tenant_list(self._request)
# Cache all tenants on right indexes, this is more effective than to
# obtain large number of tenants one by one by keystone.tenant_get
for t in tenants:
self._tenants[t.id] = t
def global_data_get(self, used_cls=None, query=None,
with_statistics=False, additional_query=None,
with_users_and_tenants=True):
"""Obtaining a resources for table view.
It obtains resources with statistics data according to declaration
in used_cls class.
:Parameters:
            - `used_cls`: Class wrapper for usage data. It acts as wrapper for
settings needed. See the call of this method for
details.
- `query`: Explicit query definition for fetching the resources. If
no query is provided, it takes a default_query from
used_cls. If no default query is provided, it fetches
all the resources and filters them by meters defined
in used_cls.
            - `with_statistics`: Define whether statistics data from the meters
defined in used_cls should be fetched.
Can be used to first obtain only the pure
resources, then with the statistics data by
AJAX.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
default_query = used_cls.default_query
query = query or default_query
filter_func = None
def filter_resources(resource):
"""Method for filtering resources by their links.rel attr.
The links.rel attributes contain all meters the resource has.
"""
for link in resource.links:
if link['rel'] in used_cls.meters:
return True
return False
if not query:
            # Not all resource types can be obtained by query; if there is no
            # query, we filter all resources with this function.
filter_func = filter_resources
if with_statistics:
# Will add statistic data into resources.
resources = self.resources_with_statistics(
query,
used_cls.meters,
filter_func=filter_func,
stats_attr=used_cls.stats_attr,
additional_query=additional_query,
with_users_and_tenants=with_users_and_tenants)
else:
# Will load only resources without statistical data.
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
return [used_cls(resource) for resource in resources]
def query_from_object_id(self, object_id):
"""Obtaining a query from resource id.
        The query can then be used to identify a resource in resources or
        meters API calls. The ID is built in the Resource initializer, or
        returned by the Datatable into the UpdateRow functionality.
"""
try:
tenant_id, user_id, resource_id = object_id.split("__")
except ValueError:
return []
return make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id)
def update_with_statistics(self, resource, meter_names=None, period=None,
stats_attr=None, additional_query=None):
"""Adding statistical data into one Resource or ResourceAggregate.
        It adds the statistics of each meter in meter_names to the resource
        attributes. The attribute name is the meter name with '.' replaced by '_'.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
                    statistic is returned. If given, a faceted result will be
                    returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
if not meter_names:
raise ValueError("meter_names and resources must be defined to be "
"able to obtain the statistics.")
# query for identifying one resource in meters
query = resource.query
if additional_query:
if not is_iterable(additional_query):
raise ValueError("Additional query must be list of"
" conditions. See the docs for format.")
query = query + additional_query
# TODO(lsmola) thread for each meter will be probably overkill
# but I should test lets say thread pool with 100 of threads
# and apply it only to this code.
# Though I do expect Ceilometer will support bulk requests,
# so all of this optimization will not be necessary.
for meter in meter_names:
statistics = statistic_list(self._request, meter,
query=query, period=period)
meter = meter.replace(".", "_")
if statistics:
if stats_attr:
# I want to load only a specific attribute
resource.set_meter(
meter,
getattr(statistics[0], stats_attr, None))
else:
# I want a dictionary of all statistics
resource.set_meter(meter, statistics)
else:
resource.set_meter(meter, None)
return resource
def resources(self, query=None, filter_func=None,
with_users_and_tenants=False):
"""Obtaining resources with the query or filter_func.
        Obtains resources and also fetches tenants and users associated
        with those resources if the with_users_and_tenants flag is true.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
if with_users_and_tenants:
ceilometer_usage_object = self
else:
ceilometer_usage_object = None
resources = resource_list(
self._request,
query=query, ceilometer_usage_object=ceilometer_usage_object)
if filter_func:
resources = [resource for resource in resources if
filter_func(resource)]
return resources
def resources_with_statistics(self, query=None, meter_names=None,
period=None, filter_func=None,
stats_attr=None, additional_query=None,
with_users_and_tenants=False):
"""Obtaining resources with statistics data inside.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
ThreadedUpdateResourceWithStatistics.process_list(
self, resources,
meter_names=meter_names, period=period, stats_attr=stats_attr,
additional_query=additional_query)
return resources
def resource_aggregates(self, queries=None):
"""Obtaining resource aggregates with queries.
        Representing a resource aggregate by a query is the most general way
        to obtain resource aggregates.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
"""
resource_aggregates = []
for identifier, query in queries.items():
resource_aggregates.append(ResourceAggregate(query=query,
ceilometer_usage=None,
identifier=identifier))
return resource_aggregates
def resource_aggregates_with_statistics(self, queries=None,
meter_names=None, period=None,
filter_func=None, stats_attr=None,
additional_query=None):
"""Obtaining resource aggregates with statistics data inside.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
resource_aggregates = self.resource_aggregates(queries)
ThreadedUpdateResourceWithStatistics.process_list(
self,
resource_aggregates, meter_names=meter_names, period=period,
stats_attr=stats_attr, additional_query=additional_query)
return resource_aggregates
def diff_lists(a, b):
if not a:
return []
elif not b:
return a
else:
return list(set(a) - set(b))
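# Illustrative example (not part of the original module):
#     diff_lists(['a', 'b', 'c'], ['b']) -> ['a', 'c']
# Set arithmetic is used, so the order of the result is not preserved.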
class Meters(object):
"""Class for listing of available meters.
It is listing meters defined in this class that are available
in Ceilometer meter_list.
It is storing information that is not available in Ceilometer, i.e.
label, description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names.
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
def _get_nova_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
meters_info = datastructures.SortedDict([
("instance", {
'label': '',
'description': _("Existence of instance"),
}),
("instance:<type>", {
'label': '',
'description': _("Existence of instance <type> "
"(openstack types)"),
}),
("memory", {
'label': '',
'description': _("Volume of RAM"),
}),
("memory.usage", {
'label': '',
'description': _("Volume of RAM used"),
}),
("cpu", {
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'label': '',
'description': _("Volume of reads"),
}),
("disk.write.bytes", {
'label': '',
'description': _("Volume of writes"),
}),
("disk.read.requests.rate", {
'label': '',
'description': _("Average rate of read requests"),
}),
("disk.write.requests.rate", {
'label': '',
'description': _("Average rate of write requests"),
}),
("disk.read.bytes.rate", {
'label': '',
'description': _("Average rate of reads"),
}),
("disk.write.bytes.rate", {
'label': '',
'description': _("Average volume of writes"),
}),
("disk.root.size", {
'label': '',
'description': _("Size of root disk"),
}),
("disk.ephemeral.size", {
'label': '',
'description': _("Size of ephemeral disk"),
}),
("network.incoming.bytes", {
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
}),
("network.incoming.bytes.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"bytes on a VM network interface"),
}),
("network.outgoing.bytes.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"bytes on a VM network interface"),
}),
("network.incoming.packets.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"packets on a VM network interface"),
}),
("network.outgoing.packets.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"packets on a VM network interface"),
}),
])
# Adding flavor based meters into meters_info dict
        # TODO(lsmola) this kind of meter will probably be deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
for flavor in get_flavor_names(self._request):
name = 'instance:%s' % flavor
meters_info[name] = dict(meters_info["instance:<type>"])
meters_info[name]['description'] = (
_('Duration of instance type %s (openstack flavor)') %
flavor)
        # TODO(lsmola) allow specific meter info to be set in local_settings,
        # for all meters, because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
return datastructures.SortedDict([
('network', {
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
return datastructures.SortedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
return datastructures.SortedDict([
('volume', {
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
return datastructures.SortedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
return datastructures.SortedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
def _get_ipmi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
        # TODO(lsmola) Unless Ceilometer provides this information, it has to
        # be defined statically here; it is then joined to the info that can
        # be obtained from the Ceilometer meters. Hopefully it will all be
        # supported there one day.
return datastructures.SortedDict([
('hardware.ipmi.node.power', {
'label': '',
'description': _("System Current Power"),
}),
('hardware.ipmi.fan', {
'label': '',
'description': _("Fan RPM"),
}),
('hardware.ipmi.temperature', {
'label': '',
'description': _("Sensor Temperature Reading"),
}),
('hardware.ipmi.current', {
'label': '',
'description': _("Sensor Current Reading"),
}),
('hardware.ipmi.voltage', {
'label': '',
'description': _("Sensor Voltage Reading"),
}),
('hardware.ipmi.node.inlet_temperature', {
'label': '',
'description': _("System Inlet Temperature Reading"),
}),
('hardware.ipmi.node.outlet_temperature', {
'label': '',
'description': _("System Outlet Temperature Reading"),
}),
('hardware.ipmi.node.airflow', {
'label': '',
'description': _("System Airflow Reading"),
}),
('hardware.ipmi.node.cups', {
'label': '',
'description': _("System CUPS Reading"),
}),
('hardware.ipmi.node.cpu_util', {
'label': '',
'description': _("System CPU Utility Reading"),
}),
('hardware.ipmi.node.mem_util', {
'label': '',
'description': _("System Memory Utility Reading"),
}),
('hardware.ipmi.node.io_util', {
'label': '',
'description': _("System IO Utility Reading"),
}),
])
| apache-2.0 |
vstoykov/django-cms | cms/migrations/0002_auto_start.py | 525 | 20033 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
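# These labels let the frozen ORM definitions below reference whichever user
# model the project has configured (a custom model or django.contrib.auth's
# User) without hardcoding its app label, table name or pointer field name.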
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
wevial/bpython | bpython/test/test_line_properties.py | 2 | 10588 | import re
from bpython.test import unittest
from bpython.line import current_word, current_dict_key, current_dict, \
current_string, current_object, current_object_attribute, \
current_from_import_from, current_from_import_import, current_import, \
current_method_definition_name, current_single_word, \
current_string_literal_attr
def cursor(s):
"""'ab|c' -> (2, 'abc')"""
cursor_offset = s.index('|')
line = s[:cursor_offset] + s[cursor_offset+1:]
return cursor_offset, line
def decode(s):
"""'a<bd|c>d' -> ((3, 'abcd'), (1, 3, 'bdc'))"""
if not s.count('|') == 1:
raise ValueError('match helper needs | to occur once')
if s.count('<') != s.count('>') or not s.count('<') in (0, 1):
raise ValueError('match helper needs <, and > to occur just once')
matches = list(re.finditer(r'[<>|]', s))
assert len(matches) in [1, 3], [m.group() for m in matches]
d = {}
for i, m in enumerate(matches):
d[m.group(0)] = m.start() - i
s = s[:m.start() - i] + s[m.end() - i:]
assert len(d) in [1, 3], 'need all the parts just once! %r' % d
if '<' in d:
return (d['|'], s), (d['<'], d['>'], s[d['<']:d['>']])
else:
return (d['|'], s), None
def line_with_cursor(cursor_offset, line):
return line[:cursor_offset] + '|' + line[cursor_offset:]
def encode(cursor_offset, line, result):
"""encode(3, 'abdcd', (1, 3, 'bdc')) -> a<bd|c>d'
Written for prettier assert error messages
"""
encoded_line = line_with_cursor(cursor_offset, line)
if result is None:
return encoded_line
start, end, value = result
assert line[start:end] == value
if start < cursor_offset:
encoded_line = encoded_line[:start] + '<' + encoded_line[start:]
else:
encoded_line = encoded_line[:start+1] + '<' + encoded_line[start+1:]
if end < cursor_offset:
encoded_line = encoded_line[:end+1] + '>' + encoded_line[end+1:]
else:
encoded_line = encoded_line[:end+2] + '>' + encoded_line[end+2:]
return encoded_line
class LineTestCase(unittest.TestCase):
def assertAccess(self, s):
r"""Asserts that self.func matches as described
by s, which uses a little language to describe matches:
        abcd<efg>hijklmnopqrstuvwx|yz
             /|\                  /|\
              |                    |
        the function should      the current cursor position
        match this "efg"         is between the x and y
"""
(cursor_offset, line), match = decode(s)
result = self.func(cursor_offset, line)
self.assertEqual(
result, match,
"%s(%r) result\n%r (%r) doesn't match expected\n%r (%r)" % (
self.func.__name__, line_with_cursor(cursor_offset, line),
encode(cursor_offset, line, result), result, s, match))
class TestHelpers(LineTestCase):
def test_I(self):
self.assertEqual(cursor('asd|fgh'), (3, 'asdfgh'))
def test_decode(self):
self.assertEqual(decode('a<bd|c>d'), ((3, 'abdcd'), (1, 4, 'bdc')))
self.assertEqual(decode('a|<bdc>d'), ((1, 'abdcd'), (1, 4, 'bdc')))
self.assertEqual(decode('a<bdc>d|'), ((5, 'abdcd'), (1, 4, 'bdc')))
def test_encode(self):
self.assertEqual(encode(3, 'abdcd', (1, 4, 'bdc')), 'a<bd|c>d')
self.assertEqual(encode(1, 'abdcd', (1, 4, 'bdc')), 'a|<bdc>d')
self.assertEqual(encode(4, 'abdcd', (1, 4, 'bdc')), 'a<bdc|>d')
self.assertEqual(encode(5, 'abdcd', (1, 4, 'bdc')), 'a<bdc>d|')
def test_assert_access(self):
def dumb_func(cursor_offset, line):
return (0, 2, 'ab')
self.func = dumb_func
self.assertAccess('<a|b>d')
class TestCurrentWord(LineTestCase):
def setUp(self):
self.func = current_word
def test_simple(self):
self.assertAccess('|')
self.assertAccess('|asdf')
self.assertAccess('<a|sdf>')
self.assertAccess('<asdf|>')
self.assertAccess('<asdfg|>')
self.assertAccess('asdf + <asdfg|>')
self.assertAccess('<asdfg|> + asdf')
def test_inside(self):
self.assertAccess('<asd|>')
self.assertAccess('<asd|fg>')
def test_dots(self):
self.assertAccess('<Object.attr1|>')
self.assertAccess('<Object.attr1.attr2|>')
self.assertAccess('<Object.att|r1.attr2>')
self.assertAccess('stuff[stuff] + {123: 456} + <Object.attr1.attr2|>')
self.assertAccess('stuff[<asd|fg>]')
self.assertAccess('stuff[asdf[<asd|fg>]')
def test_open_paren(self):
self.assertAccess('<foo(|>')
# documenting current behavior - TODO is this intended?
class TestCurrentDictKey(LineTestCase):
def setUp(self):
self.func = current_dict_key
def test_simple(self):
self.assertAccess('asdf|')
self.assertAccess('asdf|')
self.assertAccess('asdf[<>|')
self.assertAccess('asdf[<>|]')
self.assertAccess('object.dict[<abc|>')
self.assertAccess('asdf|')
self.assertAccess('asdf[<(>|]')
self.assertAccess('asdf[<(1>|]')
self.assertAccess('asdf[<(1,>|]')
self.assertAccess('asdf[<(1, >|]')
self.assertAccess('asdf[<(1, 2)>|]')
# TODO self.assertAccess('d[d[<12|>')
self.assertAccess("d[<'a>|")
class TestCurrentDict(LineTestCase):
def setUp(self):
self.func = current_dict
def test_simple(self):
self.assertAccess('asdf|')
self.assertAccess('asdf|')
self.assertAccess('<asdf>[|')
self.assertAccess('<asdf>[|]')
self.assertAccess('<object.dict>[abc|')
self.assertAccess('asdf|')
class TestCurrentString(LineTestCase):
def setUp(self):
self.func = current_string
def test_closed(self):
self.assertAccess('"<as|df>"')
self.assertAccess('"<asdf|>"')
self.assertAccess('"<|asdf>"')
self.assertAccess("'<asdf|>'")
self.assertAccess("'<|asdf>'")
self.assertAccess("'''<asdf|>'''")
self.assertAccess('"""<asdf|>"""')
self.assertAccess('asdf.afd("a") + "<asdf|>"')
def test_open(self):
self.assertAccess('"<as|df>')
self.assertAccess('"<asdf|>')
self.assertAccess('"<|asdf>')
self.assertAccess("'<asdf|>")
self.assertAccess("'<|asdf>")
self.assertAccess("'''<asdf|>")
self.assertAccess('"""<asdf|>')
self.assertAccess('asdf.afd("a") + "<asdf|>')
class TestCurrentObject(LineTestCase):
def setUp(self):
self.func = current_object
def test_simple(self):
self.assertAccess('<Object>.attr1|')
self.assertAccess('<Object>.|')
self.assertAccess('Object|')
self.assertAccess('Object|.')
self.assertAccess('<Object>.|')
self.assertAccess('<Object.attr1>.attr2|')
self.assertAccess('<Object>.att|r1.attr2')
self.assertAccess('stuff[stuff] + {123: 456} + <Object.attr1>.attr2|')
self.assertAccess('stuff[asd|fg]')
self.assertAccess('stuff[asdf[asd|fg]')
class TestCurrentAttribute(LineTestCase):
def setUp(self):
self.func = current_object_attribute
def test_simple(self):
self.assertAccess('Object.<attr1|>')
self.assertAccess('Object.attr1.<attr2|>')
self.assertAccess('Object.<att|r1>.attr2')
self.assertAccess('stuff[stuff] + {123: 456} + Object.attr1.<attr2|>')
self.assertAccess('stuff[asd|fg]')
self.assertAccess('stuff[asdf[asd|fg]')
self.assertAccess('Object.attr1.<|attr2>')
self.assertAccess('Object.<attr1|>.attr2')
class TestCurrentFromImportFrom(LineTestCase):
def setUp(self):
self.func = current_from_import_from
def test_simple(self):
self.assertAccess('from <sys|> import path')
self.assertAccess('from <sys> import path|')
self.assertAccess('if True|: from sys import path')
self.assertAccess('if True: |from sys import path')
self.assertAccess('if True: from <sys> import p|ath')
self.assertAccess('if True: from sys imp|ort path')
self.assertAccess('if True: from sys import |path')
self.assertAccess('if True: from sys import path.stu|ff')
self.assertAccess('if True: from <sys.path> import sep|')
self.assertAccess('from <os.p|>')
class TestCurrentFromImportImport(LineTestCase):
def setUp(self):
self.func = current_from_import_import
def test_simple(self):
self.assertAccess('from sys import <path|>')
self.assertAccess('from sys import <p|ath>')
self.assertAccess('from sys import |path')
self.assertAccess('from sys| import path')
self.assertAccess('from s|ys import path')
self.assertAccess('from |sys import path')
self.assertAccess('from xml.dom import <N|ode>')
# because syntax error
self.assertAccess('from xml.dom import Node.as|d')
class TestCurrentImport(LineTestCase):
def setUp(self):
self.func = current_import
def test_simple(self):
self.assertAccess('import <path|>')
self.assertAccess('import <p|ath>')
self.assertAccess('import |path')
self.assertAccess('import path, <another|>')
self.assertAccess('import path another|')
self.assertAccess('if True: import <path|>')
self.assertAccess('if True: import <xml.dom.minidom|>')
self.assertAccess('if True: import <xml.do|m.minidom>')
self.assertAccess('if True: import <xml.do|m.minidom> as something')
class TestMethodDefinitionName(LineTestCase):
def setUp(self):
self.func = current_method_definition_name
def test_simple(self):
self.assertAccess('def <foo|>')
self.assertAccess(' def bar(x, y)|:')
self.assertAccess(' def <bar|>(x, y)')
class TestSingleWord(LineTestCase):
def setUp(self):
self.func = current_single_word
def test_simple(self):
self.assertAccess('foo.bar|')
self.assertAccess('.foo|')
self.assertAccess(' <foo|>')
class TestCurrentStringLiteral(LineTestCase):
def setUp(self):
self.func = current_string_literal_attr
def test_simple(self):
self.assertAccess('"hey".<a|>')
self.assertAccess('"hey"|')
self.assertAccess('"hey"|.a')
self.assertAccess('"hey".<a|b>')
self.assertAccess('"hey".asdf d|')
self.assertAccess('"hey".<|>')
if __name__ == '__main__':
unittest.main()
| mit |
waytai/p2pool | wstools/MIMEAttachment.py | 294 | 3379 | #TODO add the license
#I had to rewrite this class because the python MIME email.mime modules
#(version 2.5) are buggy: they use \n instead of \r\n for new lines, which is
#not compliant with the standard!
# http://bugs.python.org/issue5525
#TODO do not load the whole message in memory; stream it from the disk
import re
import random
import sys
#new line
NL='\r\n'
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
class MIMEMessage:
def __init__(self):
self._files = []
self._xmlMessage = ""
self._startCID = ""
self._boundary = ""
def makeBoundary(self):
#create the boundary
msgparts = []
msgparts.append(self._xmlMessage)
for i in self._files:
msgparts.append(i.read())
#this sucks, all in memory
alltext = NL.join(msgparts)
self._boundary = _make_boundary(alltext)
#maybe I can save some memory
del alltext
del msgparts
self._startCID = "<" + (_fmt % random.randrange(sys.maxint)) + (_fmt % random.randrange(sys.maxint)) + ">"
def toString(self):
'''it return a string with the MIME message'''
if len(self._boundary) == 0:
#the makeBoundary hasn't been called yet
self.makeBoundary()
#ok we have everything let's start to spit the message out
#first the XML
returnstr = NL + "--" + self._boundary + NL
returnstr += "Content-Type: text/xml; charset=\"us-ascii\"" + NL
returnstr += "Content-Transfer-Encoding: 7bit" + NL
returnstr += "Content-Id: " + self._startCID + NL + NL
returnstr += self._xmlMessage + NL
#then the files
for file in self._files:
returnstr += "--" + self._boundary + NL
returnstr += "Content-Type: application/octet-stream" + NL
returnstr += "Content-Transfer-Encoding: binary" + NL
returnstr += "Content-Id: <" + str(id(file)) + ">" + NL + NL
file.seek(0)
returnstr += file.read() + NL
#closing boundary
returnstr += "--" + self._boundary + "--" + NL
return returnstr
def attachFile(self, file):
'''
it adds a file to this attachment
'''
self._files.append(file)
def addXMLMessage(self, xmlMessage):
'''
it adds the XML message. we can have only one XML SOAP message
'''
self._xmlMessage = xmlMessage
def getBoundary(self):
'''
this function returns the string used in the mime message as a
        boundary. The toString method has to be called first
'''
return self._boundary
def getStartCID(self):
'''
This function returns the CID of the XML message
'''
return self._startCID
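    # Example usage (a sketch; attached file objects should be opened in
    # binary mode):
    #   msg = MIMEMessage()
    #   msg.addXMLMessage(soap_envelope_string)
    #   msg.attachFile(open('payload.bin', 'rb'))
    #   body = msg.toString()        # boundary and start CID get set here
    #   boundary = msg.getBoundary()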
def _make_boundary(text=None):
#some code taken from python stdlib
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxint)
boundary = ('=' * 10) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
| gpl-3.0 |
coleifer/irc | bots/markov.py | 3 | 5130 | #!/usr/bin/python
import os
import pickle
import random
import re
import sys
from irc import IRCBot, IRCConnection
class MarkovBot(IRCBot):
"""
Hacking on a markov chain bot - based on:
http://code.activestate.com/recipes/194364-the-markov-chain-algorithm/
http://github.com/ericflo/yourmomdotcom
"""
messages_to_generate = 5
chattiness = .01
max_words = 15
chain_length = 2
stop_word = '\n'
filename = 'markov.db'
last = None
def __init__(self, *args, **kwargs):
super(MarkovBot, self).__init__(*args, **kwargs)
self.load_data()
def load_data(self):
if os.path.exists(self.filename):
fh = open(self.filename, 'rb')
self.word_table = pickle.loads(fh.read())
fh.close()
else:
self.word_table = {}
def save_data(self):
        fh = open(self.filename, 'wb')  # binary mode, to match load_data's 'rb'
fh.write(pickle.dumps(self.word_table))
fh.close()
def split_message(self, message):
words = message.split()
if len(words) > self.chain_length:
words.extend([self.stop_word] * self.chain_length)
for i in range(len(words) - self.chain_length):
yield (words[i:i + self.chain_length + 1])
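    # e.g. with chain_length == 2, "how are you today" is padded with the
    # stop word and yields ('how', 'are', 'you'), ('are', 'you', 'today'),
    # ('you', 'today', '\n') and ('today', '\n', '\n').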
def generate_message(self, person, size=15, seed_key=None):
person_words = len(self.word_table.get(person, {}))
if person_words < size:
return
if not seed_key:
seed_key = random.choice(self.word_table[person].keys())
message = []
for i in xrange(self.messages_to_generate):
words = seed_key
gen_words = []
for i in xrange(size):
if words[0] == self.stop_word:
break
gen_words.append(words[0])
try:
words = words[1:] + (random.choice(self.word_table[person][words]),)
except KeyError:
break
if len(gen_words) > len(message):
message = list(gen_words)
return ' '.join(message)
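    # self.word_table maps a nick to {tuple of chain_length words: [words
    # seen to follow that tuple]}, e.g.
    # word_table['alice'][('how', 'are')] might be ['you', 'things'].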
def imitate(self, sender, message, channel):
person = message.replace('imitate ', '').strip()[:10]
if person != self.conn.nick:
return self.generate_message(person)
def cite(self, sender, message, channel):
if self.last:
return self.last
def sanitize_message(self, message):
"""Convert to lower-case and strip out all quotation marks"""
return re.sub('[\"\']', '', message.lower())
def log(self, sender, message, channel):
sender = sender[:10]
self.word_table.setdefault(sender, {})
if message.startswith('/'):
return
try:
say_something = self.is_ping(message) or sender != self.conn.nick and random.random() < self.chattiness
except AttributeError:
say_something = False
messages = []
seed_key = None
if self.is_ping(message):
message = self.fix_ping(message)
for words in self.split_message(self.sanitize_message(message)):
key = tuple(words[:-1])
            # look the key up in this sender's chain table (not the outer
            # per-nick dict), otherwise follow-up words never accumulate
            if key in self.word_table[sender]:
self.word_table[sender][key].append(words[-1])
else:
self.word_table[sender][key] = [words[-1]]
if self.stop_word not in key and say_something:
for person in self.word_table:
if person == sender:
continue
if key in self.word_table[person]:
generated = self.generate_message(person, seed_key=key)
if generated:
messages.append((person, generated))
if len(messages):
self.last, message = random.choice(messages)
return message
def load_log_file(self, filename):
fh = open(filename, 'r')
logline_re = re.compile('<\s*(\w+)>[^\]]+\]\s([^\r\n]+)[\r\n]')
for line in fh.readlines():
match = logline_re.search(line)
if match:
sender, message = match.groups()
                self.log(sender, message, '')
def load_text_file(self, filename, sender):
fh = open(filename, 'r')
for line in fh.readlines():
            self.log(sender, line, '')
def command_patterns(self):
return (
self.ping('^imitate \S+', self.imitate),
self.ping('^cite', self.cite),
('.*', self.log),
)
host = 'irc.freenode.net'
port = 6667
nick = 'whatyousay'
conn = IRCConnection(host, port, nick)
markov_bot = MarkovBot(conn)
if len(sys.argv) > 1 and sys.argv[1] == '-log':
if len(sys.argv) == 3:
markov_bot.load_log_file(sys.argv[2])
elif len(sys.argv):
markov_bot.load_text_file(sys.argv[2], sys.argv[3])
else:
conn.connect()
conn.join('#botwars')
try:
conn.enter_event_loop()
except:
pass
markov_bot.save_data()
| mit |
simonolander/euler | euler-126.py | 1 | 1057 | from itertools import count
def layer(x, y, z, n):
return 2*(x*y + y*z + x*z) + 4*(x + y + z + n - 2) * (n - 1)
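# Layer n around an x*y*z cuboid needs 2(xy+yz+xz) + 4(x+y+z)(n-1) +
# 4(n-1)(n-2) cubes; the return line above folds the last two terms into
# 4(x+y+z+n-2)(n-1). The prints below check the Problem 126 examples
# (22, 46, 78, 118 and 22).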
print(layer(3, 2, 1, 1)) # 22
print(layer(3, 2, 1, 2)) # 46
print(layer(3, 2, 1, 3)) # 78
print(layer(3, 2, 1, 4)) # 118
print(layer(5, 1, 1, 1)) # 22
limit = 30000
memo = {}
for x in count(1):
if layer(x, x, x, 1) > limit:
break
for y in count(x):
if layer(x, y, y, 1) > limit:
break
for z in count(y):
if layer(x, y, z, 1) > limit:
break
for n in count(1):
l = layer(x, y, z, n)
if l > limit:
break
if l not in memo:
memo[l] = [(x, y, z, n)]
else:
memo[l].append((x, y, z, n))
search = 1000
smallest = None
lst = None
# memo maps each layer size to every (x, y, z, n) combination that produces
# it; the answer is the smallest layer size hit exactly `search` times.
for layer_size, combos in memo.items():
    if len(combos) == search:
        if smallest is None or layer_size < smallest:
            smallest = layer_size
            lst = combos
print(smallest, lst)
| mit |
licode/scikit-xray | skbeam/core/fitting/models.py | 4 | 5983 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li ([email protected]) #
# created on 09/10/2014 #
# #
# Original code: #
# @author: Mirna Lerotic, 2nd Look Consulting #
# http://www.2ndlookconsulting.com/ #
# Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import inspect
import logging
from lmfit import Model
from .lineshapes import (elastic, compton, lorentzian2)
from .base.parameter_data import get_para
logger = logging.getLogger(__name__)
def set_default(model_name, func_name):
"""
Set values and bounds to Model parameters in lmfit.
Parameters
----------
model_name : class object
Model class object from lmfit
func_name : function
function name of physics peak
"""
paras = inspect.getargspec(func_name)
# the first argument is independent variable, also ignored
# default values are not considered for fitting in this function
my_args = paras.args[1:]
para_dict = get_para()
for name in my_args:
if name not in para_dict.keys():
continue
my_dict = para_dict[name]
if my_dict['bound_type'] == 'none':
model_name.set_param_hint(name, vary=True)
elif my_dict['bound_type'] == 'fixed':
model_name.set_param_hint(name, vary=False, value=my_dict['value'])
elif my_dict['bound_type'] == 'lo':
model_name.set_param_hint(name, value=my_dict['value'], vary=True,
min=my_dict['min'])
elif my_dict['bound_type'] == 'hi':
model_name.set_param_hint(name, value=my_dict['value'], vary=True,
max=my_dict['max'])
elif my_dict['bound_type'] == 'lohi':
model_name.set_param_hint(name, value=my_dict['value'], vary=True,
min=my_dict['min'], max=my_dict['max'])
else:
raise TypeError("Boundary type {0} can't be "
"used".format(my_dict['bound_type']))
def _gen_class_docs(func):
"""
Parameters
----------
func : function
function of peak profile
Returns
-------
str :
documentation of the function
"""
return ("Wrap the {} function for fitting within lmfit "
"framework\n".format(func.__name__) + func.__doc__)
# DEFINE NEW MODELS
class ElasticModel(Model):
__doc__ = _gen_class_docs(elastic)
def __init__(self, *args, **kwargs):
super(ElasticModel, self).__init__(elastic, *args, **kwargs)
self.set_param_hint('epsilon', value=2.96, vary=False)
class ComptonModel(Model):
__doc__ = _gen_class_docs(compton)
def __init__(self, *args, **kwargs):
super(ComptonModel, self).__init__(compton, *args, **kwargs)
self.set_param_hint('epsilon', value=2.96, vary=False)
class Lorentzian2Model(Model):
__doc__ = _gen_class_docs(lorentzian2)
def __init__(self, *args, **kwargs):
super(Lorentzian2Model, self).__init__(lorentzian2, *args, **kwargs)
| bsd-3-clause |
DanielleQuinn/studyGroup | scripts/updateCalendar.py | 27 | 6851 | #######################################################################
# date: 2015-07-28
# author: Thea Van Rossum [email protected]
# functionality:
# 1. Creates a Google Calendar API service object
# 2. Deletes all events in the calendar in case changes have been
# made to existing events
# 3. Create events based on all the posts in
# "_posts" (POSTS_DIRECTORY)
# Commented out: 4. Print next 10 events
#
# Will not add an event if it is missing one of the REQUIRED_FIELDS
#
# To modify and use:
# 1. See google docs to get setup with credentials:
# https://developers.google.com/google-apps/calendar/quickstart/python
# 2. Update the variables indicated below (APPLICATION_NAME,
# CALENDAR_ID, TIME_ZONE_SRT
# 3. run from scripts/ using:
# python updateCalendar.py --noauth_local_webserver
########################################################################
import httplib2
import os
import glob
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
import datetime
import pytz
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# Modify these variables in step 2 above -------------------
# APPLICATION_NAME: app name you created in step one above:
APPLICATION_NAME = 'test'
# CALENDAR_ID: google account name you created for your calendar:
CALENDAR_ID = '[email protected]'
# TIME_ZONE_STR: check here:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TIME_ZONE_STR = 'America/Vancouver'
# -----------------------------------------------------------
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
DEFAULT_START_TIME = "15:30" # will be overridden by startTime in _posts
DEFAULT_END_TIME = "16:30" # will be overridden by endTime in _posts
REQUIRED_FIELDS = ['title', 'location', 'text', 'link', 'date']
POSTS_DIRECTORY = "../_posts"
def main():
"""
1. Creates a Google Calendar API service object
2. Deletes all events in the calendar in case
changes have been made to existing events
3. Create events based on all the posts in "_posts" (POSTS_DIRECTORY)
Commented out: 4. Print next 10 events
Will not add an event if it is missing one of the REQUIRED_FIELDS
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
# clear the calendar
service.calendars().clear(calendarId=CALENDAR_ID).execute()
# create events
for inputPath in glob.glob(os.path.join(POSTS_DIRECTORY, '*.markdown')):
eventDict = parseEventPost(inputPath)
events = getAllEvents(service)
if not isEventComplete(eventDict, inputPath):
print 'Event is incomplete'
else:
event = createEvent(eventDict)
event = service.events().insert(calendarId=CALENDAR_ID, body=event).execute()
print 'Event created: %s' % (event.get('summary'))
def printNextEvents(service, numEvents):
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print 'Getting the upcoming %d events' % numEvents
eventsResult = service.events().list(
calendarId=CALENDAR_ID, timeMin=now, maxResults=numEvents, singleEvents=True,
orderBy='startTime').execute()
events = eventsResult.get('items', [])
if not events:
print 'No upcoming events found.'
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
print start, event['summary']
def getAllEvents(service):
eventsResult = service.events().list(
calendarId=CALENDAR_ID, singleEvents=True, orderBy='startTime').execute()
events = eventsResult.get('items', [])
return events
def parseEventPost(inputPath):
eventDict = {}
eventDict['startTime'] = DEFAULT_START_TIME
eventDict['endTime'] = DEFAULT_END_TIME
f = open(inputPath, 'r')
for line in f:
listedline = line.strip().split(':', 1) # split around the : sign
if len(listedline) > 1: # we have the = sign in there
eventDict[listedline[0].strip()] = listedline[1].strip()
return eventDict
def isEventComplete(eventDict, sourcePath):
    isComplete = True
    for field in REQUIRED_FIELDS:
        if field not in eventDict:
            print "Error: event missing %s (%s)" % (field, sourcePath)
            isComplete = False
    return isComplete
def makeDateTime(dateStr, hourMinStr):
# date like "2014-07-25"
# hourMinStr like "15:30"
timeStr = hourMinStr[1:-1]
date = dateStr.split('-')
TIME_ZONE_HR = ':00'+pytz.timezone(TIME_ZONE_STR).localize(datetime.datetime(int(date[0]), int(date[1]), int(date[2]))).strftime('%z')
TIME_ZONE_HR = TIME_ZONE_HR[:-2] + ':' + TIME_ZONE_HR[-2:]
return dateStr + "T" + timeStr + TIME_ZONE_HR
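# e.g. assuming the front-matter value still carries its quotes ('"15:30"'),
# makeDateTime('2015-07-28', '"15:30"') -> '2015-07-28T15:30:00-07:00';
# the [1:-1] slice above is what strips those surrounding quotes.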
def createEvent(eventDict):
event = {
'summary': eventDict['title'],
'location': eventDict['location'],
'description': eventDict['text']+"\n"+eventDict['link'],
'start': {
'dateTime': makeDateTime(eventDict['date'], eventDict['startTime']),
'timeZone': TIME_ZONE_STR
},
'end': {
'dateTime': makeDateTime(eventDict['date'], eventDict['endTime']),
'timeZone': TIME_ZONE_STR
},
'reminders': {
'useDefault': False,
'overrides': [
{'method': 'email', 'minutes': 60 * 24 * 2}, # 2 days
],
},
}
return event
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'google-sfuStudyGroupCalendar.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatability with Python 2.6
credentials = tools.run(flow, store)
print 'Storing credentials to ' + credential_path
return credentials
if __name__ == '__main__':
main()
| apache-2.0 |
Thraxis/SickRage | sickbeard/providers/torrentbytes.py | 1 | 7783 | # Author: Idan Gutman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import urllib
import traceback
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.providers.TorrentProvider import TorrentProvider
class TorrentBytesProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "TorrentBytes")
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.freeleech = False
self.urls = {'base_url': 'https://www.torrentbytes.net',
'login': 'https://www.torrentbytes.net/takelogin.php',
'detail': 'https://www.torrentbytes.net/details.php?id=%s',
'search': 'https://www.torrentbytes.net/browse.php?search=%s%s',
'download': 'https://www.torrentbytes.net/download.php?id=%s&name=%s'}
self.url = self.urls['base_url']
self.categories = "&c41=1&c33=1&c38=1&c32=1&c37=1"
self.proper_strings = ['PROPER', 'REPACK']
self.cache = TorrentBytesCache(self)
def login(self):
login_params = {'username': self.username,
'password': self.password,
'login': 'Log in!'}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('Username or password incorrect', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_params, age=0, ep_obj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self.login():
return results
for mode in search_params.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
searchURL = self.urls['search'] % (urllib.quote(search_string.encode('utf-8')), self.categories)
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.get_url(searchURL)
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
# Continue only if one Release is found
empty = html.find('Nothing found!')
if empty:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrent_table = html.find('table', attrs={'border': '1'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
for result in torrent_rows[1:]:
cells = result.find_all('td')
size = None
link = cells[1].find('a', attrs={'class': 'index'})
full_id = link['href'].replace('details.php?id=', '')
torrent_id = full_id.split("&")[0]
# Free leech torrents are marked with green [F L] in the title (i.e. <font color=green>[F L]</font>)
freeleechTag = cells[1].find('font', attrs={'color': 'green'})
if freeleechTag and freeleechTag.text == u'[F\xa0L]':
isFreeleechTorrent = True
else:
isFreeleechTorrent = False
if self.freeleech and not isFreeleechTorrent:
continue
try:
if link.has_key('title'):
title = cells[1].find('a', {'class': 'index'})['title']
else:
title = link.contents[0]
download_url = self.urls['download'] % (torrent_id, link.contents[0])
seeders = int(cells[8].find('span').contents[0])
leechers = int(cells[9].find('span').contents[0])
# Need size for failed downloads handling
if size is None:
if re.match(r'[0-9]+,?\.?[0-9]*[KkMmGg]+[Bb]+', cells[6].text):
size = self._convertSize(cells[6].text)
if not size:
size = -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seed_ratio(self):
return self.ratio
def _convertSize(self, sizeString):
size = sizeString[:-2]
modifier = sizeString[-2:]
size = float(size)
if modifier in 'KB':
size = size * 1024
elif modifier in 'MB':
size = size * 1024**2
elif modifier in 'GB':
size = size * 1024**3
elif modifier in 'TB':
size = size * 1024**4
return int(size)
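# Illustrative sketch (not part of the original provider, never called by
# SickRage): what _convertSize above yields for typical size strings scraped
# from the site.
def _convert_size_example(provider):
    # 'provider' is assumed to be an existing TorrentBytesProvider instance
    assert provider._convertSize('700MB') == 700 * 1024 ** 2
    assert provider._convertSize('1.5GB') == int(1.5 * 1024 ** 3)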
class TorrentBytesCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll TorrentBytes every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider.search(search_params)}
provider = TorrentBytesProvider()
| gpl-3.0 |
kokogaga/arducopter | mk/PX4/Tools/genmsg/test/test_genmsg_msg_loader.py | 215 | 29225 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import random
def get_test_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'files'))
def test_exceptions():
from genmsg import MsgNotFound
try:
raise MsgNotFound('hello')
except MsgNotFound:
pass
def test__convert_constant_value():
from genmsg.msg_loader import convert_constant_value
from genmsg import InvalidMsgSpec
assert 0. == convert_constant_value('float32', '0.0')
assert 0. == convert_constant_value('float64', '0.0')
assert 'fo o' == convert_constant_value('string', ' fo o ')
assert 1 == convert_constant_value('byte', '1')
assert 1 == convert_constant_value('char', '1')
assert 1 == convert_constant_value('int8', '1')
assert 12 == convert_constant_value('int16', '12')
assert -13 == convert_constant_value('int32', '-13')
assert 14 == convert_constant_value('int64', '14')
assert 0 == convert_constant_value('uint8', '0')
assert 18 == convert_constant_value('uint16', '18')
assert 19 == convert_constant_value('uint32', '19')
assert 20 == convert_constant_value('uint64', '20')
assert True == convert_constant_value('bool', '1')
assert False == convert_constant_value('bool', '0')
width_fail = [('int8', '129'), ('uint8', '256'),
('int16', '35536'), ('uint16', '-1'),('uint16', '65536'),
('int32', '3000000000'),('int32', '-2700000000'),
('uint32', '-1'),('uint32', '41000000000'),
('uint64', '-1')]
for t, v in width_fail:
try:
convert_constant_value(t, v)
assert False, "should have failed width check: %s, %s"%(t, v)
except InvalidMsgSpec:
pass
type_fail = [('int32', 'f'), ('float32', 'baz')]
for t, v in type_fail:
try:
convert_constant_value(t, v)
assert False, "should have failed type check: %s, %s"%(t, v)
except ValueError:
pass
try:
convert_constant_value('foo', '1')
assert False, "should have failed invalid type"
except InvalidMsgSpec:
pass
def test__load_constant_line():
from genmsg.msgs import Constant, InvalidMsgSpec
from genmsg.msg_loader import _load_constant_line
try:
_load_constant_line("int8 field=alpha")
assert False, "should have raised"
except InvalidMsgSpec:
pass
try:
_load_constant_line("int8 field=")
assert False, "should have raised"
except InvalidMsgSpec:
pass
try:
_load_constant_line("faketype field=1")
assert False, "should have raised"
except InvalidMsgSpec:
pass
c = _load_constant_line("int8 field=1")
assert c == Constant('int8', 'field', 1, '1')
c = _load_constant_line("string val=hello #world")
assert c == Constant('string', 'val', 'hello #world', 'hello #world')
def test__load_field_line():
from genmsg.msgs import InvalidMsgSpec, Field
from genmsg.msg_loader import _load_field_line, InvalidMsgSpec, Field, is_valid_msg_field_name
try:
_load_field_line("string", 'foo')
assert False, "should have raised"
except InvalidMsgSpec:
pass
assert not is_valid_msg_field_name('string[')
try:
_load_field_line("string data!", 'foo')
assert False, "should have raised"
except InvalidMsgSpec:
pass
try:
_load_field_line("string[ data", 'foo')
assert False, "should have raised"
except InvalidMsgSpec:
pass
f =_load_field_line("string str", 'foo')
assert f == ('string', 'str')
f =_load_field_line("string str #nonsense", 'foo')
assert f == ('string', 'str')
f =_load_field_line("String str #nonsense", '')
assert f == ('String', 'str')
f =_load_field_line("String str #nonsense", 'foo')
assert f == ('foo/String', 'str')
# make sure Header is mapped
f =_load_field_line("Header header #nonsense", 'somewhere')
assert f == ('std_msgs/Header', 'header'), f
f =_load_field_line("Header header #nonsense", '')
assert f == ('std_msgs/Header', 'header'), f
def test_load_msg_from_string():
# make sure Header -> std_msgs/Header conversion works
from genmsg.msgs import Constant
from genmsg.msg_loader import load_msg_from_string, MsgContext
context = MsgContext.create_default()
msgspec = load_msg_from_string(context, "Header header", 'test_pkg/HeaderTest')
print(msgspec)
assert msgspec.has_header()
assert msgspec.types == ['std_msgs/Header']
assert msgspec.names == ['header']
assert msgspec.constants == []
assert msgspec.short_name == 'HeaderTest'
assert msgspec.package == 'test_pkg'
assert msgspec.full_name == 'test_pkg/HeaderTest'
msgspec = load_msg_from_string(context, "int8 c=1\nHeader header\nint64 data", 'test_pkg/HeaderValsTest')
assert msgspec.has_header()
assert msgspec.types == ['std_msgs/Header', 'int64']
assert msgspec.names == ['header', 'data']
assert msgspec.constants == [Constant('int8', 'c', 1, '1')]
assert msgspec.short_name == 'HeaderValsTest'
assert msgspec.package == 'test_pkg'
assert msgspec.full_name == 'test_pkg/HeaderValsTest'
msgspec = load_msg_from_string(context, "string data\nint64 data2", 'test_pkg/ValsTest')
assert not msgspec.has_header()
assert msgspec.types == ['string', 'int64']
assert msgspec.names == ['data', 'data2']
assert msgspec.constants == []
assert msgspec.short_name == 'ValsTest'
assert msgspec.full_name == 'test_pkg/ValsTest'
def _validate_TestString(msgspec):
assert ['caller_id', 'orig_caller_id', 'data'] == msgspec.names, msgspec.names
assert ['string', 'string', 'string'] == msgspec.types, msgspec.types
def test_load_msg_from_file():
from genmsg.msgs import InvalidMsgSpec
from genmsg.msg_loader import load_msg_from_file, MsgContext
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
msg_context = MsgContext.create_default()
spec = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
assert spec.full_name == 'test_ros/TestString'
assert spec.package == 'test_ros'
assert spec.short_name == 'TestString'
_validate_TestString(spec)
# test repeat
spec_2 = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
assert spec == spec_2
assert spec.package == spec_2.package
assert spec.short_name == spec_2.short_name
# test w/ bad file
test_bad_path = os.path.join(test_ros_dir, 'Bad.msg')
try:
load_msg_from_file(msg_context, test_bad_path, 'test_ros/Bad')
assert False, "should have raised"
except InvalidMsgSpec:
pass
# supposed to register
assert msg_context.is_registered('test_ros/TestString'), msg_context
def test_load_msg_from_string_TestString():
from genmsg.msg_loader import load_msg_from_string, MsgContext
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
with open(test_string_path) as f:
text = f.read()
msg_context = MsgContext.create_default()
_validate_TestString(load_msg_from_string(msg_context, text, 'test_ros/TestString'))
# supposed to register
assert msg_context.is_registered('test_ros/TestString'), msg_context
def test_load_msg_by_type():
from genmsg.msg_loader import load_msg_by_type, MsgContext, MsgNotFound
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
search_path = {
'test_ros': [ test_ros_dir ],
'geometry_msgs': [ geometry_d ],
}
msg_context = MsgContext.create_default()
msgspec = load_msg_by_type(msg_context, 'test_ros/TestString', search_path)
_validate_TestString(msgspec)
# supposed to register
assert msg_context.is_registered('test_ros/TestString'), msg_context
# test invalid search path
try:
load_msg_by_type(msg_context, 'test_ros/TestString', [test_string_path])
assert False, "should have raised"
except ValueError:
pass
# test not found
try:
load_msg_by_type(msg_context, 'test_ros/Fake', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
# test all the known geometry msgs
test_d = get_test_dir()
for f in os.listdir(geometry_d):
if f.endswith('.msg'):
short = f[:-4]
msg_type = 'geometry_msgs/%s'%short
spec = load_msg_by_type(msg_context, msg_type, search_path)
assert spec is not None
assert spec.package == 'geometry_msgs'
assert spec.full_name == msg_type
assert spec.short_name == short
with open(os.path.join(geometry_d, f)) as file_h:
assert spec.text == file_h.read()
# all types with 'Stamped' in name have headers
if 'Stamped' in f:
assert spec.has_header(), msg_type
def test_get_msg_file():
from genmsg import MsgNotFound
from genmsg.msg_loader import get_msg_file
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
search_path = {
'test_ros': [ test_ros_dir ],
}
assert test_string_path == get_msg_file('test_ros', 'TestString', search_path)
try:
get_msg_file('test_ros', 'DNE', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
try:
get_msg_file('bad_pkg', 'TestString', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
# test with invalid search path
try:
get_msg_file('test_ros', 'TestString', [test_string_path])
assert False, "should have raised"
except ValueError:
pass
def test_get_srv_file():
from genmsg import MsgNotFound
from genmsg.msg_loader import get_srv_file
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'srv')
std_srvs_dir = os.path.join(test_d, 'std_srvs', 'srv')
empty_path = os.path.join(std_srvs_dir, 'Empty.srv')
search_path = {
'test_ros': [ test_ros_dir ],
'std_srvs': [ std_srvs_dir ],
}
assert empty_path == get_srv_file('std_srvs', 'Empty', search_path)
try:
get_srv_file('test_ros', 'DNE', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
try:
get_srv_file('bad_pkg', 'TestString', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
# test with invalid search path
try:
get_srv_file('std_srvs', 'Empty', [std_srvs_dir])
assert False, "should have raised"
except ValueError:
pass
def test_MsgContext():
from genmsg.msg_loader import MsgContext, load_msg_from_file
msg_context = MsgContext()
assert not msg_context.is_registered('time')
assert not msg_context.is_registered('duration')
msg_context = MsgContext.create_default()
# tripwires
repr(msg_context)
str(msg_context)
assert msg_context.is_registered('time'), msg_context._registered_packages
assert msg_context.is_registered('duration')
assert not msg_context.is_registered('test_ros/TestString')
assert not msg_context.is_registered('Header')
# start loading stuff into context
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
spec = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
msg_context.register('test_ros/TestString', spec)
assert msg_context.get_registered('test_ros/TestString') == spec
try:
msg_context.get_registered('bad/TestString')
assert False, 'should have raised'
except KeyError:
pass
assert msg_context.is_registered('test_ros/TestString')
# test Header
assert not msg_context.is_registered('Header')
assert not msg_context.is_registered('std_msgs/Header')
msg_context.register('std_msgs/Header', spec)
assert msg_context.is_registered('std_msgs/Header')
def test_load_srv_from_file():
from genmsg.msg_loader import MsgContext, load_srv_from_file
msg_context = MsgContext.create_default()
d = get_test_dir()
filename = os.path.join(d, 'test_ros', 'srv', 'AddTwoInts.srv')
with open(filename, 'r') as f:
text = f.read()
full_name = 'test_ros/AddTwoInts'
spec = load_srv_from_file(msg_context, filename, full_name)
assert spec == load_srv_from_file(msg_context, filename, full_name)
assert ['int64', 'int64'] == spec.request.types, spec.request.types
assert ['a', 'b'] == spec.request.names
assert text == spec.text
assert full_name == spec.full_name
def test_load_msg_depends():
#TODO: should there just be a 'load_msg, implicit=True?'
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_msg_depends, MsgNotFound
test_d = get_test_dir()
search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ os.path.join(test_d, 'geometry_msgs', 'msg') ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
}
# Test not found
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'invalid/BadDepend', search_path)
try:
load_msg_depends(msg_context, root_spec, search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
root_spec = load_msg_by_type(msg_context, 'invalid/BadLocalDepend', search_path)
try:
load_msg_depends(msg_context, root_spec, search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'std_msgs/Int32', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Int32.msg')
assert file_p == msg_context.get_file('std_msgs/Int32')
assert [] == msg_context.get_depends('std_msgs/Int32')
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'std_msgs/Header', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Header.msg')
assert file_p == msg_context.get_file('std_msgs/Header')
assert [] == msg_context.get_depends('std_msgs/Header')
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'Header', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Header.msg')
assert file_p == msg_context.get_file('std_msgs/Header')
assert [] == msg_context.get_depends('std_msgs/Header')
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'std_msgs/Int32MultiArray', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Int32MultiArray.msg')
assert file_p == msg_context.get_file('std_msgs/Int32MultiArray')
val = msg_context.get_all_depends('std_msgs/Int32MultiArray')
assert set(['std_msgs/MultiArrayLayout', 'std_msgs/MultiArrayDimension']) == set(val), val
assert 2 == len(val), val
val = msg_context.get_depends('std_msgs/Int32MultiArray')
assert set(['std_msgs/MultiArrayLayout']) == set(val), val
for s in ['MultiArrayLayout', 'MultiArrayDimension']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
def test_load_msg_depends_stamped():
#TODO: should there just be a 'load_msg, implicit=True?'
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_msg_depends
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ geometry_d ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
}
# Test with Stamped and deeper hierarchies, Header
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'geometry_msgs/PoseStamped', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'PoseStamped.msg')
assert file_p == msg_context.get_file('geometry_msgs/PoseStamped')
val = msg_context.get_all_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']) == set(val), val
val = msg_context.get_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Pose', 'Point', 'Quaternion']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'geometry_msgs/TwistWithCovarianceStamped', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'TwistWithCovarianceStamped.msg')
assert file_p == msg_context.get_file('geometry_msgs/TwistWithCovarianceStamped')
val = msg_context.get_all_depends('geometry_msgs/TwistWithCovarianceStamped')
assert set(['std_msgs/Header', 'geometry_msgs/TwistWithCovariance', 'geometry_msgs/Twist', 'geometry_msgs/Vector3']) == set(val), val
val = msg_context.get_depends('geometry_msgs/TwistWithCovarianceStamped')
assert set(['std_msgs/Header', 'geometry_msgs/TwistWithCovariance']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['TwistWithCovariance', 'Twist', 'Vector3']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'sensor_msgs/Imu', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'sensor_msgs', 'msg', 'Imu.msg')
assert file_p == msg_context.get_file('sensor_msgs/Imu')
val = msg_context.get_all_depends('sensor_msgs/Imu')
assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
val = msg_context.get_depends('sensor_msgs/Imu')
assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Quaternion', 'Vector3']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
def test_load_depends_msg():
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_depends, MsgNotFound, load_srv_by_type
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
msg_search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ geometry_d ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
}
# Test not found
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'invalid/BadDepend', msg_search_path)
try:
load_depends(msg_context, root_spec, msg_search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
root_spec = load_msg_by_type(msg_context, 'invalid/BadLocalDepend', msg_search_path)
try:
load_depends(msg_context, root_spec, msg_search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
# Test with msgs
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'geometry_msgs/PoseStamped', msg_search_path)
load_depends(msg_context, root_spec, msg_search_path)
file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'PoseStamped.msg')
assert file_p == msg_context.get_file('geometry_msgs/PoseStamped')
val = msg_context.get_all_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']) == set(val), val
val = msg_context.get_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Pose', 'Point', 'Quaternion']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'sensor_msgs/Imu', msg_search_path)
load_depends(msg_context, root_spec, msg_search_path)
file_p = os.path.join(test_d, 'sensor_msgs', 'msg', 'Imu.msg')
assert file_p == msg_context.get_file('sensor_msgs/Imu')
val = msg_context.get_depends('sensor_msgs/Imu')
assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Quaternion', 'Vector3']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
def test_load_depends_srv():
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_depends, MsgNotFound, load_srv_by_type
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
msg_search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ geometry_d ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
}
# Test with srvs
srv_search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'srv') ],
'std_srvs': [ os.path.join(test_d, 'std_srvs', 'srv') ],
}
msg_context = MsgContext.create_default()
root_spec = load_srv_by_type(msg_context, 'test_ros/AddTwoInts', srv_search_path)
load_depends(msg_context, root_spec, msg_search_path)
val = msg_context.get_depends('test_ros/AddTwoIntsRequest')
assert val == [], val
val = msg_context.get_depends('test_ros/AddTwoIntsResponse')
assert val == [], val
# test with srv that has depends
msg_context = MsgContext.create_default()
response_deps = ['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/PoseStamped', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']
root_spec = load_srv_by_type(msg_context, 'test_ros/GetPoseStamped', srv_search_path)
load_depends(msg_context, root_spec, msg_search_path)
for d in response_deps:
assert msg_context.is_registered(d)
val = msg_context.get_depends('test_ros/GetPoseStampedRequest')
assert val == [], val
val = msg_context.get_depends('test_ros/GetPoseStampedResponse')
assert val == ['geometry_msgs/PoseStamped']
# Test with nonsense
class Foo(object): pass
try:
load_depends(msg_context, Foo(), msg_search_path)
assert False, "should have raised"
except ValueError:
pass
def test_load_srv_by_type():
from genmsg.msg_loader import load_srv_by_type, MsgContext, MsgNotFound
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'srv')
std_srvs_dir = os.path.join(test_d, 'std_srvs', 'srv')
empty_path = os.path.join(std_srvs_dir, 'Empty.srv')
a2i_path = os.path.join(std_srvs_dir, 'AddTwoInts.srv')
search_path = {
'test_ros': [ test_ros_dir ],
'std_srvs': [ std_srvs_dir ],
}
msg_context = MsgContext.create_default()
spec = load_srv_by_type(msg_context, 'std_srvs/Empty', search_path)
assert msg_context.is_registered('std_srvs/EmptyRequest')
assert msg_context.is_registered('std_srvs/EmptyResponse')
assert msg_context.get_registered('std_srvs/EmptyRequest') == spec.request
assert msg_context.get_registered('std_srvs/EmptyResponse') == spec.response
assert msg_context.get_file('std_srvs/EmptyRequest') == empty_path, msg_context.get_file('std_srvs/EmptyRequest')
assert msg_context.get_file('std_srvs/EmptyResponse') == empty_path,msg_context.get_file('std_srvs/EmptyResponse')
assert spec.request.full_name == 'std_srvs/EmptyRequest'
assert spec.response.full_name == 'std_srvs/EmptyResponse'
assert spec.request.short_name == 'EmptyRequest'
assert spec.response.short_name == 'EmptyResponse'
assert spec.request.package == 'std_srvs'
assert spec.response.package == 'std_srvs'
for f in [spec.request.names, spec.request.types, spec.response.names, spec.response.types]:
assert [] == f
spec = load_srv_by_type(msg_context, 'test_ros/AddTwoInts', search_path)
assert msg_context.is_registered('test_ros/AddTwoIntsRequest')
assert msg_context.is_registered('test_ros/AddTwoIntsResponse')
assert msg_context.get_registered('test_ros/AddTwoIntsRequest') == spec.request
assert msg_context.get_registered('test_ros/AddTwoIntsResponse') == spec.response
assert spec.request.types == ['int64', 'int64'], spec.request.types
assert spec.request.names == ['a', 'b'], spec.request.names
assert spec.response.types == ['int64'], spec.response.types
assert spec.response.names == ['sum'], spec.response.names
# test invalid search path
try:
load_srv_by_type(msg_context, 'test_ros/AddTwoInts', [std_srvs_dir])
assert False, "should have raised"
except ValueError:
pass
# test not found
try:
load_srv_by_type(msg_context, 'test_ros/Fake', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
| gpl-3.0 |
ARCCN/elt | server/pox/openflow/libopenflow_01.py | 1 | 126604 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file was originally based on pyopenflow.py from NOX, which was
# autogenerated from openflow.h via a program by KK Yap. It has been
# substantially altered since then.
from __future__ import print_function
import struct
import operator
import collections
from itertools import chain, repeat
import sys
from pox.lib.packet.packet_base import packet_base
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.udp import udp
from pox.lib.packet.tcp import tcp
from pox.lib.packet.icmp import icmp
from pox.lib.packet.arp import arp
from pox.lib.addresses import *
from pox.lib.util import assert_type
from pox.lib.util import initHelper
from pox.lib.util import hexdump
EMPTY_ETH = EthAddr(None)
# ----------------------------------------------------------------------
# XID Management
# ----------------------------------------------------------------------
MAX_XID = 0x7fFFffFF
def XIDGenerator (start = 1, stop = MAX_XID):
i = start
while True:
yield i
i += 1
if i > stop:
i = start
def xid_generator (start = 1, stop = MAX_XID):
return XIDGenerator(start, stop).next
def user_xid_generator ():
return xid_generator(0x80000000, 0xffFFffFF)
generate_xid = xid_generator()
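# Illustrative sketch (not part of the original file, never called): how the
# XID helpers above behave.
def _xid_example ():
  next_xid = xid_generator(100, 102)   # private counter just for this example
  assert [next_xid() for _ in range(4)] == [100, 101, 102, 100]  # wraps at 'stop'
  # generate_xid() works the same way over 1..MAX_XID and is shared by the
  # whole module; user_xid_generator() hands out XIDs from a separate high
  # range (0x80000000..0xffFFffFF) so they never collide with it.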
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Packing / Unpacking
# ----------------------------------------------------------------------
_PAD = b'\x00'
_PAD2 = _PAD*2
_PAD3 = _PAD*3
_PAD4 = _PAD*4
_PAD6 = _PAD*6
class UnderrunError (RuntimeError):
"""
Raised when one tries to unpack more data than is available
"""
pass
def _read (data, offset, length):
if (len(data)-offset) < length:
raise UnderrunError("wanted %s bytes but only have %s"
% (length, len(data)-offset))
return (offset+length, data[offset:offset+length])
def _unpack (fmt, data, offset):
size = struct.calcsize(fmt)
if (len(data)-offset) < size: raise UnderrunError()
return (offset+size, struct.unpack_from(fmt, data, offset))
def _skip (data, offset, num):
offset += num
if offset > len(data): raise UnderrunError()
return offset
def _unpad (data, offset, num):
(offset, o) = _read(data, offset, num)
assert len(o.replace("\x00", "")) == 0
return offset
def _readzs (data, offset, length):
(offset, d) = _read(data, offset, length)
d = d.split("\x00", 1)
#if len(d[1].replace("\x00", "")) > 0:
# raise RuntimeError("Non-zero string padding")
assert True if (len(d) == 1) else (len(d[1].replace("\x00", "")) == 0)
return (offset, d[0])
def _readether (data, offset):
(offset, d) = _read(data, offset, 6)
return (offset, EthAddr(d))
def _readip (data, offset, networkOrder = True):
(offset, d) = _read(data, offset, 4)
return (offset, IPAddr(d, networkOrder = networkOrder))
# ----------------------------------------------------------------------
def _format_body (body, prefix):
if hasattr(body, 'show'):
#TODO: Check this (spacing may well be wrong)
return body.show(prefix + ' ')
else:
return prefix + hexdump(body).replace("\n", "\n" + prefix)
TABLE_ALL = 0xff
TABLE_EMERGENCY = 0xfe
class _ofp_meta (type):
"""
Metaclass for ofp messages/structures
This takes care of making len() work as desired.
"""
def __len__ (cls):
try:
return cls.__len__()
except:
return cls._MIN_LENGTH
class ofp_base (object):
"""
Base class for OpenFlow messages/structures
You should implement a __len__ method. If your length is fixed, it
should be a static method. If your length is not fixed, you should
implement a __len__ instance method and set a class level _MIN_LENGTH
attribute to your minimum length.
"""
__metaclass__ = _ofp_meta
def _assert (self):
r = self._validate()
if r is not None:
raise RuntimeError(r)
return False # Never reached
return True
def _validate (self):
return None
def __ne__ (self, other):
return not self.__eq__(other)
@classmethod
def unpack_new (cls, raw, offset=0):
"""
Unpacks wire format into the appropriate message object.
Returns newoffset,object
"""
o = cls()
r,length = o.unpack(raw, offset)
assert (r-offset) == length, o
return (r, o)
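# Illustrative sketch (not part of the original file): a minimal structure
# following the length contract described in ofp_base's docstring above. A
# fixed-size type exposes a static __len__ (which len() on instances ends up
# using), and the _ofp_meta metaclass additionally makes len() work on the
# class itself, falling back to _MIN_LENGTH for variable-length types.
class _example_fixed_struct (ofp_base):
  """
  Example only -- not a real OpenFlow structure.
  """
  def __init__ (self):
    self.value = 0
  def pack (self):
    return struct.pack("!L", self.value)
  def unpack (self, raw, offset=0):
    offset,(self.value,) = _unpack("!L", raw, offset)
    return offset
  @staticmethod
  def __len__ ():
    return 4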
# ----------------------------------------------------------------------
# Class decorators
# ----------------------------------------------------------------------
_message_type_to_class = {}
_message_class_to_types = {} # Do we need this?
#_message_type_to_name = {}
#_message_name_to_type = {}
ofp_type_rev_map = {}
ofp_type_map = {}
def openflow_message (ofp_type, type_val, reply_to=None,
request_for=None, switch=False, controller=False):
#TODO: Reply stuff, switch/controller stuff
#_message_name_to_type[ofp_type] = type_val
#_message_type_to_name[type_val] = ofp_type
ofp_type_rev_map[ofp_type] = type_val
ofp_type_map[type_val] = ofp_type
def f (c):
c.header_type = type_val
c._from_switch = switch
c._from_controller = controller
_message_type_to_class[type_val] = c
_message_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
def openflow_sc_message (*args, **kw):
return openflow_message(switch=True, controller=True, *args, **kw)
def openflow_c_message (*args, **kw):
return openflow_message(controller=True, *args, **kw)
def openflow_s_message (*args, **kw):
return openflow_message(switch=True, *args, **kw)
_queue_prop_type_to_class = {}
_queue_prop_class_to_types = {} # Do we need this?
ofp_queue_prop_type_rev_map = {}
ofp_queue_prop_type_map = {}
def openflow_queue_prop (queue_prop_type, type_val):
ofp_queue_prop_type_rev_map[queue_prop_type] = type_val
ofp_queue_prop_type_map[type_val] = queue_prop_type
def f (c):
c.property = type_val
_queue_prop_type_to_class[type_val] = c
_queue_prop_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
_action_type_to_class = {}
_action_class_to_types = {} # Do we need this?
ofp_action_type_rev_map = {}
ofp_action_type_map = {}
def openflow_action (action_type, type_val):
ofp_action_type_rev_map[action_type] = type_val
ofp_action_type_map[type_val] = action_type
def f (c):
c.type = type_val
_action_type_to_class[type_val] = c
_action_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
class _StatsClassInfo (object):
__slots__ = 'request reply reply_is_list'.split()
def __init__ (self, **kw):
self.request = None
self.reply = None
self.reply_is_list = False
initHelper(self, kw)
def __str__ (self):
r = str(self.reply)
if self.reply_is_list: r = "[%s]" % (r,)
return "request:%s reply:%s" % (self.request, r)
_stats_type_to_class_info = {}
_stats_class_to_type = {}
ofp_stats_type_rev_map = {}
ofp_stats_type_map = {}
def openflow_stats_request (stats_type, type_val=None, is_list=None,
is_reply = False):
if type_val is not None:
ofp_stats_type_rev_map[stats_type] = type_val
ofp_stats_type_map[type_val] = stats_type
else:
type_val = ofp_stats_type_rev_map.get(stats_type)
def f (c):
if type_val is not None:
ti = _stats_type_to_class_info.get(stats_type)
if ti is not None:
_stats_type_to_class_info[type_val] = ti
del _stats_type_to_class_info[stats_type]
else:
ti = _stats_type_to_class_info.setdefault(type_val,
_StatsClassInfo())
_stats_class_to_type[c] = type_val
else:
ti = _stats_type_to_class_info.setdefault(stats_type,
_StatsClassInfo())
if is_list is not None:
ti.reply_is_list = is_list
if is_reply:
ti.reply = c
else:
ti.request = c
if type_val is not None:
if ti.reply and issubclass(ti.reply, ofp_stats_body_base):
ti.reply._type = type_val
if ti.request and issubclass(ti.request, ofp_stats_body_base):
ti.request._type = type_val
return c
return f
def openflow_stats_reply (stats_type, type_val=None, is_list=None,
is_reply = True):
return openflow_stats_request(stats_type, type_val, is_list, is_reply)
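# Illustrative note (not part of the original file): the decorators above
# register each message/action/queue-property/stats class in the lookup
# tables used when unpacking raw OpenFlow data. A controller-to-switch
# message is typically declared like
#
#   @openflow_c_message("OFPT_EXAMPLE", 99)
#   class ofp_example (ofp_header):
#     ...
#
# which fills in ofp_type_rev_map['OFPT_EXAMPLE'], ofp_type_map[99] and
# _message_type_to_class[99], and sets ofp_example.header_type = 99.
# (The name and type number here are made up purely for illustration.)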
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Constants, etc.
# ----------------------------------------------------------------------
ofp_error_type_rev_map = {
'OFPET_HELLO_FAILED' : 0,
'OFPET_BAD_REQUEST' : 1,
'OFPET_BAD_ACTION' : 2,
'OFPET_FLOW_MOD_FAILED' : 3,
'OFPET_PORT_MOD_FAILED' : 4,
'OFPET_QUEUE_OP_FAILED' : 5,
}
ofp_hello_failed_code_rev_map = {
'OFPHFC_INCOMPATIBLE' : 0,
'OFPHFC_EPERM' : 1,
}
ofp_bad_request_code_rev_map = {
'OFPBRC_BAD_VERSION' : 0,
'OFPBRC_BAD_TYPE' : 1,
'OFPBRC_BAD_STAT' : 2,
'OFPBRC_BAD_VENDOR' : 3,
'OFPBRC_BAD_SUBTYPE' : 4,
'OFPBRC_EPERM' : 5,
'OFPBRC_BAD_LEN' : 6,
'OFPBRC_BUFFER_EMPTY' : 7,
'OFPBRC_BUFFER_UNKNOWN' : 8,
}
ofp_bad_action_code_rev_map = {
'OFPBAC_BAD_TYPE' : 0,
'OFPBAC_BAD_LEN' : 1,
'OFPBAC_BAD_VENDOR' : 2,
'OFPBAC_BAD_VENDOR_TYPE' : 3,
'OFPBAC_BAD_OUT_PORT' : 4,
'OFPBAC_BAD_ARGUMENT' : 5,
'OFPBAC_EPERM' : 6,
'OFPBAC_TOO_MANY' : 7,
'OFPBAC_BAD_QUEUE' : 8,
}
ofp_flow_mod_failed_code_rev_map = {
'OFPFMFC_ALL_TABLES_FULL' : 0,
'OFPFMFC_OVERLAP' : 1,
'OFPFMFC_EPERM' : 2,
'OFPFMFC_BAD_EMERG_TIMEOUT' : 3,
'OFPFMFC_BAD_COMMAND' : 4,
'OFPFMFC_UNSUPPORTED' : 5,
}
ofp_port_mod_failed_code_rev_map = {
'OFPPMFC_BAD_PORT' : 0,
'OFPPMFC_BAD_HW_ADDR' : 1,
}
ofp_queue_op_failed_code_rev_map = {
'OFPQOFC_BAD_PORT' : 0,
'OFPQOFC_BAD_QUEUE' : 1,
'OFPQOFC_EPERM' : 2,
}
ofp_port_config_rev_map = {
'OFPPC_PORT_DOWN' : 1,
'OFPPC_NO_STP' : 2,
'OFPPC_NO_RECV' : 4,
'OFPPC_NO_RECV_STP' : 8,
'OFPPC_NO_FLOOD' : 16,
'OFPPC_NO_FWD' : 32,
'OFPPC_NO_PACKET_IN' : 64,
}
ofp_port_state_rev_map = {
'OFPPS_STP_LISTEN' : 0,
'OFPPS_LINK_DOWN' : 1,
'OFPPS_STP_LEARN' : 256,
'OFPPS_STP_FORWARD' : 512,
'OFPPS_STP_BLOCK' : 768,
}
OFPPS_STP_MASK = 768
ofp_port_features_rev_map = {
'OFPPF_10MB_HD' : 1,
'OFPPF_10MB_FD' : 2,
'OFPPF_100MB_HD' : 4,
'OFPPF_100MB_FD' : 8,
'OFPPF_1GB_HD' : 16,
'OFPPF_1GB_FD' : 32,
'OFPPF_10GB_FD' : 64,
'OFPPF_COPPER' : 128,
'OFPPF_FIBER' : 256,
'OFPPF_AUTONEG' : 512,
'OFPPF_PAUSE' : 1024,
'OFPPF_PAUSE_ASYM' : 2048,
}
ofp_queue_properties_rev_map = {
'OFPQT_MIN_RATE' : 0,
}
OFPQT_NONE = 0
ofp_capabilities_rev_map = {
'OFPC_FLOW_STATS' : 1,
'OFPC_TABLE_STATS' : 2,
'OFPC_PORT_STATS' : 4,
'OFPC_STP' : 8,
'OFPC_RESERVED' : 16,
'OFPC_IP_REASM' : 32,
'OFPC_QUEUE_STATS' : 64,
'OFPC_ARP_MATCH_IP' : 128,
}
ofp_config_flags_rev_map = {
'OFPC_FRAG_NORMAL' : 0,
'OFPC_FRAG_DROP' : 1,
'OFPC_FRAG_REASM' : 2,
'OFPC_FRAG_MASK' : 3,
}
ofp_flow_mod_command_rev_map = {
'OFPFC_ADD' : 0,
'OFPFC_MODIFY' : 1,
'OFPFC_MODIFY_STRICT' : 2,
'OFPFC_DELETE' : 3,
'OFPFC_DELETE_STRICT' : 4,
}
ofp_flow_mod_flags_rev_map = {
'OFPFF_SEND_FLOW_REM' : 1,
'OFPFF_CHECK_OVERLAP' : 2,
'OFPFF_EMERG' : 4,
}
ofp_stats_reply_flags_rev_map = {
'OFPSF_REPLY_MORE' : 1,
}
ofp_packet_in_reason_rev_map = {
'OFPR_NO_MATCH' : 0,
'OFPR_ACTION' : 1,
}
ofp_flow_removed_reason_rev_map = {
'OFPRR_IDLE_TIMEOUT' : 0,
'OFPRR_HARD_TIMEOUT' : 1,
'OFPRR_DELETE' : 2,
}
ofp_port_reason_rev_map = {
'OFPPR_ADD' : 0,
'OFPPR_DELETE' : 1,
'OFPPR_MODIFY' : 2,
}
ofp_port_rev_map = {
'OFPP_MAX' : 65280,
'OFPP_IN_PORT' : 65528,
'OFPP_TABLE' : 65529,
'OFPP_NORMAL' : 65530,
'OFPP_FLOOD' : 65531,
'OFPP_ALL' : 65532,
'OFPP_CONTROLLER' : 65533,
'OFPP_LOCAL' : 65534,
'OFPP_NONE' : 65535,
}
ofp_flow_wildcards_rev_map = {
'OFPFW_IN_PORT' : 1,
'OFPFW_DL_VLAN' : 2,
'OFPFW_DL_SRC' : 4,
'OFPFW_DL_DST' : 8,
'OFPFW_DL_TYPE' : 16,
'OFPFW_NW_PROTO' : 32,
'OFPFW_TP_SRC' : 64,
'OFPFW_TP_DST' : 128,
'OFPFW_DL_VLAN_PCP' : 1048576,
'OFPFW_NW_TOS' : 1<<21,
}
OFPFW_NW_DST_BITS = 6
OFPFW_NW_SRC_BITS = 6
OFPFW_NW_SRC_SHIFT = 8
OFPFW_NW_DST_SHIFT = 14
OFPFW_NW_SRC_ALL = 8192
OFPFW_NW_SRC_MASK = 16128
OFPFW_NW_DST_ALL = 524288
OFPFW_NW_DST_MASK = 1032192
# Note: Need to handle all flags that are set in this.
# glob-all masks in the packet handling methods.
# (Esp. ofp_match.from_packet)
# Otherwise, packets are not being matched as they should
OFPFW_ALL = ((1 << 22) - 1)
NO_BUFFER = 4294967295
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Structure definitions
# ----------------------------------------------------------------------
#1. Openflow Header
class ofp_header (ofp_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.version = OFP_VERSION
#self.header_type = None # Set via class decorator
self._xid = None
if 'header_type' in kw:
self.header_type = kw.pop('header_type')
initHelper(self, kw)
@property
def xid (self):
if self._xid is None:
self._xid = generate_xid()
return self._xid
@xid.setter
def xid (self, val):
self._xid = val
def _validate (self):
if self.header_type not in ofp_type_map:
return "type is not a known message type"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!BBHL", self.version, self.header_type,
len(self), self.xid)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
return offset,length
def _unpack_header (self, raw, offset):
offset,(self.version, self.header_type, length, self.xid) = \
_unpack("!BBHL", raw, offset)
return offset,length
def __eq__ (self, other):
if type(self) != type(other): return False
if self.version != other.version: return False
if self.header_type != other.header_type: return False
if len(self) != len(other): return False
if self.xid != other.xid: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'version: ' + str(self.version) + '\n'
outstr += prefix + 'type: ' + str(self.header_type)# + '\n'
outstr += " (" + ofp_type_map.get(self.header_type, "Unknown") + ")\n"
try:
outstr += prefix + 'length: ' + str(len(self)) + '\n'
except:
pass
outstr += prefix + 'xid: ' + str(self.xid) + '\n'
return outstr
def __str__ (self):
return self.__class__.__name__ + "\n " + self.show(' ').strip()
class ofp_stats_body_base (ofp_base):
"""
Base class for stats bodies
"""
# Stats bodies don't actually have a type field in OpenFlow --
# the type information is in the request or reply. It's really
# convenient, though, so we add it. Note that you generally
# don't need to set this yourself -- the openflow_stats_XXX
# decorator will do it for you.
_type = None
"""
def unpack (self, data, offset=0, avail=None):
"""
class ofp_action_base (ofp_base):
"""
Base class for actions
This is sort of the equivalent of ofp_action_header in the spec.
However, ofp_action_header as the spec defines it is not super
useful for us, as it has the padding in it.
"""
type = None
class ofp_queue_prop_base (ofp_base):
"""
Base class for queue properties
This is sort of the equivalent of ofp_queue_prop_header in the spec.
However, ofp_queue_prop_header as the spec defines it is not super
useful for us, as it has the padding in it.
"""
property = None
#2. Common Structures
##2.1 Port Structures
class ofp_phy_port (ofp_base):
def __init__ (self, **kw):
self.port_no = 0
self.hw_addr = EMPTY_ETH
self.name = ""
self.config = 0
self.state = 0
self.curr = 0
self.advertised = 0
self.supported = 0
self.peer = 0
initHelper(self, kw)
def enable_config (self, mask):
"""
Turn on selected config bits
"""
return self.set_config(0xffFFffFF, mask)
def disable_config (self, mask):
"""
Turn off selected config bits
"""
return self.set_config(0, mask)
def set_config (self, config, mask):
"""
Updates the specified config bits
Returns which bits were changed
"""
old = self.config
self.config &= ~mask
self.config |= config
return old ^ self.config
def __str__ (self):
return "%s:%i" % (self.name, self.port_no)
def _validate (self):
if isinstance(self.hw_addr, bytes) and len(self.hw_addr) == 6:
pass
elif not isinstance(self.hw_addr, EthAddr):
return "hw_addr is not a valid format"
if len(self.name) > OFP_MAX_PORT_NAME_LEN:
return "name is too long"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += (self.hw_addr if isinstance(self.hw_addr, bytes) else
self.hw_addr.toRaw())
packed += self.name.ljust(OFP_MAX_PORT_NAME_LEN,'\0')
packed += struct.pack("!LLLLLL", self.config, self.state, self.curr,
self.advertised, self.supported, self.peer)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset,self.name = _readzs(raw, offset, OFP_MAX_PORT_NAME_LEN)
offset,(self.config, self.state, self.curr, self.advertised,
self.supported, self.peer) = _unpack("!LLLLLL", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 48
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.name != other.name: return False
if self.config != other.config: return False
if self.state != other.state: return False
if self.curr != other.curr: return False
if self.advertised != other.advertised: return False
if self.supported != other.supported: return False
if self.peer != other.peer: return False
return True
def __cmp__ (self, other):
if type(other) != type(self): return id(self)-id(other)
if self.port_no < other.port_no: return -1
if self.port_no > other.port_no: return 1
if self == other: return 0
return id(self)-id(other)
def __hash__(self, *args, **kwargs):
return hash(self.port_no) ^ hash(self.hw_addr) ^ \
hash(self.name) ^ hash(self.config) ^ \
hash(self.state) ^ hash(self.curr) ^ \
           hash(self.advertised) ^ hash(self.supported) ^ \
hash(self.peer)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
outstr += prefix + 'name: ' + str(self.name) + '\n'
outstr += prefix + 'config: ' + str(self.config) + '\n'
outstr += prefix + 'state: ' + str(self.state) + '\n'
outstr += prefix + 'curr: ' + str(self.curr) + '\n'
outstr += prefix + 'advertised: ' + str(self.advertised) + '\n'
outstr += prefix + 'supported: ' + str(self.supported) + '\n'
outstr += prefix + 'peer: ' + str(self.peer) + '\n'
return outstr
def __repr__(self):
return self.show()
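# Illustrative sketch (not part of the original file, never called):
# ofp_phy_port is a fixed 48-byte structure, so packing and re-parsing one
# is a simple round trip. The field values below are made up.
def _phy_port_roundtrip_example ():
  p = ofp_phy_port(port_no=1, hw_addr=EthAddr("00:11:22:33:44:55"),
                   name="eth0",
                   curr=ofp_port_features_rev_map['OFPPF_100MB_FD'])
  raw = p.pack()
  assert len(raw) == len(p) == 48
  p2 = ofp_phy_port()
  p2.unpack(raw)
  assert p == p2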
##2.2 Queue Structures
class ofp_packet_queue (ofp_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.queue_id = 0
self.properties = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!LH", self.queue_id, len(self))
packed += _PAD2 # Pad
for i in self.properties:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.queue_id, length) = _unpack("!LH", raw, offset)
offset = _skip(raw, offset, 2)
length -= (4 + 2 + 2)
offset,self.properties = _unpack_queue_props(raw, length, offset)
assert offset - _offset == len(self)
return offset
def __len__ (self):
l = 8
for i in self.properties:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if self.queue_id != other.queue_id: return False
if len(self) != len(other): return False
if self.properties != other.properties: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'properties: \n'
for obj in self.properties:
outstr += obj.show(prefix + ' ')
return outstr
class ofp_queue_prop_generic (ofp_queue_prop_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.property = None # Purposely bad
self.data = _PAD4
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.property, len(self))
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.property, length) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length-4)
assert offset - _offset == len(self)
return offset
  def __len__ (self):
    return 4 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.property != other.property: return False
if len(self) != len(other): return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'property: ' + str(self.property) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_queue_prop('OFPQT_NONE', 0)
class ofp_queue_prop_none (ofp_queue_prop_generic):
pass
@openflow_queue_prop('OFPQT_MIN_RATE', 1)
class ofp_queue_prop_min_rate (ofp_base):
def __init__ (self, **kw):
self.rate = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.property, len(self))
packed += _PAD4
packed += struct.pack("!H", self.rate)
packed += _PAD6
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.property, length, pad) = \
_unpack("!HHL", raw, offset)
offset,(self.rate,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.property != other.property: return False
if self.rate != other.rate: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'property: ' + str(self.property) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'rate: ' + str(self.rate) + '\n'
return outstr
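# Illustrative sketch (not part of the original file, never called): how a
# queue description with a single min-rate property fits together. The rate
# is in tenths of a percent, so 500 means a 50% guaranteed minimum.
def _packet_queue_example ():
  q = ofp_packet_queue(queue_id=1)
  q.properties.append(ofp_queue_prop_min_rate(rate=500))
  assert len(q) == 8 + 16     # 8-byte queue header + 16-byte property
  assert len(q.pack()) == 24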
##2.3 Flow Match Structures
class ofp_match (ofp_base):
adjust_wildcards = True # Set to true to "fix" outgoing wildcards
@classmethod
def from_packet (cls, packet, in_port = None):
"""
Constructs an exact match for the given packet
@param in_port The switch port the packet arrived on if you want
the resulting match to have its in_port set.
If "packet" is a packet_in, this is ignored.
@param packet A pox.packet.ethernet instance or a packet_in
"""
if isinstance(packet, ofp_packet_in):
in_port = packet.in_port
packet = ethernet(packet.data)
assert assert_type("packet", packet, ethernet, none_ok=False)
match = cls()
if in_port is not None:
match.in_port = in_port
match.dl_src = packet.src
match.dl_dst = packet.dst
match.dl_type = packet.type
p = packet.next
if isinstance(p, vlan):
match.dl_type = p.eth_type
match.dl_vlan = p.id
match.dl_vlan_pcp = p.pcp
p = p.next
else:
match.dl_vlan = OFP_VLAN_NONE
match.dl_vlan_pcp = 0
if isinstance(p, ipv4):
match.nw_src = p.srcip
match.nw_dst = p.dstip
match.nw_proto = p.protocol
match.nw_tos = p.tos
p = p.next
if isinstance(p, udp) or isinstance(p, tcp):
match.tp_src = p.srcport
match.tp_dst = p.dstport
elif isinstance(p, icmp):
match.tp_src = p.type
match.tp_dst = p.code
elif isinstance(p, arp):
if p.opcode <= 255:
match.nw_proto = p.opcode
match.nw_src = p.protosrc
match.nw_dst = p.protodst
return match
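  # Illustrative note (not part of the original file): in a POX packet_in
  # handler this is typically used as
  #   match = ofp_match.from_packet(event.ofp)
  # or, for an already-parsed ethernet object plus the ingress port,
  #   match = ofp_match.from_packet(packet, event.port)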
def optimize (self):
"""
Reduce the number of wildcards used.
"""
#TODO: Fix for optional cases (i.e. ARP)
if self.dl_vlan == OFP_VLAN_NONE:
self.dl_vlan_pcp = 0
#TODO: What do we do when something is "behind" a wildcard?
# e.g., does nw_src count if dl_type is wild or only if it's 0x0800?
if self.dl_type is not None:
if self.dl_type != 0x0800:
# Not IP
if self.dl_type != 0x0806:
# Not IP or ARP
self.nw_src = IPAddr(0)
self.nw_dst = IPAddr(0)
self.nw_proto = 0
self.nw_tos = 0
self.tp_src = 0
self.tp_dst = 0
else:
# It's IP
if (self.nw_proto != 6 and self.nw_proto != 17
and self.nw_proto != 1):
# Not TCP, UDP, or ICMP
self.tp_src = 0
self.tp_dst = 0
self.wildcards = self._normalize_wildcards(self.wildcards)
return self # for chaining
def clone (self):
n = ofp_match()
for k,v in ofp_match_data.iteritems():
setattr(n, '_' + k, getattr(self, '_' + k))
n.wildcards = self.wildcards
return n
def flip (self):
"""
Return version of this match with src and dst fields swapped
"""
reversed = self.clone()
for field in ('dl','nw','tp'):
setattr(reversed, field + '_src', getattr(self, field + '_dst'))
setattr(reversed, field + '_dst', getattr(self, field + '_src'))
return reversed
def __init__ (self, **kw):
for k,v in ofp_match_data.iteritems():
setattr(self, '_' + k, v[0])
self.wildcards = self._normalize_wildcards(OFPFW_ALL)
# This is basically initHelper(), but tweaked slightly since this
# class does some magic of its own.
for k,v in kw.iteritems():
if not hasattr(self, '_'+k):
raise TypeError(self.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(self, k, v)
def get_nw_dst (self):
if (self.wildcards & OFPFW_NW_DST_ALL) == OFPFW_NW_DST_ALL:
return (None, 0)
w = (self.wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
return (self._nw_dst,32-w if w <= 32 else 0)
def get_nw_src (self):
if (self.wildcards & OFPFW_NW_SRC_ALL) == OFPFW_NW_SRC_ALL:
return (None, 0)
w = (self.wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
return (self._nw_src,32-w if w <= 32 else 0)
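  # Illustrative note (not part of the original file): the two getters above
  # decode the CIDR-style prefix stored in the wildcard bits. For example,
  # after match.set_nw_src("10.0.0.0/8") the OFPFW_NW_SRC bits hold 32-8 = 24
  # and get_nw_src() returns (IPAddr("10.0.0.0"), 8); a fully wildcarded
  # field reads back as (None, 0).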
def set_nw_dst (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a == None:
self._nw_dst = ofp_match_data['nw_dst'][0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ofp_match_data['nw_dst'][1]
return
self._nw_dst = a[0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_DST_SHIFT)
def set_nw_src (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a == None:
self._nw_src = ofp_match_data['nw_src'][0]
self.wildcards &= ~OFPFW_NW_SRC_MASK
self.wildcards |= ofp_match_data['nw_src'][1]
return
self._nw_src = a[0]
self.wildcards &= ~OFPFW_NW_SRC_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_SRC_SHIFT)
def _make_addr (self, ipOrIPAndBits, bits=None):
if ipOrIPAndBits == None: return None
b = None
if type(ipOrIPAndBits) is tuple:
ip = ipOrIPAndBits[0]
b = int(ipOrIPAndBits[1])
if (type(ipOrIPAndBits) is str) and (len(ipOrIPAndBits) != 4):
if ipOrIPAndBits.find('/') != -1:
#s = ipOrIPAndBits.split('/')
s = parse_cidr(ipOrIPAndBits, infer=False)
ip = s[0]
b = int(s[1]) if b is None else b
else:
ip = ipOrIPAndBits
b = 32 if b is None else b
else:
ip = ipOrIPAndBits
b = 32 if b is None else b
if type(ip) is str:
ip = IPAddr(ip)
if bits != None: b = bits
if b > 32: b = 32
elif b < 0: b = 0
return (ip, b)
def __setattr__ (self, name, value):
if name not in ofp_match_data:
self.__dict__[name] = value
return
if name == 'nw_dst' or name == 'nw_src':
# Special handling
getattr(self, 'set_' + name)(value)
return value
if value is None:
setattr(self, '_' + name, ofp_match_data[name][0])
self.wildcards |= ofp_match_data[name][1]
else:
setattr(self, '_' + name, value)
self.wildcards = self.wildcards & ~ofp_match_data[name][1]
return value
def __getattr__ (self, name):
if name in ofp_match_data:
if ( (self.wildcards & ofp_match_data[name][1])
== ofp_match_data[name][1] ):
# It's wildcarded -- always return None
return None
if name == 'nw_dst' or name == 'nw_src':
# Special handling
return getattr(self, 'get_' + name)()[0]
return self.__dict__['_' + name]
raise AttributeError("attribute not found: "+name)
def _validate (self):
# TODO
return None
def pack (self, flow_mod=False):
assert self._assert()
packed = b""
if self.adjust_wildcards and flow_mod:
wc = self._wire_wildcards(self.wildcards)
else:
wc = self.wildcards
packed += struct.pack("!LH", wc, self.in_port or 0)
if self.dl_src == None:
packed += EMPTY_ETH.toRaw()
elif type(self.dl_src) is bytes:
packed += self.dl_src
else:
packed += self.dl_src.toRaw()
if self.dl_dst == None:
packed += EMPTY_ETH.toRaw()
elif type(self.dl_dst) is bytes:
packed += self.dl_dst
else:
packed += self.dl_dst.toRaw()
def check_ip(val):
return (val or 0) if self.dl_type == 0x0800 else 0
def check_ip_or_arp(val):
return (val or 0) if self.dl_type == 0x0800 \
or self.dl_type == 0x0806 else 0
def check_tp(val):
return (val or 0) if self.dl_type == 0x0800 \
and self.nw_proto in (1,6,17) else 0
packed += struct.pack("!HB", self.dl_vlan or 0, self.dl_vlan_pcp or 0)
packed += _PAD # Hardcode padding
packed += struct.pack("!HBB", self.dl_type or 0,
check_ip(self.nw_tos), check_ip_or_arp(self.nw_proto))
packed += _PAD2 # Hardcode padding
def fix (addr):
if addr is None: return 0
if type(addr) is int: return addr & 0xffFFffFF
if type(addr) is long: return addr & 0xffFFffFF
return addr.toUnsigned()
packed += struct.pack("!LLHH", check_ip_or_arp(fix(self.nw_src)),
check_ip_or_arp(fix(self.nw_dst)),
check_tp(self.tp_src), check_tp(self.tp_dst))
return packed
def _normalize_wildcards (self, wildcards):
"""
nw_src and nw_dst values greater than 32 mean the same thing as 32.
We normalize them here just to be clean and so that comparisons act
as you'd want them to.
"""
if ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT) > 32:
wildcards &= ~OFPFW_NW_SRC_MASK
wildcards |= (32 << OFPFW_NW_SRC_SHIFT)
if ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT) > 32:
wildcards &= ~OFPFW_NW_DST_MASK
wildcards |= (32 << OFPFW_NW_DST_SHIFT)
return wildcards
def _wire_wildcards(self, wildcards):
"""
Normalize the wildcard bits to the openflow wire representation.
Note this atrocity from the OF1.1 spec:
Protocol-specific fields within ofp_match will be ignored within
a single table when the corresponding protocol is not specified in the
match. The IP header and transport header fields
will be ignored unless the Ethertype is specified as either IPv4 or
ARP. The tp_src and tp_dst fields will be ignored unless the network
protocol specified is as TCP, UDP or SCTP. Fields that are ignored
don't need to be wildcarded and should be set to 0.
"""
if self.dl_type == 0x0800:
# IP
if self.nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Clear TP wildcards for the wire
return wildcards & ~(OFPFW_TP_SRC | OFPFW_TP_DST)
else:
return wildcards
elif self.dl_type == 0x0806:
# ARP: clear NW_TOS / TP wildcards for the wire
return wildcards & ~( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
else:
# not even IP. Clear NW/TP wildcards for the wire
return wildcards & ~( OFPFW_NW_TOS | OFPFW_NW_PROTO
| OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
| OFPFW_TP_SRC | OFPFW_TP_DST)
def _unwire_wildcards(self, wildcards):
"""
Normalize the wildcard bits from the openflow wire representation.
Note this atrocity from the OF1.1 spec:
Protocol-specific fields within ofp_match will be ignored within
a single table when the corresponding protocol is not specified in the
match. The IP header and transport header fields
will be ignored unless the Ethertype is specified as either IPv4 or
ARP. The tp_src and tp_dst fields will be ignored unless the network
protocol specified is as TCP, UDP or SCTP. Fields that are ignored
don't need to be wildcarded and should be set to 0.
"""
if self._dl_type == 0x0800:
# IP
if self._nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Set TP wildcards for the object
return wildcards | (OFPFW_TP_SRC | OFPFW_TP_DST)
else:
return wildcards
elif self._dl_type == 0x0806:
# ARP: Set NW_TOS / TP wildcards for the object
return wildcards | ( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
else:
# not even IP. Set NW/TP wildcards for the object
return wildcards | ( OFPFW_NW_TOS | OFPFW_NW_PROTO
| OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
| OFPFW_TP_SRC | OFPFW_TP_DST)
@property
def is_wildcarded (self):
return self.wildcards & OFPFW_ALL != 0
@property
def is_exact (self):
return not self.is_wildcarded
def unpack (self, raw, offset=0, flow_mod=False):
_offset = offset
offset,(wildcards, self._in_port) = _unpack("!LH",raw, offset)
offset,self._dl_src = _readether(raw, offset)
offset,self._dl_dst = _readether(raw, offset)
offset,(self._dl_vlan, self._dl_vlan_pcp) = \
_unpack("!HB", raw, offset)
offset = _skip(raw, offset, 1)
offset,(self._dl_type, self._nw_tos, self._nw_proto) = \
_unpack("!HBB", raw, offset)
offset = _skip(raw, offset, 2)
offset,self._nw_src = _readip(raw, offset)
offset,self._nw_dst = _readip(raw, offset)
offset,(self._tp_src, self._tp_dst) = _unpack("!HH", raw, offset)
# Only unwire wildcards for flow_mod
self.wildcards = self._normalize_wildcards(
self._unwire_wildcards(wildcards) if flow_mod else wildcards)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 40
def hash_code (self):
'''
ofp_match is not properly hashable since it is mutable, but it can
still be useful to easily generate a hash code.
'''
h = self.wildcards
for f in ofp_match_data:
v = getattr(self, f)
if type(v) is int:
h ^= v
elif type(v) is long:
h ^= v
return int(h & 0x7fFFffFF)
def matches_with_wildcards (self, other, consider_other_wildcards=True):
"""
Test whether /this/ match completely encompasses the other match.
Important for non-strict modify flow_mods etc.
"""
assert assert_type("other", other, ofp_match, none_ok=False)
# short cut for equal matches
if(self == other): return True
# only candidate if all wildcard bits in the *other* match are also
# set in this match (i.e., a submatch)
# first compare the bitmask part
if(consider_other_wildcards):
self_bits = self.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
other_bits = other.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
if( self_bits | other_bits != self_bits): return False
def match_fail(mine, others):
return mine != None and mine != others
if match_fail(self.in_port, other.in_port): return False
if match_fail(self.dl_vlan, other.dl_vlan): return False
if match_fail(self.dl_src, other.dl_src): return False
if match_fail(self.dl_dst, other.dl_dst): return False
if match_fail(self.dl_type, other.dl_type): return False
if match_fail(self.nw_proto, other.nw_proto): return False
if match_fail(self.tp_src, other.tp_src): return False
if match_fail(self.tp_dst, other.tp_dst): return False
if match_fail(self.dl_vlan_pcp, other.dl_vlan_pcp): return False
if match_fail(self.nw_tos, other.nw_tos): return False
self_nw_src = self.get_nw_src()
if(self_nw_src[0] != None):
other_nw_src = other.get_nw_src()
if self_nw_src[1] > other_nw_src[1]: return False
if not IPAddr(other_nw_src[0]).inNetwork(
(self_nw_src[0], self_nw_src[1])): return False
self_nw_dst = self.get_nw_dst()
if(self_nw_dst[0] != None):
other_nw_dst = other.get_nw_dst()
if self_nw_dst[1] > other_nw_dst[1]: return False
if not IPAddr(other_nw_dst[0]).inNetwork(
(self_nw_dst[0], self_nw_dst[1])): return False
return True
def __eq__ (self, other):
if type(self) != type(other): return False
if self.wildcards != other.wildcards: return False
if self.in_port != other.in_port: return False
if self.dl_src != other.dl_src: return False
if self.dl_dst != other.dl_dst: return False
if self.dl_vlan != other.dl_vlan: return False
if self.dl_vlan_pcp != other.dl_vlan_pcp: return False
if self.dl_type != other.dl_type: return False
if self.nw_tos != other.nw_tos: return False
if self.nw_proto != other.nw_proto: return False
if self.nw_src != other.nw_src: return False
if self.nw_dst != other.nw_dst: return False
if self.tp_src != other.tp_src: return False
if self.tp_dst != other.tp_dst: return False
return True
def __str__ (self):
return self.__class__.__name__ + "\n " + self.show(' ').strip()
def show (self, prefix=''):
def binstr (n):
s = ''
while True:
s = ('1' if n & 1 else '0') + s
n >>= 1
if n == 0: break
return s
def safehex(n):
if n == None:
return "(None)"
else:
return hex(n)
def show_wildcards(w):
parts = [ k.lower()[len("OFPFW_"):]
for (k,v) in ofp_flow_wildcards_rev_map.iteritems()
if v & w == v ]
nw_src_bits = (w & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
if nw_src_bits > 0:
parts.append("nw_src(/%d)" % (32 - nw_src_bits))
nw_dst_bits = (w & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
if nw_dst_bits > 0:
parts.append("nw_dst(/%d)" % (32 - nw_dst_bits))
return "|".join(parts)
outstr = ''
outstr += prefix + 'wildcards: '
outstr += show_wildcards(self.wildcards)
outstr += ' (%s = %x)\n' % (binstr(self.wildcards), self.wildcards)
def append (f, formatter=str):
v = self.__getattr__(f)
if v is None: return ''
return prefix + f + ": " + formatter(v) + "\n"
outstr += append('in_port')
outstr += append('dl_src')
outstr += append('dl_dst')
outstr += append('dl_vlan')
outstr += append('dl_vlan_pcp')
outstr += append('dl_type', safehex)
outstr += append('nw_tos')
outstr += append('nw_proto')
outstr += append('nw_src')
outstr += append('nw_dst')
outstr += append('tp_src')
outstr += append('tp_dst')
return outstr
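# Illustrative sketch (not part of the original POX API): exercises the CIDR
# handling, wildcard bookkeeping, and matches_with_wildcards() encompassment
# test implemented by ofp_match above.
def _example_match_usage ():
  # A wildcarded match on TCP traffic to port 80 in 10.0.0.0/24
  broad = ofp_match()
  broad.dl_type = 0x0800         # IPv4 (routed through __setattr__)
  broad.nw_proto = 6             # TCP
  broad.nw_dst = "10.0.0.0/24"   # handled by set_nw_dst()
  broad.tp_dst = 80
  assert broad.get_nw_dst()[1] == 24
  assert broad.is_wildcarded     # in_port, dl_src, ... are still wild
  # A more specific match for one host is encompassed by the broad match,
  # but not the other way around.
  narrow = broad.clone()
  narrow.nw_dst = "10.0.0.7"
  narrow.in_port = 1
  assert broad.matches_with_wildcards(narrow)
  assert not narrow.matches_with_wildcards(broad)
  return broad, narrow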
class ofp_action_generic (ofp_action_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.type = None # Purposely bad
self.data = _PAD4
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.type, len(self))
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length-4)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 4 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_action('OFPAT_OUTPUT', 0)
class ofp_action_output (ofp_action_base):
def __init__ (self, **kw):
self.port = None # Purposely bad -- require specification
self.max_len = 0xffFF
initHelper(self, kw)
def pack (self):
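    # max_len only matters for packets sent to the controller; for any other
    # output port the field is meaningless, so it is normalized to 0 here.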
if self.port != OFPP_CONTROLLER:
self.max_len = 0
assert self._assert()
packed = b""
packed += struct.pack("!HHHH", self.type, len(self), self.port,
self.max_len)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.port, self.max_len) = \
_unpack("!HHHH", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.port != other.port: return False
if self.max_len != other.max_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'max_len: ' + str(self.max_len) + '\n'
return outstr
@openflow_action('OFPAT_ENQUEUE', 11)
class ofp_action_enqueue (ofp_action_base):
def __init__ (self, **kw):
self.port = None # Require user to set
self.queue_id = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.port)
packed += _PAD6 # Pad
packed += struct.pack("!L", self.queue_id)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.port) = _unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.queue_id,) = _unpack("!L", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.port != other.port: return False
if self.queue_id != other.queue_id: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
return outstr
@openflow_action('OFPAT_STRIP_VLAN', 3)
class ofp_action_strip_vlan (ofp_action_base):
def __init__ (self):
pass
def pack (self):
packed = struct.pack("!HHi", self.type, len(self), 0)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset = _skip(raw, offset, 4)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_action('OFPAT_SET_VLAN_VID', 1)
class ofp_action_vlan_vid (ofp_action_base):
def __init__ (self, **kw):
self.vlan_vid = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.vlan_vid)
packed += _PAD2 # Pad
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vlan_vid) = \
_unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 2)
#TODO: check length for this and other actions
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vlan_vid != other.vlan_vid: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vlan_vid: ' + str(self.vlan_vid) + '\n'
return outstr
@openflow_action('OFPAT_SET_VLAN_PCP', 2)
class ofp_action_vlan_pcp (ofp_action_base):
def __init__ (self, **kw):
self.vlan_pcp = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHB", self.type, len(self), self.vlan_pcp)
packed += _PAD3 # Pad
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vlan_pcp) = \
_unpack("!HHB", raw, offset)
offset = _skip(raw, offset, 3)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vlan_pcp != other.vlan_pcp: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vlan_pcp: ' + str(self.vlan_pcp) + '\n'
return outstr
@openflow_action('OFPAT_SET_DL_DST', 5)
@openflow_action('OFPAT_SET_DL_SRC', 4)
class ofp_action_dl_addr (ofp_action_base):
@classmethod
def set_dst (cls, dl_addr = None):
return cls(OFPAT_SET_DL_DST, dl_addr)
@classmethod
def set_src (cls, dl_addr = None):
return cls(OFPAT_SET_DL_SRC, dl_addr)
def __init__ (self, type = None, dl_addr = None):
"""
'type' should be OFPAT_SET_DL_SRC or OFPAT_SET_DL_DST.
"""
self.type = type
self.dl_addr = EMPTY_ETH
if dl_addr is not None:
self.dl_addr = EthAddr(dl_addr)
def _validate (self):
if (not isinstance(self.dl_addr, EthAddr)
and not isinstance(self.dl_addr, bytes)):
return "dl_addr is not string or EthAddr"
if isinstance(self.dl_addr, bytes) and len(self.dl_addr) != 6:
return "dl_addr is not of size 6"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.type, len(self))
if isinstance(self.dl_addr, EthAddr):
packed += self.dl_addr.toRaw()
else:
packed += self.dl_addr
packed += _PAD6
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.dl_addr = _readether(raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.dl_addr != other.dl_addr: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'dl_addr: ' + str(self.dl_addr) + '\n'
return outstr
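# Illustrative sketch (not part of the original POX code): the set_src/set_dst
# classmethods above are the usual way to build MAC-rewrite actions. The
# address below is just a placeholder.
def _example_rewrite_dl_dst ():
  act = ofp_action_dl_addr.set_dst("00:11:22:33:44:55")
  assert len(act.pack()) == len(act) == 16
  return act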
@openflow_action('OFPAT_SET_NW_DST', 7)
@openflow_action('OFPAT_SET_NW_SRC', 6)
class ofp_action_nw_addr (ofp_action_base):
@classmethod
def set_dst (cls, nw_addr = None):
return cls(OFPAT_SET_NW_DST, nw_addr)
@classmethod
def set_src (cls, nw_addr = None):
return cls(OFPAT_SET_NW_SRC, nw_addr)
def __init__ (self, type = None, nw_addr = None):
"""
'type' should be OFPAT_SET_NW_SRC or OFPAT_SET_NW_DST
"""
self.type = type
if nw_addr is not None:
self.nw_addr = IPAddr(nw_addr)
else:
self.nw_addr = IPAddr(0)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHl", self.type, len(self),
self.nw_addr.toSigned())
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.nw_addr = _readip(raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.nw_addr != other.nw_addr: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'nw_addr: ' + str(self.nw_addr) + '\n'
return outstr
@openflow_action('OFPAT_SET_NW_TOS', 8)
class ofp_action_nw_tos (ofp_action_base):
def __init__ (self, nw_tos = 0):
self.nw_tos = nw_tos
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHB", self.type, len(self), self.nw_tos)
packed += _PAD3
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.nw_tos) = _unpack("!HHB", raw, offset)
offset = _skip(raw, offset, 3)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.nw_tos != other.nw_tos: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'nw_tos: ' + str(self.nw_tos) + '\n'
return outstr
@openflow_action('OFPAT_SET_TP_DST', 10)
@openflow_action('OFPAT_SET_TP_SRC', 9)
class ofp_action_tp_port (ofp_action_base):
@classmethod
def set_dst (cls, tp_port = None):
return cls(OFPAT_SET_TP_DST, tp_port)
@classmethod
def set_src (cls, tp_port = None):
return cls(OFPAT_SET_TP_SRC, tp_port)
def __init__ (self, type=None, tp_port = 0):
"""
'type' is OFPAT_SET_TP_SRC/DST
"""
self.type = type
self.tp_port = tp_port
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.tp_port)
packed += _PAD2
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.tp_port) = \
_unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 2)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.tp_port != other.tp_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'tp_port: ' + str(self.tp_port) + '\n'
return outstr
class ofp_action_vendor_base (ofp_action_base):
"""
Base class for vendor actions
"""
type = 65535 # OFPAT_VENDOR
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return True
def _init (self, kw):
"""
Initialize fields
    Override this.
"""
pass
def _pack_body (self):
"""
Pack body.
"""
return b""
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
return offset
def _body_length (self):
"""
Return length of body.
Optionally override this.
"""
return len(self._pack_body())
def _show (self, prefix):
"""
Format additional fields as text
"""
return ""
def __init__ (self, **kw):
self._init(kw)
assert hasattr(self, 'vendor')
#self.vendor = 0
initHelper(self, kw)
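  # NOTE: the definition below shadows the _pack_body() hook declared earlier
  # in this class, so subclasses that don't override _pack_body() must
  # provide a 'body' attribute.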
def _pack_body (self):
if hasattr(self.body, 'pack'):
return self.body.pack()
else:
return bytes(self.body)
def pack (self):
assert self._assert()
body = self._pack_body()
packed = b""
packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
packed += body
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
offset = self._unpack_body(raw, offset, length - 8)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 8 + self._body_length()
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vendor != other.vendor: return False
return self._eq(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
outstr += self._show(prefix)
return outstr
@openflow_action('OFPAT_VENDOR', 65535)
class ofp_action_vendor_generic (ofp_action_base):
def __init__ (self, **kw):
self.vendor = 0
self.body = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.body, 'pack'):
return self.body.pack()
else:
return bytes(self.body)
def pack (self):
assert self._assert()
body = self._pack_body()
packed = b""
packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
packed += body
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 8 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vendor != other.vendor: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
return outstr
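# Illustrative sketch (not part of the original POX code): a minimal vendor
# action built on ofp_action_vendor_base, showing the hooks a subclass is
# expected to override. The vendor id and subtype below are placeholders.
class _example_vendor_action (ofp_action_vendor_base):
  def _init (self, kw):
    self.vendor = 0x2320    # placeholder vendor id
    self.subtype = 0
  def _eq (self, other):
    return self.subtype == other.subtype
  def _pack_body (self):
    return struct.pack("!HH", self.subtype, 0)   # subtype + 2 pad bytes
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,_) = _unpack("!HH", raw, offset)
    return offset
  def _body_length (self):
    return 4
  def _show (self, prefix):
    return prefix + 'subtype: ' + str(self.subtype) + '\n'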
#3. Controller-to-Switch Messages
##3.1 Handshake
@openflow_s_message("OFPT_FEATURES_REPLY", 6,
reply_to="ofp_features_request")
class ofp_features_reply (ofp_header):
_MIN_LENGTH = 32
def __init__ (self, **kw):
ofp_header.__init__(self)
self.datapath_id = 0
self.n_buffers = 0
self.n_tables = 0
self.capabilities = 0
self.actions = 0
self.ports = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!QLB", self.datapath_id, self.n_buffers,
self.n_tables)
packed += _PAD3
packed += struct.pack("!LL", self.capabilities, self.actions)
for i in self.ports:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.datapath_id, self.n_buffers, self.n_tables) = \
_unpack("!QLB", raw, offset)
offset = _skip(raw, offset, 3)
offset,(self.capabilities, self.actions) = _unpack("!LL", raw, offset)
portCount = (length - 32) / len(ofp_phy_port)
self.ports = []
for i in xrange(0, portCount):
p = ofp_phy_port()
offset = p.unpack(raw, offset)
self.ports.append(p)
assert length == len(self)
return offset,length
def __len__ (self):
return 32 + len(self.ports) * len(ofp_phy_port)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.datapath_id != other.datapath_id: return False
if self.n_buffers != other.n_buffers: return False
if self.n_tables != other.n_tables: return False
if self.capabilities != other.capabilities: return False
if self.actions != other.actions: return False
if self.ports != other.ports: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'datapath_id: ' + str(self.datapath_id) + '\n'
outstr += prefix + 'n_buffers: ' + str(self.n_buffers) + '\n'
outstr += prefix + 'n_tables: ' + str(self.n_tables) + '\n'
outstr += prefix + 'capabilities: ' + str(self.capabilities) + '\n'
outstr += prefix + 'actions: ' + str(self.actions) + '\n'
outstr += prefix + 'ports: \n'
for obj in self.ports:
outstr += obj.show(prefix + ' ')
return outstr
ofp_switch_features = ofp_features_reply
##3.2 Switch Configuration
@openflow_c_message("OFPT_SET_CONFIG", 9)
class ofp_set_config (ofp_header): # uses ofp_switch_config
def __init__ (self, **kw):
ofp_header.__init__(self)
self.flags = 0
self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.flags, self.miss_send_len)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.flags, self.miss_send_len) = _unpack("!HH", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
return outstr
##3.3 Modify State Messages
@openflow_c_message("OFPT_FLOW_MOD", 14)
class ofp_flow_mod (ofp_header):
_MIN_LENGTH = 72
def __init__ (self, **kw):
ofp_header.__init__(self)
if 'match' in kw:
self.match = None
else:
self.match = ofp_match()
self.cookie = 0
self.command = OFPFC_ADD
self.idle_timeout = 0
self.hard_timeout = 0
self.priority = OFP_DEFAULT_PRIORITY
self._buffer_id = NO_BUFFER
self.out_port = OFPP_NONE
self.flags = 0
self.actions = []
self.data = None # Not in the spec! Special magic! Can be packet_in.
# ofp_flow_mod/ofp_packet_out do some special handling of 'actions'...
# Allow "action" as a synonym for "actions"
if 'action' in kw and 'actions' not in kw:
kw['actions'] = kw['action']
del kw['action']
initHelper(self, kw)
# Allow use of actions=<a single action> for kw args.
if not hasattr(self.actions, '__getitem__'):
self.actions = [self.actions]
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
"""
Packs this object into its wire format.
May normalize fields.
NOTE: If "data" has been specified, this method may actually return
*more than just a single ofp_flow_mod* in packed form.
Specifically, it may also have a barrier and an ofp_packet_out.
"""
po = None
if self.data:
#TODO: It'd be nice to log and then ignore if not data_is_complete.
# Unfortunately, we currently have no logging in here, so we
      # assert instead, which is either too drastic or too quiet.
assert self.data.is_complete
assert self.buffer_id is None
self.buffer_id = self.data.buffer_id
if self.buffer_id is None:
po = ofp_packet_out(data=self.data)
po.in_port = self.data.in_port
po.actions.append(ofp_action_output(port = OFPP_TABLE))
# Should maybe check that packet hits the new entry...
# Or just duplicate the actions? (I think that's the best idea)
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack(flow_mod=True)
packed += struct.pack("!QHHHHLHH", self.cookie, self.command,
self.idle_timeout, self.hard_timeout,
self.priority, self._buffer_id, self.out_port,
self.flags)
for i in self.actions:
packed += i.pack()
if po:
packed += ofp_barrier_request().pack()
packed += po.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset, flow_mod=True)
offset,(self.cookie, self.command, self.idle_timeout,
self.hard_timeout, self.priority, self._buffer_id,
self.out_port, self.flags) = \
_unpack("!QHHHHLHH", raw, offset)
offset,self.actions = _unpack_actions(raw,
length-(32 + len(self.match)), offset)
assert length == len(self)
return offset,length
def __len__ (self):
l = 32 + len(self.match)
for i in self.actions:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.command != other.command: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'command: ' + str(self.command) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
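# Illustrative sketch (not part of the original POX code): builds a flow_mod
# that forwards HTTP traffic destined for 10.0.0.0/24 out port 2. The port
# number and addresses are placeholders.
def _example_flow_mod ():
  fm = ofp_flow_mod()
  fm.match.dl_type = 0x0800        # IPv4
  fm.match.nw_proto = 6            # TCP
  fm.match.nw_dst = "10.0.0.0/24"
  fm.match.tp_dst = 80
  fm.idle_timeout = 60
  fm.actions.append(ofp_action_output(port = 2))
  # Wildcards are normalized to the wire representation during pack()
  return fm.pack()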
@openflow_c_message("OFPT_PORT_MOD", 15)
class ofp_port_mod (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port_no = 0
self.hw_addr = EMPTY_ETH
self.config = 0
self.mask = 0
self.advertise = 0
initHelper(self, kw)
def _validate (self):
if (not isinstance(self.hw_addr, bytes)
and not isinstance(self.hw_addr, EthAddr)):
return "hw_addr is not bytes or EthAddr"
if len(self.hw_addr) != 6:
return "hw_addr is not of size 6"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port_no)
if isinstance(self.hw_addr, bytes):
packed += self.hw_addr
else:
packed += self.hw_addr.toRaw()
packed += struct.pack("!LLL", self.config, self.mask, self.advertise)
packed += _PAD4
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset,(self.config, self.mask, self.advertise) = \
_unpack("!LLL", raw, offset)
offset = _skip(raw, offset, 4)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 32
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.config != other.config: return False
if self.mask != other.mask: return False
if self.advertise != other.advertise: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
outstr += prefix + 'config: ' + str(self.config) + '\n'
outstr += prefix + 'mask: ' + str(self.mask) + '\n'
outstr += prefix + 'advertise: ' + str(self.advertise) + '\n'
return outstr
##3.4 Queue Configuration Messages
@openflow_c_message("OFPT_QUEUE_GET_CONFIG_REQUEST", 20)
class ofp_queue_get_config_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port)
packed += _PAD2
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 2)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port != other.port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port: ' + str(self.port) + '\n'
return outstr
@openflow_s_message("OFPT_QUEUE_GET_CONFIG_REPLY", 21)
class ofp_queue_get_config_reply (ofp_header):
_MIN_LENGTH = 16
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port = 0
self.queues = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port)
packed += _PAD6
for i in self.queues:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
remaining = length - 6 - 2 - len(ofp_header)
del self.queues[:]
# Not tested; probably buggy
while remaining > 0:
q = ofp_packet_queue()
_offset = q.unpack(raw, offset)
l = _offset - offset
offset = _offset
if l < 1: raise RuntimeError("Can't parse")
remaining -= l
self.queues.append(q)
assert length == len(self)
return offset,length
def __len__ (self):
l = 16
for i in self.queues:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port != other.port: return False
if self.queues != other.queues: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'queues: \n'
for obj in self.queues:
outstr += obj.show(prefix + ' ')
return outstr
@openflow_c_message("OFPT_STATS_REQUEST", 16)
class ofp_stats_request (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = None # Try to guess
self.flags = 0
self._body = b''
self._body_packed = None # Cache
initHelper(self, kw)
def pack (self):
if self.type is None:
if isinstance(self.body, ofp_stats_body_base):
self.type = self.body._type
else:
raise RuntimeError("Can't determine body type; specify it "
+ "explicitly")
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.flags)
packed += self._pack_body()
return packed
def _pack_body (self):
if self._body_packed is None:
if hasattr(self.body, 'pack'):
self._body_packed = self._body.pack()
else:
self._body_packed = self._body
return self._body_packed
@property
def body (self):
return self._body
@body.setter
def body (self, data):
self._body = data
    self._body_packed = None # invalidate the cached packed body
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
offset,body = _read(raw, offset, length - 12)
si = _stats_type_to_class_info.get(self.type)
if si is None:
self.body = ofp_generic_stats_body()
self.body.unpack(body, 0, len(body))
else:
if si.request is None:
raise RuntimeError("No request for " + str(si))
self.body = si.request()
self.body.unpack(body, 0, len(body))
#TODO: assert entire body is unpacked
assert length == len(self)
return offset,length
def __len__ (self):
return 12 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.flags != other.flags: return False
if self._pack_body() != other._pack_body(): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
@openflow_s_message("OFPT_STATS_REPLY", 17,
reply_to="ofp_stats_request")
class ofp_stats_reply (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = None # Guess
self.flags = 0
self.body = b''
self._body_data = (None, None)
initHelper(self, kw)
@property
def is_last_reply (self):
return (self.flags & 1) == 0
@is_last_reply.setter
def is_last_reply (self, value):
self.flags = self.flags & 0xfffe
if not value:
self.flags |= 1
@property
def body_data (self):
if self._body_data[0] is not self.body:
def _pack(b):
return b.pack() if hasattr(b, 'pack') else b
data = b''
if isinstance(self.body, collections.Iterable):
for b in self.body:
data += _pack(b)
else:
data = _pack(self.body)
self._body_data = (self.body, data)
return self._body_data[1]
def pack (self):
if self.type is None:
if isinstance(self.body, ofp_stats_body_base):
self.type = self.body._type
else:
raise RuntimeError("Can't determine body type; specify it "
+ "explicitly")
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.flags)
packed += self.body_data
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
offset,packed = _read(raw, offset, length - 12)
t = _stats_type_to_class_info.get(self.type)
if t is None:
#FIXME: Put in a generic container?
self.body = packed
else:
if t.reply is None:
#FIXME: Put in a generic container?
self.body = packed
else:
if not t.reply_is_list:
self.body = t.reply()
self.body.unpack(packed, 0, len(packed))
else:
prev_len = len(packed)
self.body = []
while len(packed):
part = t.reply()
off = part.unpack(packed, 0, len(packed))
packed = packed[off:]
assert len(packed) != prev_len
prev_len = len(packed)
self.body.append(part)
assert length == len(self)
return offset,length
def __len__ (self):
if isinstance(self.body, list):
return 12 + sum(len(part) for part in self.body)
return 12 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.flags != other.flags: return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
@openflow_stats_reply("OFPST_DESC", 0)
class ofp_desc_stats (ofp_stats_body_base):
def __init__ (self, **kw):
    self.mfr_desc = ""
    self.hw_desc = ""
    self.sw_desc = ""
    self.serial_num = ""
    self.dp_desc = ""
initHelper(self, kw)
def _validate (self):
if not isinstance(self.mfr_desc, str):
return "mfr_desc is not string"
if len(self.mfr_desc) > DESC_STR_LEN:
return "mfr_desc is not of size 256"
if not isinstance(self.hw_desc, str):
return "hw_desc is not string"
if len(self.hw_desc) > DESC_STR_LEN:
return "hw_desc is not of size 256"
if not isinstance(self.sw_desc, str):
return "sw_desc is not string"
if len(self.sw_desc) > DESC_STR_LEN:
return "sw_desc is not of size 256"
if not isinstance(self.serial_num, str):
return "serial_num is not string"
if len(self.serial_num) > SERIAL_NUM_LEN:
return "serial_num is not of size 32"
if not isinstance(self.dp_desc, str):
return "dp_desc is not string"
if len(self.dp_desc) > DESC_STR_LEN:
return "dp_desc is not of size 256"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.mfr_desc.ljust(DESC_STR_LEN,'\0')
packed += self.hw_desc.ljust(DESC_STR_LEN,'\0')
packed += self.sw_desc.ljust(DESC_STR_LEN,'\0')
packed += self.serial_num.ljust(SERIAL_NUM_LEN,'\0')
packed += self.dp_desc.ljust(DESC_STR_LEN,'\0')
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,self.mfr_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.hw_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.sw_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.serial_num = _readzs(raw, offset, SERIAL_NUM_LEN)
offset,self.dp_desc = _readzs(raw, offset, DESC_STR_LEN)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 1056
def __eq__ (self, other):
if type(self) != type(other): return False
if self.mfr_desc != other.mfr_desc: return False
if self.hw_desc != other.hw_desc: return False
if self.sw_desc != other.sw_desc: return False
if self.serial_num != other.serial_num: return False
if self.dp_desc != other.dp_desc: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'mfr_desc: ' + str(self.mfr_desc) + '\n'
outstr += prefix + 'hw_desc: ' + str(self.hw_desc) + '\n'
outstr += prefix + 'sw_desc: ' + str(self.sw_desc) + '\n'
outstr += prefix + 'serial_num: ' + str(self.serial_num) + '\n'
outstr += prefix + 'dp_desc: ' + str(self.dp_desc) + '\n'
return outstr
ofp_desc_stats_reply = ofp_desc_stats
# This next one is weird. It only exists so that the type-guessing
# will work for requests. I don't think it's really needed, though.
@openflow_stats_request('OFPST_DESC', 0)
class ofp_desc_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
pass
def pack (self):
return b""
def unpack (self, raw, offset, avail):
if avail != 0:
raise RuntimeError("Expected empty body")
return offset
@staticmethod
def __len__ ():
return 0
def __eq__ (self, other):
if type(self) != type(other): return False
return True
def show (self, prefix=''):
return "<empty>"
@openflow_stats_request('OFPST_FLOW', 1)
class ofp_flow_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.match = ofp_match()
self.table_id = TABLE_ALL
self.out_port = OFPP_NONE
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.match.pack()
packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset = self.match.unpack(raw, offset)
offset,(self.table_id, pad, self.out_port) = \
_unpack("!BBH", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 4 + len(ofp_match)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
return outstr
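# Illustrative sketch (not part of the original POX code): the stats request
# type is guessed from the body class (see ofp_stats_request.pack above), so
# an OFPST_FLOW request only needs a body.
def _example_flow_stats_request ():
  req = ofp_stats_request(body = ofp_flow_stats_request())
  packed = req.pack()
  assert req.type == 1    # OFPST_FLOW, filled in by pack()
  return packed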
@openflow_stats_reply('OFPST_FLOW', is_list = True)
class ofp_flow_stats (ofp_stats_body_base):
_MIN_LENGTH = 88
def __init__ (self, **kw):
self.table_id = 0
self.match = ofp_match()
self.duration_sec = 0
self.duration_nsec = 0
self.priority = OFP_DEFAULT_PRIORITY
self.idle_timeout = 0
self.hard_timeout = 0
self.cookie = 0
self.packet_count = 0
self.byte_count = 0
self.actions = []
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HBB", len(self), self.table_id, 0)
packed += self.match.pack()
packed += struct.pack("!LLHHH", self.duration_sec,
self.duration_nsec, self.priority,
self.idle_timeout, self.hard_timeout)
packed += _PAD6 # Pad
packed += struct.pack("!QQQ", self.cookie, self.packet_count,
self.byte_count)
for i in self.actions:
packed += i.pack()
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(length, self.table_id, pad) = _unpack("!HBB", raw, offset)
assert pad == 0
offset = self.match.unpack(raw, offset)
offset,(self.duration_sec, self.duration_nsec, self.priority,
self.idle_timeout, self.hard_timeout) = \
_unpack("!LLHHH", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.cookie, self.packet_count, self.byte_count) = \
_unpack("!QQQ", raw, offset)
assert (offset - _offset) == 48 + len(self.match)
offset,self.actions = _unpack_actions(raw,
length - (48 + len(self.match)), offset)
assert offset - _offset == len(self)
return offset
def __len__ (self):
l = 48 + len(self.match)
for i in self.actions:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if len(self) != len(other): return False
if self.table_id != other.table_id: return False
if self.match != other.match: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.priority != other.priority: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.cookie != other.cookie: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.actions != other.actions: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'length: ' + str(len(self)) + '\n'
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
ofp_flow_stats_reply = ofp_flow_stats
@openflow_stats_request('OFPST_AGGREGATE', 2)
class ofp_aggregate_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.match = ofp_match()
self.table_id = TABLE_ALL
self.out_port = OFPP_NONE
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.match.pack()
packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset = self.match.unpack(raw, offset)
offset,(self.table_id, pad, self.out_port) = \
_unpack("!BBH", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 44
def __eq__ (self, other):
if type(self) != type(other): return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
return outstr
@openflow_stats_reply('OFPST_AGGREGATE')
class ofp_aggregate_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.packet_count = 0
self.byte_count = 0
self.flow_count = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!QQL", self.packet_count, self.byte_count,
self.flow_count)
packed += _PAD4 # Pad
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.packet_count, self.byte_count, self.flow_count) = \
_unpack("!QQL", raw, offset)
offset = _skip(raw, offset, 4)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 24
def __eq__ (self, other):
if type(self) != type(other): return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.flow_count != other.flow_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
outstr += prefix + 'flow_count: ' + str(self.flow_count) + '\n'
return outstr
ofp_aggregate_stats_reply = ofp_aggregate_stats
@openflow_stats_reply('OFPST_TABLE', 3, is_list = True)
class ofp_table_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.table_id = 0
self.name = ""
self.wildcards = 0
self.max_entries = 0
self.active_count = 0
self.lookup_count = 0
self.matched_count = 0
initHelper(self, kw)
def _validate (self):
if not isinstance(self.name, str):
return "name is not string"
if len(self.name) > OFP_MAX_TABLE_NAME_LEN:
return "name is too long"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!B", self.table_id)
packed += _PAD3
packed += self.name.ljust(OFP_MAX_TABLE_NAME_LEN,'\0')
packed += struct.pack("!LLLQQ", self.wildcards, self.max_entries,
self.active_count, self.lookup_count,
self.matched_count)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.table_id,) = _unpack("!B", raw, offset)
offset = _skip(raw, offset, 3)
offset,self.name = _readzs(raw, offset, OFP_MAX_TABLE_NAME_LEN)
offset,(self.wildcards, self.max_entries, self.active_count,
self.lookup_count, self.matched_count) = \
_unpack("!LLLQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 64
def __eq__ (self, other):
if type(self) != type(other): return False
if self.table_id != other.table_id: return False
if self.name != other.name: return False
if self.wildcards != other.wildcards: return False
if self.max_entries != other.max_entries: return False
if self.active_count != other.active_count: return False
if self.lookup_count != other.lookup_count: return False
if self.matched_count != other.matched_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'name: ' + str(self.name) + '\n'
outstr += prefix + 'wildcards: ' + str(self.wildcards) + '\n'
outstr += prefix + 'max_entries: ' + str(self.max_entries) + '\n'
outstr += prefix + 'active_count: ' + str(self.active_count) + '\n'
outstr += prefix + 'lookup_count: ' + str(self.lookup_count) + '\n'
outstr += prefix + 'matched_count: ' + str(self.matched_count) + '\n'
return outstr
ofp_table_stats_reply = ofp_table_stats
@openflow_stats_request("OFPST_PORT", 4)
class ofp_port_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_NONE
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD6
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
return outstr
@openflow_stats_reply("OFPST_PORT", is_list = True)
class ofp_port_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_NONE
self.rx_packets = 0
self.tx_packets = 0
self.rx_bytes = 0
self.tx_bytes = 0
self.rx_dropped = 0
self.tx_dropped = 0
self.rx_errors = 0
self.tx_errors = 0
self.rx_frame_err = 0
self.rx_over_err = 0
self.rx_crc_err = 0
self.collisions = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD6
packed += struct.pack("!QQQQQQQQQQQQ", self.rx_packets,
self.tx_packets, self.rx_bytes, self.tx_bytes,
self.rx_dropped, self.tx_dropped,
self.rx_errors, self.tx_errors,
self.rx_frame_err, self.rx_over_err,
self.rx_crc_err, self.collisions)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.rx_packets, self.tx_packets, self.rx_bytes,
self.tx_bytes, self.rx_dropped, self.tx_dropped,
self.rx_errors, self.tx_errors, self.rx_frame_err,
self.rx_over_err, self.rx_crc_err, self.collisions) = \
_unpack("!QQQQQQQQQQQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 104
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.rx_packets != other.rx_packets: return False
if self.tx_packets != other.tx_packets: return False
if self.rx_bytes != other.rx_bytes: return False
if self.tx_bytes != other.tx_bytes: return False
if self.rx_dropped != other.rx_dropped: return False
if self.tx_dropped != other.tx_dropped: return False
if self.rx_errors != other.rx_errors: return False
if self.tx_errors != other.tx_errors: return False
if self.rx_frame_err != other.rx_frame_err: return False
if self.rx_over_err != other.rx_over_err: return False
if self.rx_crc_err != other.rx_crc_err: return False
if self.collisions != other.collisions: return False
return True
def __add__(self, other):
    if type(self) != type(other): return NotImplemented
port_no = OFPP_NONE
if self.port_no == other.port_no:
port_no = self.port_no
return ofp_port_stats(
port_no=port_no,
rx_packets = self.rx_packets + other.rx_packets,
tx_packets = self.tx_packets + other.tx_packets,
rx_bytes = self.rx_bytes + other.rx_bytes,
tx_bytes = self.tx_bytes + other.tx_bytes,
rx_dropped = self.rx_dropped + other.rx_dropped,
tx_dropped = self.tx_dropped + other.tx_dropped,
rx_errors = self.rx_errors + other.rx_errors,
tx_errors = self.tx_errors + other.tx_errors,
rx_frame_err = self.rx_frame_err + other.rx_frame_err,
rx_over_err = self.rx_over_err + other.rx_over_err,
rx_crc_err = self.rx_crc_err + other.rx_crc_err,
collisions = self.collisions + other.collisions)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'rx_packets: ' + str(self.rx_packets) + '\n'
outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
outstr += prefix + 'rx_bytes: ' + str(self.rx_bytes) + '\n'
outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
outstr += prefix + 'rx_dropped: ' + str(self.rx_dropped) + '\n'
outstr += prefix + 'tx_dropped: ' + str(self.tx_dropped) + '\n'
outstr += prefix + 'rx_errors: ' + str(self.rx_errors) + '\n'
outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
outstr += prefix + 'rx_frame_err: ' + str(self.rx_frame_err) + '\n'
outstr += prefix + 'rx_over_err: ' + str(self.rx_over_err) + '\n'
outstr += prefix + 'rx_crc_err: ' + str(self.rx_crc_err) + '\n'
outstr += prefix + 'collisions: ' + str(self.collisions) + '\n'
return outstr
ofp_port_stats_reply = ofp_port_stats
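# Illustrative sketch (not part of the original POX code): ofp_port_stats
# supports '+', which makes it easy to total the per-port entries found in
# the body of an OFPST_PORT ofp_stats_reply.
def _example_total_port_stats (port_stats_list):
  total = ofp_port_stats(port_no = OFPP_NONE)
  for entry in port_stats_list:
    total = total + entry
  return total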
@openflow_stats_request("OFPST_QUEUE", 5)
class ofp_queue_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_ALL
self.queue_id = OFPQ_ALL
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD2
packed += struct.pack("!L", self.queue_id)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,pad,self.queue_id) = _unpack("!HHL", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
return outstr
@openflow_stats_reply("OFPST_QUEUE", is_list = True)
class ofp_queue_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = 0
self.queue_id = 0
self.tx_bytes = 0
self.tx_packets = 0
self.tx_errors = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD2
packed += struct.pack("!LQQQ", self.queue_id, self.tx_bytes,
self.tx_packets, self.tx_errors)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no, pad, self.queue_id, self.tx_bytes,
self.tx_packets, self.tx_errors) = \
_unpack("!HHLQQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 32
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
if self.tx_bytes != other.tx_bytes: return False
if self.tx_packets != other.tx_packets: return False
if self.tx_errors != other.tx_errors: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
return outstr
ofp_queue_stats_reply = ofp_queue_stats
@openflow_stats_request("OFPST_VENDOR", 65535, is_list = False)
@openflow_stats_reply("OFPST_VENDOR", 65535, is_list = False)
class ofp_vendor_stats_generic (ofp_stats_body_base):
_MIN_LENGTH = 4
def __init__ (self, **kw):
self.vendor = None
self.data = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.data, "pack"):
return self.data.pack()
else:
return self.data
def pack (self):
assert self._assert()
packed = struct.pack("!L", self.vendor)
packed += self._pack_body()
return packed
def unpack (self, raw, offset, avail):
    if avail is None: raise RuntimeError("Requires length")
_offset = offset
offset,(self.vendor,) = _unpack("!L", raw, offset)
offset,self.data = _read(raw, offset, avail-4)
return offset
  def __len__ (self):
return 4+len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.vendor != other.vendor: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'vendor id: ' + str(self.vendor) + '\n'
outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
return outstr
class ofp_generic_stats_body (ofp_stats_body_base):
_MIN_LENGTH = 0
def __init__ (self, **kw):
self.data = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.data, "pack"):
return self.data.pack()
else:
return self.data
def pack (self):
assert self._assert()
    packed = b""
    packed += self._pack_body()
return packed
def unpack (self, raw, offset, avail):
    if avail is None: raise RuntimeError("Requires length")
_offset = offset
offset,self.data = _read(raw, offset, avail)
return offset
  def __len__ (self):
return len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
return outstr
@openflow_c_message("OFPT_PACKET_OUT", 13)
class ofp_packet_out (ofp_header):
_MIN_LENGTH = 16
def __init__ (self, **kw):
ofp_header.__init__(self)
self._buffer_id = NO_BUFFER
self.in_port = OFPP_NONE
self.actions = []
self._data = b''
# ofp_flow_mod & ofp_packet_out do some special handling of 'actions'
# Allow "action" as a synonym for "actions"
if 'action' in kw and 'actions' not in kw:
kw['actions'] = kw['action']
del kw['action']
initHelper(self, kw)
# Allow use of actions=<a single action> for kw args.
if not hasattr(self.actions, '__getitem__'):
self.actions = [self.actions]
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
@property
def data (self):
return self._data
@data.setter
def data (self, data):
if data is None:
self._data = b''
elif isinstance(data, packet_base):
self._data = data.pack()
elif isinstance(data, ofp_packet_in):
# Enable you to easily resend a packet
self._data = b''
self.buffer_id = data.buffer_id
if self.buffer_id is None:
#TODO: It'd be nice to log and then ignore if data is incomplete
# Unfortunately, we currently have no logging in here, so we
# assert instead which is a either too drastic or too quiet.
assert data.is_complete
self._data = data._data
self.in_port = data.in_port
elif isinstance(data, bytes):
self._data = data
assert assert_type("data", self._data, (bytes,))
def _validate (self):
if self.buffer_id is not None and self.data != b'':
return "can not have both buffer_id and data set"
return None
def pack (self):
assert self._assert()
actions = b''.join((i.pack() for i in self.actions))
actions_len = len(actions)
if self.data is not None:
return b''.join((ofp_header.pack(self),
struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
actions, self.data))
else:
return b''.join((ofp_header.pack(self),
struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
actions))
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(self._buffer_id, self.in_port, actions_len) = \
_unpack("!LHH", raw, offset)
offset,self.actions = _unpack_actions(raw, actions_len, offset)
remaining = length - (offset - _offset)
if remaining <= 0:
self.data = None
else:
offset,self.data = _read(raw, offset, remaining)
assert length == len(self)
return offset,length
def __len__ (self):
return 16 + reduce(operator.add, (len(a) for a in self.actions),
0) + (len(self.data) if self.data else 0)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.buffer_id != other.buffer_id: return False
if self.in_port != other.in_port: return False
if self.actions != other.actions: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
outstr += prefix + 'actions_len: ' + str(len(self.actions)) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
if obj is None:
raise RuntimeError("An element of self.actions was None! "
+ "Bad formatting...")
outstr += obj.show(prefix + ' ')
return outstr
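# Illustrative sketch: an ofp_packet_out tells the switch either to emit a
# packet it already buffered (buffer_id) or raw bytes supplied in 'data', but
# never both (see _validate above); the single-action "action=" synonym is
# also accepted.  This assumes ofp_action_output and OFPP_FLOOD, which are
# defined earlier in this module, and uses a made-up 64-byte payload.
def _example_build_packet_out ():
  msg = ofp_packet_out(action=ofp_action_output(port=OFPP_FLOOD),
                       data=b'\x00' * 64)
  return msg.pack()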
##3.7 Barrier Message
@openflow_s_message("OFPT_BARRIER_REPLY", 19,
reply_to="ofp_barrier_request")
class ofp_barrier_reply (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_c_message("OFPT_BARRIER_REQUEST", 18,
request_for="ofp_barrier_reply")
class ofp_barrier_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
#4 Asynchronous Messages
@openflow_s_message("OFPT_PACKET_IN", 10)
class ofp_packet_in (ofp_header):
_MIN_LENGTH = 18
def __init__ (self, **kw):
ofp_header.__init__(self)
self.in_port = OFPP_NONE
self._buffer_id = NO_BUFFER
self.reason = 0
self.data = None
self._total_len = None
if 'total_len' in kw:
self._total_len = kw.pop('total_len')
initHelper(self, kw)
def _validate (self):
if self.data and (self.total_len < len(self.data)):
return "total len less than data len"
@property
def total_len (self):
if self._total_len is None:
return len(self.data) if self.data else 0
return self._total_len
@total_len.setter
def total_len (self, value):
self._total_len = value
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
@property
def data (self):
return self._data
@data.setter
def data (self, data):
assert assert_type("data", data, (packet_base, str))
if data is None:
self._data = ''
elif isinstance(data, packet_base):
self._data = data.pack()
else:
self._data = data
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!LHHBB", self._buffer_id, self.total_len,
self.in_port, self.reason, 0)
packed += self.data
#TODO: Padding? See __len__
return packed
@property
def is_complete (self):
if self.buffer_id is not None: return True
return len(self.data) == self.total_len
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self._buffer_id, self._total_len, self.in_port, self.reason,
pad) = _unpack("!LHHBB", raw, offset)
offset,self.data = _read(raw, offset, length-18)
assert length == len(self)
return offset,length
def __len__ (self):
#FIXME: This is probably wrong, but it's not clear from the
# spec what's supposed to be going on here.
#if len(self.data) < 2:
# return 20 + len(self.data)
return 18 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.buffer_id != other.buffer_id: return False
if self.total_len != other.total_len: return False
if self.in_port != other.in_port: return False
if self.reason != other.reason: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'total_len: ' + str(self._total_len) + '\n'
outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'data: ' + str(self.data) + '\n'
return outstr
@openflow_s_message("OFPT_FLOW_REMOVED", 11)
class ofp_flow_removed (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.match = ofp_match()
self.cookie = 0
self.priority = 0
self.reason = 0
self.duration_sec = 0
self.duration_nsec = 0
self.idle_timeout = 0
self.packet_count = 0
self.byte_count = 0
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack()
packed += struct.pack("!QHB", self.cookie, self.priority, self.reason)
packed += _PAD
packed += struct.pack("!LLH", self.duration_sec, self.duration_nsec,
self.idle_timeout)
packed += _PAD2
packed += struct.pack("!QQ", self.packet_count, self.byte_count)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset)
offset,(self.cookie, self.priority, self.reason) = \
_unpack("!QHB", raw, offset)
offset = _skip(raw, offset, 1)
offset,(self.duration_sec, self.duration_nsec, self.idle_timeout) = \
_unpack("!LLH", raw, offset)
offset = _skip(raw, offset, 2)
offset,(self.packet_count, self.byte_count) = \
_unpack("!QQ", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 48 + len(ofp_match)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.priority != other.priority: return False
if self.reason != other.reason: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.idle_timeout != other.idle_timeout: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
return outstr
@openflow_s_message("OFPT_PORT_STATUS", 12)
class ofp_port_status (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.reason = 0
self.desc = ofp_phy_port()
initHelper(self, kw)
def _validate (self):
if not isinstance(self.desc, ofp_phy_port):
return "desc is not class ofp_phy_port"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!B", self.reason)
packed += _PAD * 7 # Pad
packed += self.desc.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.reason,) = _unpack("!B", raw, offset)
offset = _skip(raw, offset, 7)
offset = self.desc.unpack(raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 64
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.reason != other.reason: return False
if self.desc != other.desc: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'desc: \n'
outstr += self.desc.show(prefix + ' ')
return outstr
@openflow_s_message("OFPT_ERROR", 1)
class ofp_error (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = 0
self.code = 0
self.data = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.code)
packed += self.data
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.code) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length - 12)
assert length == len(self)
return offset,length
def __len__ (self):
return 12 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
t = self.type
c = self.code
if t < len(ofp_error_type):
n = ofp_error_type_map[t]
t = "%s (%i)" % (n, t)
n = 'ofp' + n.lower()[5:] + '_code_map'
if n in sys.modules[__name__].__dict__:
if c in sys.modules[__name__].__dict__[n]:
c = "%s (%i)" % (sys.modules[__name__].__dict__[n][c], c)
outstr += prefix + 'type: ' + str(t) + '\n'
outstr += prefix + 'code: ' + str(c) + '\n'
if len(self.data):
outstr += prefix + 'datalen: %s\n' % (len(self.data),)
outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
return outstr.strip()
#5. Symmetric Messages
@openflow_sc_message("OFPT_HELLO", 0)
class ofp_hello (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_sc_message("OFPT_ECHO_REQUEST", 2,
request_for="ofp_echo_reply")
class ofp_echo_request (ofp_header):
_MIN_LENGTH = 8
def __init__ (self, **kw):
ofp_header.__init__(self)
self.body = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.body
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert length == len(self)
return offset,length
def __len__ (self):
return 8 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
@openflow_sc_message("OFPT_ECHO_REPLY", 3,
reply_to="ofp_echo_request")
class ofp_echo_reply (ofp_header):
_MIN_LENGTH = 8
def __init__ (self, **kw):
ofp_header.__init__(self)
self.body = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.body
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert length == len(self)
return offset,length
def __len__ (self):
return 8 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
class ofp_vendor_base (ofp_header):
header_type = 4 # OFPT_VENDOR
"""
Base class for vendor messages
"""
pass
@openflow_sc_message("OFPT_VENDOR", 4)
class ofp_vendor_generic (ofp_vendor_base):
_MIN_LENGTH = 12
_collect_raw = False
def __init__ (self, **kw):
ofp_header.__init__(self)
self.vendor = 0
self.data = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!L", self.vendor)
if hasattr(self.data, "pack"):
packed += self.data.pack()
else:
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(self.vendor,) = _unpack("!L", raw, offset)
offset,self.data = _read(raw, offset, length-12)
if self._collect_raw:
      self.raw = raw[_offset:_offset+length]
return offset,length
def __len__ (self):
return 12 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.vendor != other.vendor: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
outstr += prefix + 'datalen: ' + str(len(self.data)) + '\n'
#outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
return outstr
@openflow_c_message("OFPT_FEATURES_REQUEST", 5,
request_for="ofp_features_reply")
class ofp_features_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_c_message("OFPT_GET_CONFIG_REQUEST", 7,
request_for="ofp_get_config_reply")
class ofp_get_config_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_s_message("OFPT_GET_CONFIG_REPLY", 8,
reply_to="ofp_get_config_request")
class ofp_get_config_reply (ofp_header): # uses ofp_switch_config
def __init__ (self, **kw):
ofp_header.__init__(self)
self.flags = 0
self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.flags, self.miss_send_len)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.flags, self.miss_send_len) = \
_unpack("!HH", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
return outstr
def _unpack_queue_props (b, length, offset=0):
"""
Parses queue props from a buffer
b is a buffer (bytes)
offset, if specified, is where in b to start decoding
  returns (next_offset, [Props])
"""
if (len(b) - offset) < length: raise UnderrunError
props = []
end = length + offset
while offset < end:
(t,l) = struct.unpack_from("!HH", b, offset)
if (len(b) - offset) < l: raise UnderrunError
a = _queue_prop_type_to_class.get(t)
if a is None:
# Use generic prop header for unknown type
a = ofp_queue_prop_generic()
else:
a = a()
a.unpack(b[offset:offset+l])
assert len(a) == l
props.append(a)
offset += l
return (offset, props)
def _unpack_actions (b, length, offset=0):
"""
Parses actions from a buffer
b is a buffer (bytes)
offset, if specified, is where in b to start decoding
returns (next_offset, [Actions])
"""
if (len(b) - offset) < length: raise UnderrunError
actions = []
end = length + offset
while offset < end:
(t,l) = struct.unpack_from("!HH", b, offset)
if (len(b) - offset) < l: raise UnderrunError
a = _action_type_to_class.get(t)
if a is None:
# Use generic action header for unknown type
a = ofp_action_generic()
else:
a = a()
a.unpack(b[offset:offset+l])
assert len(a) == l
actions.append(a)
offset += l
return (offset, actions)
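# Illustrative round trip for the helper above (_unpack_queue_props works the
# same way): a packed action list can be rebuilt from the raw bytes.  The
# output action and port are arbitrary and assume ofp_action_output from
# earlier in this module.
def _example_action_roundtrip ():
  raw = ofp_action_output(port=1).pack()
  offset, actions = _unpack_actions(raw, len(raw))
  return offset, actions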
def _init ():
def formatMap (name, m):
o = name + " = {\n"
vk = sorted([(v,k) for k,v in m.iteritems()])
maxlen = 2 + len(reduce(lambda a,b: a if len(a)>len(b) else b,
(v for k,v in vk)))
fstr = " %-" + str(maxlen) + "s : %s,\n"
for v,k in vk:
o += fstr % ("'" + k + "'",v)
o += "}"
return o
"""
maps = []
for k,v in globals().iteritems():
if k.startswith("ofp_") and k.endswith("_map") and type(v) == dict:
maps.append((k,v))
for name,m in maps:
rev = {}
name = name[:-4]
names = globals()[name]
for n in names:
rev[n] = globals()[n]
globals()[name + '_rev_map'] = rev
print(formatMap(name + "_rev_map", rev))
return
"""
maps = []
for k,v in globals().iteritems():
if (k.startswith("ofp_") and k.endswith("_rev_map")
and type(v) == dict):
maps.append((k[:-8],v))
for name,m in maps:
# Try to generate forward maps
forward = dict(((v,k) for k,v in m.iteritems()))
if len(forward) == len(m):
if name + "_map" not in globals():
globals()[name + "_map"] = forward
else:
print(name + "_rev_map is not a map")
# Try to generate lists
v = m.values()
v.sort()
if v[-1] != len(v)-1:
# Allow ones where the last value is a special value (e.g., VENDOR)
del v[-1]
if len(v) > 0 and v[0] == 0 and v[-1] == len(v)-1:
globals()[name] = v
    # Generate globals
for k,v in m.iteritems():
globals()[k] = v
_init()
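# Illustrative sketch of what _init() produces: every ofp_*_rev_map defined
# earlier yields a forward map, a sorted value list and module-level
# constants.  The names used here (ofp_type_map, ofp_type_rev_map,
# OFPT_HELLO) are assumed from the rev maps declared earlier in this module.
def _example_generated_lookups ():
  name = ofp_type_map[OFPT_HELLO]          # numeric value -> symbolic name
  value = ofp_type_rev_map['OFPT_HELLO']   # symbolic name -> numeric value
  return name, value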
# Values from macro definitions
OFP_FLOW_PERMANENT = 0
OFP_DL_TYPE_ETH2_CUTOFF = 0x0600
DESC_STR_LEN = 256
OFPFW_ICMP_CODE = OFPFW_TP_DST
OFPQ_MIN_RATE_UNCFG = 0xffff
OFP_VERSION = 0x01
OFP_MAX_TABLE_NAME_LEN = 32
OFP_DL_TYPE_NOT_ETH_TYPE = 0x05ff
OFP_DEFAULT_MISS_SEND_LEN = 128
OFP_MAX_PORT_NAME_LEN = 16
OFP_SSL_PORT = 6633
OFPFW_ICMP_TYPE = OFPFW_TP_SRC
OFP_TCP_PORT = 6633
SERIAL_NUM_LEN = 32
OFP_DEFAULT_PRIORITY = 0x8000
OFP_VLAN_NONE = 0xffff
OFPQ_ALL = 0xffffffff
ofp_match_data = {
'in_port' : (0, OFPFW_IN_PORT),
'dl_src' : (EMPTY_ETH, OFPFW_DL_SRC),
'dl_dst' : (EMPTY_ETH, OFPFW_DL_DST),
'dl_vlan' : (0, OFPFW_DL_VLAN),
'dl_vlan_pcp' : (0, OFPFW_DL_VLAN_PCP),
'dl_type' : (0, OFPFW_DL_TYPE),
'nw_tos' : (0, OFPFW_NW_TOS),
'nw_proto' : (0, OFPFW_NW_PROTO),
'nw_src' : (0, OFPFW_NW_SRC_ALL),
'nw_dst' : (0, OFPFW_NW_DST_ALL),
'tp_src' : (0, OFPFW_TP_SRC),
'tp_dst' : (0, OFPFW_TP_DST),
}
| bsd-3-clause |
was4444/chromium.src | tools/perf/profile_creators/fast_navigation_profile_extender.py | 14 | 8768 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from profile_creators import profile_extender
from telemetry.core import exceptions
from telemetry.core import util
class FastNavigationProfileExtender(profile_extender.ProfileExtender):
"""Extends a Chrome profile.
This class creates or extends an existing profile by performing a set of tab
navigations in large batches. This is accomplished by opening a large number
of tabs, simultaneously navigating all the tabs, and then waiting for all the
tabs to load. This provides two benefits:
- Takes advantage of the high number of logical cores on modern CPUs.
- The total time spent waiting for navigations to time out scales linearly
with the number of batches, but does not scale with the size of the
batch.
"""
def __init__(self, finder_options, maximum_batch_size):
"""Initializer.
Args:
maximum_batch_size: A positive integer indicating the number of tabs to
simultaneously perform navigations.
"""
super(FastNavigationProfileExtender, self).__init__(finder_options)
# The instance keeps a list of Tabs that can be navigated successfully.
# This means that the Tab is not crashed, and is processing JavaScript in a
# timely fashion.
self._navigation_tabs = []
# The number of tabs to use.
self._NUM_TABS = maximum_batch_size
# The amount of additional time to wait for a batch of pages to finish
# loading for each page in the batch.
self._BATCH_TIMEOUT_PER_PAGE_IN_SECONDS = 20
# The amount of time to wait for a page to quiesce. Some pages will never
# quiesce.
self._TIME_TO_WAIT_FOR_PAGE_TO_QUIESCE_IN_SECONDS = 10
def Run(self):
"""Superclass override."""
try:
self.SetUpBrowser()
self._PerformNavigations()
finally:
self.TearDownBrowser()
# When there hasn't been an exception, verify that the profile was
# correctly extended.
# TODO(erikchen): I've intentionally omitted my implementation of
# VerifyProfileWasExtended() in small_profile_extender, since the profile
# is not being correctly extended. http://crbug.com/484833
# http://crbug.com/484880
self.VerifyProfileWasExtended()
def VerifyProfileWasExtended(self):
"""Verifies that the profile was correctly extended.
Can be overridden by subclasses.
"""
pass
def GetUrlIterator(self):
"""Gets URLs for the browser to navigate to.
Intended for subclass override.
Returns:
An iterator whose elements are urls to be navigated to.
"""
raise NotImplementedError()
def ShouldExitAfterBatchNavigation(self):
"""Returns a boolean indicating whether profile extension is finished.
Intended for subclass override.
"""
raise NotImplementedError()
def CleanUpAfterBatchNavigation(self):
"""A hook for subclasses to perform cleanup after each batch of
navigations.
Can be overridden by subclasses.
"""
pass
def _RefreshNavigationTabs(self):
"""Updates the member self._navigation_tabs to contain self._NUM_TABS
elements, each of which is not crashed. The crashed tabs are intentionally
leaked, since Telemetry doesn't have a good way of killing crashed tabs.
It is also possible for a tab to be stalled in an infinite JavaScript loop.
These tabs will be in self.browser.tabs, but not in self._navigation_tabs.
There is no way to kill these tabs, so they are also leaked. This method is
careful to only use tabs in self._navigation_tabs, or newly created tabs.
"""
live_tabs = [tab for tab in self._navigation_tabs if tab.IsAlive()]
self._navigation_tabs = live_tabs
while len(self._navigation_tabs) < self._NUM_TABS:
self._navigation_tabs.append(self._browser.tabs.New())
def _RemoveNavigationTab(self, tab):
"""Removes a tab which is no longer in a useable state from
self._navigation_tabs. The tab is not removed from self.browser.tabs,
since there is no guarantee that the tab can be safely removed."""
self._navigation_tabs.remove(tab)
def _RetrieveTabUrl(self, tab, timeout):
"""Retrives the URL of the tab."""
# TODO(erikchen): Use tab.url instead, which talks to the browser process
# instead of the renderer process. http://crbug.com/486119
return tab.EvaluateJavaScript('document.URL', timeout)
def _WaitForUrlToChange(self, tab, initial_url, end_time):
"""Waits for the tab to navigate away from its initial url.
If time.time() is larger than end_time, the function does nothing.
Otherwise, the function tries to return no later than end_time.
"""
while True:
seconds_to_wait = end_time - time.time()
if seconds_to_wait <= 0:
break
current_url = self._RetrieveTabUrl(tab, seconds_to_wait)
if current_url != initial_url and current_url != '':
break
# Retrieving the current url is a non-trivial operation. Add a small
# sleep here to prevent this method from contending with the actual
# navigation.
time.sleep(0.01)
def _WaitForTabToBeReady(self, tab, end_time):
"""Waits for the tab to be ready.
If time.time() is larger than end_time, the function does nothing.
Otherwise, the function tries to return no later than end_time.
"""
seconds_to_wait = end_time - time.time()
if seconds_to_wait <= 0:
return
tab.WaitForDocumentReadyStateToBeComplete(seconds_to_wait)
# Wait up to 10 seconds for the page to quiesce. If the page hasn't
# quiesced in 10 seconds, it will probably never quiesce.
seconds_to_wait = end_time - time.time()
seconds_to_wait = max(0, seconds_to_wait)
try:
util.WaitFor(tab.HasReachedQuiescence, seconds_to_wait)
except exceptions.TimeoutException:
pass
def _BatchNavigateTabs(self, batch):
"""Performs a batch of tab navigations with minimal delay.
Args:
batch: A list of tuples (tab, url).
Returns:
A list of tuples (tab, initial_url). |initial_url| is the url of the
|tab| prior to a navigation command being sent to it.
"""
# Attempting to pass in a timeout of 0 seconds results in a synchronous
# socket error from the websocket library. Pass in a very small timeout
# instead so that the websocket library raises a Timeout exception. This
# prevents the logic from accidentally catching different socket
# exceptions.
timeout_in_seconds = 0.01
queued_tabs = []
for tab, url in batch:
initial_url = self._RetrieveTabUrl(tab, 20)
try:
tab.Navigate(url, None, timeout_in_seconds)
except exceptions.TimeoutException:
# We expect to receive a timeout exception, since we're not waiting for
# the navigation to complete.
pass
queued_tabs.append((tab, initial_url))
return queued_tabs
def _WaitForQueuedTabsToLoad(self, queued_tabs):
"""Waits for all the batch navigated tabs to finish loading.
Args:
queued_tabs: A list of tuples (tab, initial_url). Each tab is guaranteed
to have already been sent a navigation command.
"""
total_batch_timeout = (len(queued_tabs) *
self._BATCH_TIMEOUT_PER_PAGE_IN_SECONDS)
end_time = time.time() + total_batch_timeout
for tab, initial_url in queued_tabs:
# Since we didn't wait any time for the tab url navigation to commit, it's
# possible that the tab hasn't started navigating yet.
self._WaitForUrlToChange(tab, initial_url, end_time)
self._WaitForTabToBeReady(tab, end_time)
def _GetUrlsToNavigate(self, url_iterator):
"""Returns an array of urls to navigate to, given a url_iterator."""
urls = []
for _ in xrange(self._NUM_TABS):
try:
urls.append(url_iterator.next())
except StopIteration:
break
return urls
def _PerformNavigations(self):
"""Repeatedly fetches a batch of urls, and navigates to those urls. This
will run until an empty batch is returned, or
ShouldExitAfterBatchNavigation() returns True.
"""
url_iterator = self.GetUrlIterator()
while True:
self._RefreshNavigationTabs()
urls = self._GetUrlsToNavigate(url_iterator)
if len(urls) == 0:
break
batch = []
for i in range(len(urls)):
url = urls[i]
tab = self._navigation_tabs[i]
batch.append((tab, url))
queued_tabs = self._BatchNavigateTabs(batch)
self._WaitForQueuedTabsToLoad(queued_tabs)
self.CleanUpAfterBatchNavigation()
if self.ShouldExitAfterBatchNavigation():
break
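# A minimal illustrative subclass (hypothetical, for exposition only): the two
# abstract hooks are the only pieces a concrete extender must supply; the run
# ends naturally once the URL iterator is exhausted.
class _ExampleProfileExtender(FastNavigationProfileExtender):
  """Navigates a fixed list of URLs in batches of up to 10 tabs."""
  def __init__(self, finder_options, urls):
    super(_ExampleProfileExtender, self).__init__(finder_options, 10)
    self._url_iterator = iter(urls)
  def GetUrlIterator(self):
    return self._url_iterator
  def ShouldExitAfterBatchNavigation(self):
    # Rely on the empty-batch check in _PerformNavigations to stop the run.
    return False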
| bsd-3-clause |
Telrik/komimport-2.0 | vendor/guzzlehttp/guzzle/docs/conf.py | 100 | 2995 | import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Guzzle Sphinx theme setup ------------------------------------------------
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
"project_nav_name": "Guzzle",
"github_user": "guzzle",
"github_repo": "guzzle",
"disqus_comments_shortname": "guzzle",
"google_analytics_account": "UA-22752917-1"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Guzzle.tex', u'Guzzle Documentation',
u'Michael Dowling', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'guzzle', u'Guzzle Documentation',
[u'Michael Dowling'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Guzzle', u'Guzzle Documentation',
u'Michael Dowling', 'Guzzle', 'One line description of project.',
'Miscellaneous'),
]
| bsd-3-clause |
kewisch/bedrock | bedrock/newsletter/tests/test_footer_form.py | 3 | 2110 | from funfactory.urlresolvers import reverse
from mock import patch
from nose.tools import eq_
from pyquery import PyQuery as pq
from bedrock.mozorg.tests import TestCase
@patch('bedrock.newsletter.utils.get_languages_for_newsletters',
lambda *x: set(['en', 'fr', 'pt']))
@patch('lib.l10n_utils.template_is_active', lambda *x: True)
class TestNewsletterFooter(TestCase):
def setUp(self):
self.view_name = 'newsletter.mozilla-and-you'
def test_country_selected(self):
"""
The correct country for the locale should be initially selected.
"""
with self.activate('en-US'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_country option[selected="selected"]').val(), 'us')
# no country in locale, no country selected
with self.activate('fr'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_country option[selected="selected"]').val(), '')
with self.activate('pt-BR'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_country option[selected="selected"]').val(), 'br')
def test_language_selected(self):
"""
The correct language for the locale should be initially selected or
'en' if it's not an option.
"""
with self.activate('fr'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_lang option[selected="selected"]').val(), 'fr')
# with hyphenated regional locale, should have only lang
with self.activate('pt-BR'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_lang option[selected="selected"]').val(), 'pt')
# not supported. should default to ''
with self.activate('ak'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_lang option[selected="selected"]').val(), '')
| mpl-2.0 |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/isapi/simple.py | 23 | 2490 | """Simple base-classes for extensions and filters.
None of the filter and extension functions are considered 'optional' by the
framework. These base-classes provide simple implementations for the
Initialize and Terminate functions, allowing you to omit them,
It is not necessary to use these base-classes - but if you don't, you
must ensure each of the required methods are implemented.
"""
class SimpleExtension:
"Base class for a simple ISAPI extension"
def __init__(self):
pass
def GetExtensionVersion(self, vi):
"""Called by the ISAPI framework to get the extension version
The default implementation uses the classes docstring to
set the extension description."""
# nod to our reload capability - vi is None when we are reloaded.
if vi is not None:
vi.ExtensionDesc = self.__doc__
def HttpExtensionProc(self, control_block):
"""Called by the ISAPI framework for each extension request.
sub-classes must provide an implementation for this method.
"""
raise NotImplementedError("sub-classes should override HttpExtensionProc")
def TerminateExtension(self, status):
"""Called by the ISAPI framework as the extension terminates.
"""
pass
class SimpleFilter:
"Base class for a a simple ISAPI filter"
filter_flags = None
def __init__(self):
pass
def GetFilterVersion(self, fv):
"""Called by the ISAPI framework to get the extension version
The default implementation uses the classes docstring to
set the extension description, and uses the classes
filter_flags attribute to set the ISAPI filter flags - you
must specify filter_flags in your class.
"""
if self.filter_flags is None:
raise RuntimeError("You must specify the filter flags")
# nod to our reload capability - fv is None when we are reloaded.
if fv is not None:
fv.Flags = self.filter_flags
fv.FilterDesc = self.__doc__
def HttpFilterProc(self, fc):
"""Called by the ISAPI framework for each filter request.
sub-classes must provide an implementation for this method.
"""
raise NotImplementedError("sub-classes should override HttpExtensionProc")
def TerminateFilter(self, status):
"""Called by the ISAPI framework as the filter terminates.
"""
pass
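# Minimal illustrative subclass (hypothetical): only HttpExtensionProc needs
# to be supplied, since the base class already reports the version and handles
# termination.  The return value below is a placeholder; a real handler
# returns one of the HSE_STATUS_* constants expected by the ISAPI framework.
class _ExampleExtension(SimpleExtension):
    "Example extension skeleton"
    def HttpExtensionProc(self, control_block):
        # A real extension would build a response via control_block here.
        return 0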
| apache-2.0 |
datalogistics/libdlt | tools/dlt_xfer.py | 1 | 4008 | #!/usr/bin/env python3
import os
import argparse
import json
import libdlt
from unis.exceptions import CollectionIndexError
from libdlt.util.common import print_progress
SYS_PATH="/etc/periscope"
USER_DEPOTS=os.path.join(SYS_PATH, "depots.conf")
UNIS_URL = "http://unis.crest.iu.edu:8890"
XFER_TOTAL = 0
def progress(depot, name, total, size, offset):
global XFER_TOTAL
if not size:
XFER_TOTAL = 0
else:
XFER_TOTAL += size
print_progress(XFER_TOTAL, total, name)
def main():
parser = argparse.ArgumentParser(description="DLT File Transfer Tool")
parser.add_argument('files', metavar='FILES', type=str, nargs='+',
help='Files to transfer')
parser.add_argument('-u', '--upload', action='store_true',
help='Perform file upload (default is download)')
parser.add_argument('-H', '--host', type=str, default=UNIS_URL,
help='UNIS instance for uploading eXnode metadata')
parser.add_argument('-b', '--bs', type=str, default='20m',
help='Block size')
parser.add_argument('-d', '--depot-file', type=str, default=None,
help='Depots in a JSON dict used for upload')
parser.add_argument('-o', '--output', type=str, default=None,
help='Output file')
parser.add_argument('-V', '--visualize', type=str, default=None,
help='Periscope URL for visualization')
parser.add_argument('-D', '--debug', type=str, default=None,
help='Include verbose logging output')
parser.add_argument('-t', '--threads', type=int, default=5,
help='Number of threads for operation')
parser.add_argument('-r', '--recursive', action='store_true',
help='Recurse into subdirectories')
parser.add_argument('-c', '--cert', type=str, default=None,
help='SSL Cert/Key for HTTPS endpoints')
args = parser.parse_args()
bs = args.bs
df = args.depot_file
if args.debug in ['TRACE', 'DEBUG']:
import logging as plogging
from lace import logging
plogging.basicConfig(format='%(color)s[%(asctime)-15s] [%(levelname)s] %(name)s%(reset)s %(message)s')
log = logging.getLogger('libdlt')
log.setLevel(logging.DEBUG)
if args.debug == 'TRACE':
from lace.logging import trace
trace.setLevel(logging.DEBUG, True)
depots = None
if df:
try:
f = open(df, "r")
depots = json.loads(f.read())
except Exception as e:
print ("{}, trying {}".format(e, USER_DEPOTS))
try:
f = open(USER_DEPOTS, "r")
                depots = json.loads(f.read())
except:
print ("ERROR: No default depot file: {}".format(USER_DEPOTS))
exit(1)
sess = libdlt.Session([{"default": True, "url": args.host, "ssl": args.cert}],
bs=bs, depots=depots, threads=args.threads,
**{"viz_url": args.visualize})
xfer = sess.upload if args.upload else sess.download
flist = []
for f in args.files:
if args.recursive and os.path.isdir(f):
for dirpath, dirnames, files in os.walk(f):
for n in files:
flist.append(os.path.join(dirpath, n))
else:
flist.append(f)
for f in flist:
try:
result = xfer(f, folder=args.output, progress_cb=progress)
diff, res = result.time, result.exnode
except CollectionIndexError as e:
print ("ERROR: invalid file or URL: {}".format(e))
exit(1)
print ("{0} ({1} {2:.2f} MB/s) {3}".format(res.name, res.size,
res.size/1e6/diff,
res.selfRef))
if __name__ == "__main__":
main()
| bsd-3-clause |
whitzhu/kolibri | kolibri/auth/test/test_permissions_classes.py | 10 | 5747 | from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from mock import Mock
from ..models import FacilityUser, DeviceOwner, Facility, KolibriAnonymousUser
from ..api import KolibriAuthPermissions
from ..permissions.base import BasePermissions
from ..permissions.general import AllowAll, DenyAll
class BasePermissionsThrowExceptionsTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.object = object() # shouldn't matter what the object is, for these tests
self.facility_user = FacilityUser.objects.create(username="qqq", facility=self.facility)
self.device_owner = DeviceOwner.objects.create(username="zzz")
self.anon_user = KolibriAnonymousUser()
self.permissions = BasePermissions()
def test_user_cannot_create(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_create_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_create_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_create_object(self.anon_user, self.object))
def test_user_cannot_read(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_read_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_read_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_read_object(self.anon_user, self.object))
def test_user_cannot_update(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_update_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_update_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_update_object(self.anon_user, self.object))
def test_user_cannot_delete(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_delete_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_delete_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_delete_object(self.anon_user, self.object))
class TestBooleanOperationsOnPermissionClassesTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.obj = object()
self.user = FacilityUser.objects.create(username='dummyuser', facility=self.facility)
self.queryset = FacilityUser.objects.all()
def assertAllowAll(self, perms, test_filtering=True):
self.assertTrue(perms.user_can_create_object(self.user, self.obj))
self.assertTrue(perms.user_can_read_object(self.user, self.obj))
self.assertTrue(perms.user_can_update_object(self.user, self.obj))
self.assertTrue(perms.user_can_delete_object(self.user, self.obj))
if test_filtering:
self.assertSetEqual(set(self.queryset), set(perms.readable_by_user_filter(self.user, self.queryset)))
def assertDenyAll(self, perms, test_filtering=True):
self.assertFalse(perms.user_can_create_object(self.user, self.obj))
self.assertFalse(perms.user_can_read_object(self.user, self.obj))
self.assertFalse(perms.user_can_update_object(self.user, self.obj))
self.assertFalse(perms.user_can_delete_object(self.user, self.obj))
if test_filtering:
self.assertEqual(len(perms.readable_by_user_filter(self.user, self.queryset)), 0)
def test_allow_or_allow(self):
self.assertAllowAll(AllowAll() | AllowAll())
def test_allow_or_deny(self):
self.assertAllowAll(AllowAll() | DenyAll())
def test_deny_or_allow(self):
self.assertAllowAll(DenyAll() | AllowAll())
def test_deny_or_deny(self):
self.assertDenyAll(DenyAll() | DenyAll())
def test_allow_and_allow(self):
self.assertAllowAll(AllowAll() & AllowAll())
def test_allow_and_deny(self):
self.assertDenyAll(AllowAll() & DenyAll())
def test_deny_and_allow(self):
self.assertDenyAll(DenyAll() & AllowAll())
def test_deny_and_deny(self):
self.assertDenyAll(DenyAll() & DenyAll())
def test_or_is_shortcircuited_for_efficiency(self):
self.assertAllowAll(AllowAll() | BasePermissions(), test_filtering=False)
def test_and_is_shortcircuited_for_efficiency(self):
self.assertDenyAll(DenyAll() & BasePermissions(), test_filtering=False)
def test_or_is_not_shortcircuited_inappropriately(self):
with self.assertRaises(NotImplementedError):
self.assertAllowAll(BasePermissions() | AllowAll())
def test_and_is_not_shortcircuited_inappropriately(self):
with self.assertRaises(NotImplementedError):
self.assertDenyAll(BasePermissions() & DenyAll())
class KolibriAuthPermissionsTestCase(TestCase):
def test_bad_request_method(self):
request = Mock(method="BADWOLF")
view = Mock()
obj = Mock()
perm_obj = KolibriAuthPermissions()
self.assertFalse(perm_obj.has_object_permission(request, view, obj))
| mit |
FedeDR/django-oscar-paypal | paypal/payflow/models.py | 9 | 3364 | from __future__ import unicode_literals
import re
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from paypal.payflow import codes
from paypal import base
@python_2_unicode_compatible
class PayflowTransaction(base.ResponseModel):
# This is the linking parameter between the merchant and PayPal. It is
# normally set to the order number
comment1 = models.CharField(_("Comment 1"), max_length=128, db_index=True)
trxtype = models.CharField(_("Transaction type"), max_length=12)
tender = models.CharField(_("Bankcard or PayPal"), max_length=12, null=True)
amount = models.DecimalField(max_digits=12, decimal_places=2, null=True,
blank=True)
# Response params
pnref = models.CharField(_("Payflow transaction ID"), max_length=32,
null=True)
ppref = models.CharField(_("Payment transaction ID"), max_length=32,
unique=True, null=True)
result = models.CharField(max_length=32, null=True, blank=True)
respmsg = models.CharField(_("Response message"), max_length=512)
authcode = models.CharField(_("Auth code"), max_length=32, null=True,
blank=True)
# Fraud/risk params
cvv2match = models.CharField(_("CVV2 check"), null=True, blank=True,
max_length=12)
avsaddr = models.CharField(_("House number check"), null=True, blank=True,
max_length=1)
avszip = models.CharField(_("Zip/Postcode check"), null=True, blank=True,
max_length=1)
class Meta:
ordering = ('-date_created',)
app_label = 'paypal'
def save(self, *args, **kwargs):
self.raw_request = re.sub(r'PWD=.+?&', 'PWD=XXXXXX&', self.raw_request)
        self.raw_request = re.sub(r'ACCT=\d+(\d{4})&', r'ACCT=XXXXXXXXXXXX\1&', self.raw_request)
self.raw_request = re.sub(r'CVV2=\d+&', 'CVV2=XXX&', self.raw_request)
return super(PayflowTransaction, self).save(*args, **kwargs)
def get_trxtype_display(self):
return ugettext(codes.trxtype_map.get(self.trxtype, self.trxtype))
get_trxtype_display.short_description = _("Transaction type")
def get_tender_display(self):
return ugettext(codes.tender_map.get(self.tender, ''))
get_tender_display.short_description = _("Tender")
@property
def is_approved(self):
return self.result in ('0', '126')
def is_address_verified(self):
        return self.avsaddr == 'Y' and self.avszip == 'Y'
def __str__(self):
return self.pnref
@property
def can_be_voided(self):
if self.trxtype != codes.AUTHORIZATION:
return False
return self.is_approved
@property
def can_be_credited(self):
"""
Test if this txn can be credited
"""
if self.trxtype not in (codes.SALE, codes.DELAYED_CAPTURE):
return False
return self.is_approved
@property
def can_be_captured(self):
"""
Test if this txn can be captured
"""
if self.trxtype != codes.AUTHORIZATION:
return False
return self.is_approved
| bsd-3-clause |
Ban3/Limnoria | plugins/Dict/__init__.py | 4 | 2289 | ###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Commands that use the dictd protocol to define word.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you\'re keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
kennedyshead/home-assistant | homeassistant/components/smarthab/config_flow.py | 2 | 2392 | """SmartHab configuration flow."""
import logging
import pysmarthab
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
class SmartHabConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""SmartHab config flow."""
VERSION = 1
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_EMAIL, default=user_input.get(CONF_EMAIL, "")
): str,
vol.Required(CONF_PASSWORD): str,
}
),
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return self._show_setup_form(user_input, None)
username = user_input[CONF_EMAIL]
password = user_input[CONF_PASSWORD]
# Check if already configured
if self.unique_id is None:
await self.async_set_unique_id(username)
self._abort_if_unique_id_configured()
# Setup connection with SmartHab API
hub = pysmarthab.SmartHab()
try:
await hub.async_login(username, password)
# Verify that passed in configuration works
if hub.is_logged_in():
return self.async_create_entry(
title=username, data={CONF_EMAIL: username, CONF_PASSWORD: password}
)
errors["base"] = "invalid_auth"
except pysmarthab.RequestFailedException:
_LOGGER.exception("Error while trying to reach SmartHab API")
errors["base"] = "service"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error during login")
errors["base"] = "unknown"
return self._show_setup_form(user_input, errors)
async def async_step_import(self, import_info):
"""Handle import from legacy config."""
return await self.async_step_user(import_info)
| apache-2.0 |
sql-machine-learning/sqlflow | python/runtime/pai/submitter_evaluate.py | 1 | 4277 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import runtime.temp_file as temp_file
from runtime import db
from runtime.diagnostics import SQLFlowDiagnostic
from runtime.model import EstimatorType
from runtime.pai import cluster_conf, pai_model, table_ops
from runtime.pai.get_pai_tf_cmd import (ENTRY_FILE, JOB_ARCHIVE_FILE,
PARAMS_FILE, get_pai_tf_cmd)
from runtime.pai.prepare_archive import prepare_archive
from runtime.pai.submit_pai_task import submit_pai_task
from runtime.pai_local.try_run import try_pai_local_run
from runtime.step.create_result_table import create_evaluate_table
def submit_pai_evaluate(datasource,
original_sql,
select,
label_name,
model,
model_params,
result_table,
user=""):
"""Submit a PAI evaluation task
Args:
datasource: string
Like: maxcompute://ak:[email protected]/api?
curr_project=test_ci&scheme=http
original_sql: string
Original "TO PREDICT" statement.
select: string
            SQL statement to get the evaluation data set.
model: string
            Model to load and evaluate.
label_name: string
The label name to evaluate.
model_params: dict
            Params for training, corresponding to the WITH clause.
result_table: string
            The table name to save the evaluation result.
user: string
A string to identify the user, used to load model from the user's
directory.
"""
params = dict(locals())
project = table_ops.get_project(datasource)
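    # qualify the result table with the current project if no project prefix was given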
if result_table.count(".") == 0:
result_table = "%s.%s" % (project, result_table)
params["result_table"] = result_table
oss_model_path = pai_model.get_oss_model_save_path(datasource,
model,
user=user)
model_type, estimator = pai_model.get_saved_model_type_and_estimator(
datasource, model)
if model_type == EstimatorType.PAIML:
raise SQLFlowDiagnostic("PAI model evaluation is not supported yet.")
if model_type == EstimatorType.XGBOOST:
params["entry_type"] = "evaluate_xgb"
validation_metrics = model_params.get("validation.metrics",
"accuracy_score")
else:
params["entry_type"] = "evaluate_tf"
validation_metrics = model_params.get("validation.metrics", "Accuracy")
validation_metrics = [m.strip() for m in validation_metrics.split(",")]
with db.connect_with_data_source(datasource) as conn:
result_column_names = create_evaluate_table(conn, result_table,
validation_metrics)
with table_ops.create_tmp_tables_guard(select, datasource) as data_table:
params["pai_table"] = data_table
params["result_column_names"] = result_column_names
if try_pai_local_run(params, oss_model_path):
return
conf = cluster_conf.get_cluster_config(model_params)
with temp_file.TemporaryDirectory(prefix="sqlflow", dir="/tmp") as cwd:
prepare_archive(cwd, estimator, oss_model_path, params)
cmd = get_pai_tf_cmd(
conf, "file://" + os.path.join(cwd, JOB_ARCHIVE_FILE),
"file://" + os.path.join(cwd, PARAMS_FILE), ENTRY_FILE, model,
oss_model_path, data_table, "", result_table, project)
submit_pai_task(cmd, datasource)
| apache-2.0 |
hgrimelid/feincms | feincms/views/base.py | 1 | 3756 | from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.cache import add_never_cache_headers
try:
from django.template.response import TemplateResponse
except ImportError:
TemplateResponse = None
from feincms.module.page.models import Page
class Handler(object):
"""
This is the default handler for feincms page content.
It isn't a class-based-view like those in Django's generic view framework.
State should not be stored on the ``Handler`` class, because of thread-safety
    and cross-pollination issues.
"""
def __call__(self, request, path=None):
return self.build_response(request,
Page.objects.page_for_path_or_404(path or request.path))
def build_response(self, request, page):
"""
Calls `prepare`, `render` and `finalize`, in this order.
"""
response = self.prepare(request, page)
if response:
return response
response = self.render(request, page)
return self.finalize(request, response, page)
def prepare(self, request, page):
"""
Prepare / pre-process content types. If this method returns anything,
it is treated as a ``HttpResponse`` and handed back to the visitor.
"""
response = page.setup_request(request)
if response:
return response
for content in page.content.all_of_type(tuple(page._feincms_content_types_with_process)):
r = content.process(request)
if r:
return r
def render(self, request, page):
"""
The render step. Must return a HttpResponse.
"""
# This facility can be used by request processors to add values
# to the context.
context = request._feincms_extra_context
context['feincms_page'] = page
if TemplateResponse:
return TemplateResponse(request, page.template.path, context)
else:
return render_to_response(page.template.path,
context_instance=RequestContext(request, context))
def finalize(self, request, response, page):
"""
Runs finalize() on content types having such a method, adds headers and
returns the final response.
"""
for content in page.content.all_of_type(tuple(page._feincms_content_types_with_finalize)):
r = content.finalize(request, response)
if r:
return r
page.finalize_response(request, response)
# Add never cache headers in case frontend editing is active
if hasattr(request, "session") and request.session.get('frontend_editing', False):
add_never_cache_headers(response)
return response
#: Default handler
handler = Handler()
class PreviewHandler(Handler):
"""
This handler is for previewing site content; it takes a page_id so
the page is uniquely identified and does not care whether the page
is active or expired. To balance that, it requires a logged in user.
"""
def __call__(self, request, page_id):
page = get_object_or_404(Page, pk=page_id)
return self.build_response(request, page)
def finalize(self, request, response, page):
"""
Do (nearly) nothing. Do not call any ``finalize`` methods,
because those might add stuff to the cache, set ETags etc.
all of which we cannot use in a preview handler.
"""
add_never_cache_headers(response)
return response
#: Preview handler
preview_handler = permission_required('page.change_page')(PreviewHandler())
| bsd-3-clause |
charlesbastos/ArduPilotMega_demo | Tools/LogAnalyzer/LogAnalyzer.py | 74 | 12240 | #!/usr/bin/env python
#
# A module to analyze and identify any common problems which can be determined from log files
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
# some logging oddities noticed while doing this, to be followed up on:
# - tradheli MOT labels Mot1,Mot2,Mot3,Mot4,GGain
# - Pixhawk doesn't output one of the FMT labels... forget which one
# - MAG offsets seem to be constant (only seen data on Pixhawk)
# - MAG offsets seem to be cast to int before being output? (param is -84.67, logged as -84)
# - copter+plane use 'V' in their vehicle type/version/build line, rover uses lower case 'v'. Copter+Rover give a build number, plane does not
# - CTUN.ThrOut on copter is 0-1000, on plane+rover it is 0-100
# TODO: add test for noisy baro values
# TODO: support loading binary log files (use Tridge's mavlogdump?)
import DataflashLog
import pprint # temp
import imp
import glob
import inspect
import os, sys
import argparse
import datetime
import time
from xml.sax.saxutils import escape
class TestResult(object):
'''all tests return a standardized result type'''
class StatusType:
# NA means not applicable for this log (e.g. copter tests against a plane log), UNKNOWN means it is missing data required for the test
GOOD, FAIL, WARN, UNKNOWN, NA = range(5)
status = None
statusMessage = "" # can be multi-line
class Test(object):
'''base class to be inherited by log tests. Each test should be quite granular so we have lots of small tests with clear results'''
def __init__(self):
self.name = ""
self.result = None # will be an instance of TestResult after being run
self.execTime = None
self.enable = True
def run(self, logdata, verbose=False):
pass
class TestSuite(object):
'''registers test classes, loading using a basic plugin architecture, and can run them all in one run() operation'''
def __init__(self):
self.tests = []
self.logfile = None
self.logdata = None
# dynamically load in Test subclasses from the 'tests' folder
# to prevent one being loaded, move it out of that folder, or set that test's .enable attribute to False
dirName = os.path.dirname(os.path.abspath(__file__))
testScripts = glob.glob(dirName + '/tests/*.py')
testClasses = []
for script in testScripts:
m = imp.load_source("m",script)
for name, obj in inspect.getmembers(m, inspect.isclass):
if name not in testClasses and inspect.getsourcefile(obj) == script:
testClasses.append(name)
self.tests.append(obj())
# and here's an example of explicitly loading a Test class if you wanted to do that
# m = imp.load_source("m", dirName + '/tests/TestBadParams.py')
# self.tests.append(m.TestBadParams())
def run(self, logdata, verbose):
'''run all registered tests in a single call, gathering execution timing info'''
self.logdata = logdata
self.logfile = logdata.filename
for test in self.tests:
# run each test in turn, gathering timing info
if test.enable:
startTime = time.time()
test.run(self.logdata, verbose) # RUN THE TEST
endTime = time.time()
test.execTime = 1000 * (endTime-startTime)
def outputPlainText(self, outputStats):
'''output test results in plain text'''
print 'Dataflash log analysis report for file: ' + self.logfile
print 'Log size: %.2fmb (%d lines)' % (self.logdata.filesizeKB / 1024.0, self.logdata.lineCount)
print 'Log duration: %s' % str(datetime.timedelta(seconds=self.logdata.durationSecs)) + '\n'
if self.logdata.vehicleType == "ArduCopter" and self.logdata.getCopterType():
print 'Vehicle Type: %s (%s)' % (self.logdata.vehicleType, self.logdata.getCopterType())
else:
print 'Vehicle Type: %s' % self.logdata.vehicleType
print 'Firmware Version: %s (%s)' % (self.logdata.firmwareVersion, self.logdata.firmwareHash)
print 'Hardware: %s' % self.logdata.hardwareType
print 'Free RAM: %s' % self.logdata.freeRAM
if self.logdata.skippedLines:
print "\nWARNING: %d malformed log lines skipped during read" % self.logdata.skippedLines
print '\n'
print "Test Results:"
for test in self.tests:
if not test.enable:
continue
statusMessageFirstLine = test.result.statusMessage.strip('\n\r').split('\n')[0]
statusMessageExtra = test.result.statusMessage.strip('\n\r').split('\n')[1:]
execTime = ""
if outputStats:
execTime = " (%6.2fms)" % (test.execTime)
if test.result.status == TestResult.StatusType.GOOD:
print " %20s: GOOD %-55s%s" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.FAIL:
print " %20s: FAIL %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.WARN:
print " %20s: WARN %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.NA:
# skip any that aren't relevant for this vehicle/hardware/etc
continue
else:
print " %20s: UNKNOWN %-55s%s" % (test.name, statusMessageFirstLine, execTime)
#if statusMessageExtra:
for line in statusMessageExtra:
print " %29s %s" % ("",line)
print '\n'
print 'The Log Analyzer is currently BETA code.\nFor any support or feedback on the log analyzer please email Andrew Chapman ([email protected])'
print '\n'
def outputXML(self, xmlFile):
'''output test results to an XML file'''
# open the file for writing
xml = None
try:
if xmlFile == '-':
xml = sys.stdout
else:
xml = open(xmlFile, 'w')
except:
sys.stderr.write("Error opening output xml file: %s" % xmlFile)
sys.exit(1)
# output header info
print >>xml, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
print >>xml, "<loganalysis>"
print >>xml, "<header>"
print >>xml, " <logfile>" + escape(self.logfile) + "</logfile>"
print >>xml, " <sizekb>" + escape(`self.logdata.filesizeKB`) + "</sizekb>"
print >>xml, " <sizelines>" + escape(`self.logdata.lineCount`) + "</sizelines>"
print >>xml, " <duration>" + escape(str(datetime.timedelta(seconds=self.logdata.durationSecs))) + "</duration>"
print >>xml, " <vehicletype>" + escape(self.logdata.vehicleType) + "</vehicletype>"
if self.logdata.vehicleType == "ArduCopter" and self.logdata.getCopterType():
print >>xml, " <coptertype>" + escape(self.logdata.getCopterType()) + "</coptertype>"
print >>xml, " <firmwareversion>" + escape(self.logdata.firmwareVersion) + "</firmwareversion>"
print >>xml, " <firmwarehash>" + escape(self.logdata.firmwareHash) + "</firmwarehash>"
print >>xml, " <hardwaretype>" + escape(self.logdata.hardwareType) + "</hardwaretype>"
print >>xml, " <freemem>" + escape(`self.logdata.freeRAM`) + "</freemem>"
print >>xml, " <skippedlines>" + escape(`self.logdata.skippedLines`) + "</skippedlines>"
print >>xml, "</header>"
# output parameters
print >>xml, "<params>"
for param, value in self.logdata.parameters.items():
print >>xml, " <param name=\"%s\" value=\"%s\" />" % (param,escape(`value`))
print >>xml, "</params>"
# output test results
print >>xml, "<results>"
for test in self.tests:
if not test.enable:
continue
print >>xml, " <result>"
if test.result.status == TestResult.StatusType.GOOD:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>GOOD</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
elif test.result.status == TestResult.StatusType.FAIL:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>FAIL</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
                print >>xml, "    <data>(test data will be embedded here at some point)</data>"
elif test.result.status == TestResult.StatusType.WARN:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>WARN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
                print >>xml, "    <data>(test data will be embedded here at some point)</data>"
elif test.result.status == TestResult.StatusType.NA:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>NA</status>"
else:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>UNKNOWN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " </result>"
print >>xml, "</results>"
print >>xml, "</loganalysis>"
xml.close()
def main():
dirName = os.path.dirname(os.path.abspath(__file__))
# deal with command line arguments
parser = argparse.ArgumentParser(description='Analyze an APM Dataflash log for known issues')
parser.add_argument('logfile', type=argparse.FileType('r'), help='path to Dataflash log file (or - for stdin)')
parser.add_argument('-f', '--format', metavar='', type=str, action='store', choices=['bin','log','auto'], default='auto', help='log file format: \'bin\',\'log\' or \'auto\'')
parser.add_argument('-q', '--quiet', metavar='', action='store_const', const=True, help='quiet mode, do not print results')
parser.add_argument('-p', '--profile', metavar='', action='store_const', const=True, help='output performance profiling data')
parser.add_argument('-s', '--skip_bad', metavar='', action='store_const', const=True, help='skip over corrupt dataflash lines')
parser.add_argument('-e', '--empty', metavar='', action='store_const', const=True, help='run an initial check for an empty log')
parser.add_argument('-x', '--xml', type=str, metavar='XML file', nargs='?', const='', default='', help='write output to specified XML file (or - for stdout)')
parser.add_argument('-v', '--verbose', metavar='', action='store_const', const=True, help='verbose output')
args = parser.parse_args()
# load the log
startTime = time.time()
logdata = DataflashLog.DataflashLog(args.logfile.name, format=args.format, ignoreBadlines=args.skip_bad) # read log
endTime = time.time()
if args.profile:
print "Log file read time: %.2f seconds" % (endTime-startTime)
# check for empty log if requested
if args.empty:
emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata)
if emptyErr:
sys.stderr.write("Empty log file: %s, %s" % (logdata.filename, emptyErr))
sys.exit(1)
#run the tests, and gather timings
testSuite = TestSuite()
startTime = time.time()
testSuite.run(logdata, args.verbose) # run tests
endTime = time.time()
if args.profile:
print "Test suite run time: %.2f seconds" % (endTime-startTime)
# deal with output
if not args.quiet:
testSuite.outputPlainText(args.profile)
if args.xml:
testSuite.outputXML(args.xml)
if not args.quiet:
print "XML output written to file: %s\n" % args.xml
if __name__ == "__main__":
main()
| gpl-3.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/python/saved_model/builder.py | 126 | 1271 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder.
Builds a SavedModel that can be saved to storage, is language neutral, and
enables systems to produce, consume, or transform TensorFlow Models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.builder_impl import SavedModelBuilder
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"SavedModelBuilder",
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
mpharrigan/mdtraj | mdtraj/geometry/distance.py | 1 | 11815 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2015 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A Beauchamp, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.utils.six.moves import range
from mdtraj.geometry import _geometry
__all__ = ['compute_distances', 'compute_displacements',
'compute_center_of_mass', 'find_closest_contact']
##############################################################################
# Functions
##############################################################################
def compute_distances(traj, atom_pairs, periodic=True, opt=True):
"""Compute the distances between pairs of atoms in each frame.
Parameters
----------
traj : Trajectory
An mtraj trajectory.
atom_pairs : np.ndarray, shape=(num_pairs, 2), dtype=int
Each row gives the indices of two atoms involved in the interaction.
periodic : bool, default=True
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
opt : bool, default=True
Use an optimized native library to calculate distances. Our optimized
SSE minimum image convention calculation implementation is over 1000x
faster than the naive numpy implementation.
Returns
-------
distances : np.ndarray, shape=(n_frames, num_pairs), dtype=float
The distance, in each frame, between each pair of atoms.
"""
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)
pairs = ensure_type(atom_pairs, dtype=np.int32, ndim=2, name='atom_pairs', shape=(None, 2), warn_on_cast=False)
if not np.all(np.logical_and(pairs < traj.n_atoms, pairs >= 0)):
raise ValueError('atom_pairs must be between 0 and %d' % traj.n_atoms)
if len(pairs) == 0:
return np.zeros((len(xyz), 0), dtype=np.float32)
if periodic and traj._have_unitcell:
box = ensure_type(traj.unitcell_vectors, dtype=np.float32, ndim=3, name='unitcell_vectors', shape=(len(xyz), 3, 3),
warn_on_cast=False)
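        # flag orthorhombic boxes (all unitcell angles 90 degrees) for the minimum-image paths below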
orthogonal = np.allclose(traj.unitcell_angles, 90)
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0]), dtype=np.float32)
_geometry._dist_mic(xyz, pairs, box.transpose(0, 2, 1).copy(), out, orthogonal)
return out
else:
return _distance_mic(xyz, pairs, box.transpose(0, 2, 1), orthogonal)
# either there are no unitcell vectors or they dont want to use them
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0]), dtype=np.float32)
_geometry._dist(xyz, pairs, out)
return out
else:
return _distance(xyz, pairs)
def compute_displacements(traj, atom_pairs, periodic=True, opt=True):
"""Compute the displacement vector between pairs of atoms in each frame of a trajectory.
Parameters
----------
traj : Trajectory
Trajectory to compute distances in
atom_pairs : np.ndarray, shape[num_pairs, 2], dtype=int
Each row gives the indices of two atoms.
periodic : bool, default=True
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
opt : bool, default=True
Use an optimized native library to calculate distances. Our
optimized minimum image convention calculation implementation is
over 1000x faster than the naive numpy implementation.
Returns
-------
displacements : np.ndarray, shape=[n_frames, n_pairs, 3], dtype=float32
        The displacement vector, in each frame, between each pair of atoms.
"""
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)
pairs = ensure_type(np.asarray(atom_pairs), dtype=np.int32, ndim=2, name='atom_pairs', shape=(None, 2), warn_on_cast=False)
if not np.all(np.logical_and(pairs < traj.n_atoms, pairs >= 0)):
raise ValueError('atom_pairs must be between 0 and %d' % traj.n_atoms)
if periodic and traj._have_unitcell:
box = ensure_type(traj.unitcell_vectors, dtype=np.float32, ndim=3, name='unitcell_vectors', shape=(len(xyz), 3, 3),
warn_on_cast=False)
orthogonal = np.allclose(traj.unitcell_angles, 90)
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0], 3), dtype=np.float32)
_geometry._dist_mic_displacement(xyz, pairs, box.transpose(0, 2, 1).copy(), out, orthogonal)
return out
else:
return _displacement_mic(xyz, pairs, box.transpose(0, 2, 1), orthogonal)
# either there are no unitcell vectors or they dont want to use them
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0], 3), dtype=np.float32)
_geometry._dist_displacement(xyz, pairs, out)
return out
return _displacement(xyz, pairs)
def compute_center_of_mass(traj):
"""Compute the center of mass for each frame.
Parameters
----------
traj : Trajectory
Trajectory to compute center of mass for
Returns
-------
com : np.ndarray, shape=(n_frames, 3)
Coordinates of the center of mass for each frame
"""
com = np.zeros((traj.n_frames, 3))
masses = np.array([a.element.mass for a in traj.top.atoms])
masses /= masses.sum()
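    # the center of mass of each frame is the mass-weighted average of its coordinates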
for i, x in enumerate(traj.xyz):
com[i, :] = x.astype('float64').T.dot(masses)
return com
def find_closest_contact(traj, group1, group2, frame=0, periodic=True):
"""Find the closest contact between two groups of atoms.
Given a frame of a Trajectory and two groups of atoms, identify the pair of
atoms (one from each group) that form the closest contact between the two groups.
Parameters
----------
traj : Trajectory
An mtraj trajectory.
group1 : np.ndarray, shape=(num_atoms), dtype=int
The indices of atoms in the first group.
group2 : np.ndarray, shape=(num_atoms), dtype=int
The indices of atoms in the second group.
frame : int, default=0
The frame of the Trajectory to take positions from
periodic : bool, default=True
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
Returns
-------
result : tuple (int, int, float)
The indices of the two atoms forming the closest contact, and the distance between them.
"""
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)[frame]
atoms1 = ensure_type(group1, dtype=np.int32, ndim=1, name='group1', warn_on_cast=False)
atoms2 = ensure_type(group2, dtype=np.int32, ndim=1, name='group2', warn_on_cast=False)
if periodic and traj._have_unitcell:
box = ensure_type(traj.unitcell_vectors, dtype=np.float32, ndim=3, name='unitcell_vectors', shape=(len(traj.xyz), 3, 3),
warn_on_cast=False)[frame]
else:
box = None
return _geometry._find_closest_contact(xyz, atoms1, atoms2, box)
##############################################################################
# pure python implementation of the core routines
##############################################################################
def _distance(xyz, pairs):
"Distance between pairs of points in each frame"
delta = np.diff(xyz[:, pairs], axis=2)[:, :, 0]
return (delta ** 2.).sum(-1) ** 0.5
def _displacement(xyz, pairs):
"Displacement vector between pairs of points in each frame"
value = np.diff(xyz[:, pairs], axis=2)[:, :, 0]
assert value.shape == (xyz.shape[0], pairs.shape[0], 3), 'v.shape %s, xyz.shape %s, pairs.shape %s' % (str(value.shape), str(xyz.shape), str(pairs.shape))
return value
def _distance_mic(xyz, pairs, box_vectors, orthogonal):
"""Distance between pairs of points in each frame under the minimum image
convention for periodic boundary conditions.
    The computation follows scheme B.9 in Tuckerman, M. "Statistical
Mechanics: Theory and Molecular Simulation", 2010.
This is a slow pure python implementation, mostly for testing.
"""
out = np.empty((xyz.shape[0], pairs.shape[0]), dtype=np.float32)
for i in range(len(xyz)):
hinv = np.linalg.inv(box_vectors[i])
bv1, bv2, bv3 = box_vectors[i].T
for j, (a,b) in enumerate(pairs):
s1 = np.dot(hinv, xyz[i,a,:])
s2 = np.dot(hinv, xyz[i,b,:])
s12 = s2 - s1
s12 = s12 - np.round(s12)
r12 = np.dot(box_vectors[i], s12)
dist = np.linalg.norm(r12)
if not orthogonal:
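                # triclinic box: check all 3x3x3 neighboring images and keep the shortest distance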
for ii in range(-1, 2):
v1 = bv1*ii
for jj in range(-1, 2):
v12 = bv2*jj + v1
for kk in range(-1, 2):
new_r12 = r12 + v12 + bv3*kk
dist = min(dist, np.linalg.norm(new_r12))
out[i, j] = dist
return out
def _displacement_mic(xyz, pairs, box_vectors, orthogonal):
"""Displacement vector between pairs of points in each frame under the
minimum image convention for periodic boundary conditions.
    The computation follows scheme B.9 in Tuckerman, M. "Statistical
Mechanics: Theory and Molecular Simulation", 2010.
This is a very slow pure python implementation, mostly for testing.
"""
out = np.empty((xyz.shape[0], pairs.shape[0], 3), dtype=np.float32)
for i in range(len(xyz)):
hinv = np.linalg.inv(box_vectors[i])
bv1, bv2, bv3 = box_vectors[i].T
for j, (a,b) in enumerate(pairs):
s1 = np.dot(hinv, xyz[i,a,:])
s2 = np.dot(hinv, xyz[i,b,:])
s12 = s2 - s1
s12 = s12 - np.round(s12)
disp = np.dot(box_vectors[i], s12)
min_disp = disp
dist2 = (disp*disp).sum()
if not orthogonal:
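                # triclinic box: check all 3x3x3 neighboring images and keep the shortest displacement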
for ii in range(-1, 2):
v1 = bv1*ii
for jj in range(-1, 2):
v12 = bv2*jj+v1
for kk in range(-1, 2):
tmp = disp + v12 + bv3*kk
new_dist2 = (tmp*tmp).sum()
if new_dist2 < dist2:
dist2 = new_dist2
min_disp = tmp
out[i, j] = min_disp
return out
| lgpl-2.1 |
Suite5/DataColibri | articles/migrations/0001_initial.py | 10 | 13440 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table('articles_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
))
db.send_create_signal('articles', ['Tag'])
# Adding model 'ArticleStatus'
db.create_table('articles_articlestatus', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('ordering', self.gf('django.db.models.fields.IntegerField')(default=0)),
('is_live', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('articles', ['ArticleStatus'])
# Adding model 'Article'
db.create_table('articles_article', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['articles.ArticleStatus'])),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('keywords', self.gf('django.db.models.fields.TextField')(blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('markup', self.gf('django.db.models.fields.CharField')(default='h', max_length=1)),
('content', self.gf('django.db.models.fields.TextField')()),
('rendered_content', self.gf('django.db.models.fields.TextField')()),
('publish_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('expiration_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('login_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('use_addthis_button', self.gf('django.db.models.fields.BooleanField')(default=True)),
('addthis_use_author', self.gf('django.db.models.fields.BooleanField')(default=True)),
('addthis_username', self.gf('django.db.models.fields.CharField')(default=None, max_length=50, blank=True)),
))
db.send_create_signal('articles', ['Article'])
# Adding M2M table for field sites on 'Article'
db.create_table('articles_article_sites', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm['articles.article'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('articles_article_sites', ['article_id', 'site_id'])
# Adding M2M table for field tags on 'Article'
db.create_table('articles_article_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm['articles.article'], null=False)),
('tag', models.ForeignKey(orm['articles.tag'], null=False))
))
db.create_unique('articles_article_tags', ['article_id', 'tag_id'])
# Adding M2M table for field followup_for on 'Article'
db.create_table('articles_article_followup_for', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_article', models.ForeignKey(orm['articles.article'], null=False)),
('to_article', models.ForeignKey(orm['articles.article'], null=False))
))
db.create_unique('articles_article_followup_for', ['from_article_id', 'to_article_id'])
# Adding M2M table for field related_articles on 'Article'
db.create_table('articles_article_related_articles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_article', models.ForeignKey(orm['articles.article'], null=False)),
('to_article', models.ForeignKey(orm['articles.article'], null=False))
))
db.create_unique('articles_article_related_articles', ['from_article_id', 'to_article_id'])
# Adding model 'Attachment'
db.create_table('articles_attachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(related_name='attachments', to=orm['articles.Article'])),
('attachment', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('caption', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('articles', ['Attachment'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table('articles_tag')
# Deleting model 'ArticleStatus'
db.delete_table('articles_articlestatus')
# Deleting model 'Article'
db.delete_table('articles_article')
# Removing M2M table for field sites on 'Article'
db.delete_table('articles_article_sites')
# Removing M2M table for field tags on 'Article'
db.delete_table('articles_article_tags')
# Removing M2M table for field followup_for on 'Article'
db.delete_table('articles_article_followup_for')
# Removing M2M table for field related_articles on 'Article'
db.delete_table('articles_article_related_articles')
# Deleting model 'Attachment'
db.delete_table('articles_attachment')
models = {
'articles.article': {
'Meta': {'ordering': "('-publish_date', 'title')", 'object_name': 'Article'},
'addthis_use_author': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'addthis_username': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followup_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followups'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'markup': ('django.db.models.fields.CharField', [], {'default': "'h'", 'max_length': '1'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_articles_rel_+'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'rendered_content': ('django.db.models.fields.TextField', [], {}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['articles.ArticleStatus']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['articles.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'use_addthis_button': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'articles.articlestatus': {
'Meta': {'ordering': "('ordering', 'name')", 'object_name': 'ArticleStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'articles.attachment': {
'Meta': {'ordering': "('-article', 'id')", 'object_name': 'Attachment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['articles.Article']"}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'articles.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['articles']
| mit |
pombreda/fs-googledrive | test_googledrivefs.py | 1 | 3955 | # -*- coding: utf-8 -*-
from __future__ import (print_function, division,
absolute_import, unicode_literals)
import unittest
from mock import Mock
from pytest import fixture
from oauth2client.client import OAuth2Credentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from fs.tests import FSTestCases
from googledrivefs import GoogleDriveFS
client_config = {
'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
'client_id': '105537897616-oqt2bc3ffgi3l2bd07o1s3feq68ga5m7'
'.apps.googleusercontent.com',
'client_secret': 'sC6ZXdmHf_qXR0bQ0XaLvfSp',
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
'revoke_uri': None,
'token_uri': 'https://accounts.google.com/o/oauth2/token'}
credentials = '{"_module": "oauth2client.client", "token_expiry": "2014-06-07T17:04:26Z", "access_token": "ya29.KgBLjqMlBwNydhoAAACKi7Trb4b3VyN4LZX5JHHTz9wdUeAOqupcFn65q9p0kA", "token_uri": "https://accounts.google.com/o/oauth2/token", "invalid": false, "token_response": {"access_token": "ya29.KgBLjqMlBwNydhoAAACKi7Trb4b3VyN4LZX5JHHTz9wdUeAOqupcFn65q9p0kA", "token_type": "Bearer", "expires_in": 3600, "refresh_token": "1/1Ani7Ovt_KmBPaQxbyc4ZGvhTHMNu4gwVdPiBR8_8BQ"}, "client_id": "105537897616-oqt2bc3ffgi3l2bd07o1s3feq68ga5m7.apps.googleusercontent.com", "id_token": null, "client_secret": "sC6ZXdmHf_qXR0bQ0XaLvfSp", "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", "_class": "OAuth2Credentials", "refresh_token": "1/1Ani7Ovt_KmBPaQxbyc4ZGvhTHMNu4gwVdPiBR8_8BQ", "user_agent": null}'
def cleanup_googledrive(fs):
"""Remove all files and folders from Google Drive"""
for entry in fs.listdir(files_only=True):
fs.remove(entry)
for entry in fs.listdir(dirs_only=True):
fs.removedir(entry, force=True)
fs.client.auth.service.files().emptyTrash().execute()
class TestGoogleDriveFS():
@fixture
def fs(self):
gauth = GoogleAuth()
gauth.credentials = OAuth2Credentials.from_json(credentials)
gauth.client_config = client_config
gauth.settings["client_config_backend"] = "settings"
drive = GoogleDrive(gauth)
return GoogleDriveFS(drive)
def test_map_ids_to_paths(self, fs):
# Arrange
file_list = [
{'parents': [{'id': '0B_lkT', 'isRoot': True}],
'id': '1APq7o', 'title': 'file_at_root.txt'},
{'parents': [{'id': '0B_lkT', 'isRoot': True}],
'id': '1xp13X', 'title': 'folder_at_root'},
{'parents': [{'id': '1xp13X', 'isRoot': False}],
'id': '13PuVd', 'title': 'file1_in_folder.txt'},
{'parents': [{'id': '1xp13X', 'isRoot': False}],
'id': '1ovGwK', 'title': 'file2_in_folder.txt'},
{'parents': [{'id': '1xp13X', 'isRoot': False}],
'id': '0Ap6n5', 'title': 'folder_in_folder'},
]
fs.client.ListFile = Mock()
fs.client.ListFile.return_value.GetList.return_value = file_list
# Act
ids = fs._map_ids_to_paths()
# Assert
assert ids['/file_at_root.txt'] == '1APq7o'
assert ids['/folder_at_root'] == '1xp13X'
assert ids['/folder_at_root/file1_in_folder.txt'] == '13PuVd'
assert ids['/folder_at_root/file2_in_folder.txt'] == '1ovGwK'
assert ids['/folder_at_root/folder_in_folder'] == '0Ap6n5'
class TestExternalGoogleDriveFS(unittest.TestCase, FSTestCases):
"""This will test the GoogleDriveFS implementation against the
base tests defined in PyFilesystem"""
def setUp(self):
gauth = GoogleAuth()
gauth.credentials = OAuth2Credentials.from_json(credentials)
gauth.client_config = client_config
gauth.settings["client_config_backend"] = "settings"
drive = GoogleDrive(gauth)
self.fs = GoogleDriveFS(drive)
def tearDown(self):
cleanup_googledrive(self.fs)
self.fs.close()
| gpl-2.0 |
nagyistoce/odoo-dev-odoo | addons/resource/faces/pcalendar.py | 433 | 28436 | #@+leo-ver=4
#@+node:@file pcalendar.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains all classes and functions for the project plan calendar
"""
#@<< Imports >>
#@+node:<< Imports >>
from string import *
import datetime
import time
import re
import locale
import bisect
import sys
TIME_RANGE_PATTERN = re.compile("(\\d+):(\\d+)\\s*-\\s*(\\d+):(\\d+)")
TIME_DELTA_PATTERN = re.compile("([-+]?\\d+(\\.\\d+)?)([dwmyMH])")
DEFAULT_MINIMUM_TIME_UNIT = 15
DEFAULT_WORKING_DAYS_PER_WEEK = 5
DEFAULT_WORKING_DAYS_PER_MONTH = 20
DEFAULT_WORKING_DAYS_PER_YEAR = 200
DEFAULT_WORKING_HOURS_PER_DAY = 8
DEFAULT_WORKING_TIMES = ( (8 * 60, 12 * 60 ),
(13 * 60, 17 * 60 ) )
DEFAULT_WORKING_DAYS = { 0 : DEFAULT_WORKING_TIMES,
1 : DEFAULT_WORKING_TIMES,
2 : DEFAULT_WORKING_TIMES,
3 : DEFAULT_WORKING_TIMES,
4 : DEFAULT_WORKING_TIMES,
5 : (),
6 : () }
#@-node:<< Imports >>
#@nl
#@+others
#@+node:to_time_range
def to_time_range(src):
"""
converts a string to a timerange, i.e
(from, to)
    from, to are ints, specifying the minutes since midnight
"""
if not src: return ()
mo = TIME_RANGE_PATTERN.match(src)
if not mo:
raise ValueError("%s is no time range" % src)
from_time = int(mo.group(1)) * 60 + int(mo.group(2))
to_time = int(mo.group(3)) * 60 + int(mo.group(4))
return from_time, to_time
#@-node:to_time_range
#@+node:to_datetime
def to_datetime(src):
"""
a tolerant conversion function to convert different strings
    to a datetime.datetime
"""
#to get the original value for wrappers
new = getattr(src, "_value", src)
while new is not src:
src = new
new = getattr(src, "_value", src)
if isinstance(src, _WorkingDateBase):
src = src.to_datetime()
if isinstance(src, datetime.datetime):
return src
src = str(src)
formats = [ "%x %H:%M",
"%x",
"%Y-%m-%d %H:%M",
"%y-%m-%d %H:%M",
"%d.%m.%Y %H:%M",
"%d.%m.%y %H:%M",
"%Y%m%d %H:%M",
"%d/%m/%y %H:%M",
"%d/%m/%Y %H:%M",
"%d/%m/%Y",
"%d/%m/%y",
"%Y-%m-%d",
"%y-%m-%d",
"%d.%m.%Y",
"%d.%m.%y",
"%Y%m%d" ]
for f in formats:
try:
conv = time.strptime(src, f)
return datetime.datetime(*conv[0:-3])
except Exception, e:
pass
raise TypeError("'%s' (%s) is not a datetime" % (src, str(type(src))))
#@-node:
#@+node:_to_days
def _to_days(src):
"""
    converts a string of the day abbreviations mon, tue, wed,
    thu, fri, sat, sun to a dict with the correct weekday indices.
    For example
    _to_days('mon, tue, thu') results in
{ 0:1, 1:1, 3:1 }
"""
tokens = src.split(",")
result = { }
for t in tokens:
try:
index = { "mon" : 0,
"tue" : 1,
"wed" : 2,
"thu" : 3,
"fri" : 4,
"sat" : 5,
"sun" : 6 } [ lower(t.strip()) ]
result[index] = 1
except:
raise ValueError("%s is not a day" % (t))
return result
#@-node:_to_days
#@+node:_add_to_time_spans
def _add_to_time_spans(src, to_add, is_free):
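    # merge the (start, end, is_free) spans of src with the dates/ranges in
    # to_add by sweeping over the sorted boundary events; working time takes
    # precedence over overlapping free time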
if not isinstance(to_add, (tuple, list)):
to_add = (to_add,)
tmp = []
for start, end, f in src:
tmp.append((start, True, f))
tmp.append((end, False, f))
for v in to_add:
if isinstance(v, (tuple, list)):
start = to_datetime(v[0])
end = to_datetime(v[1])
else:
start = to_datetime(v)
end = start.replace(hour=0, minute=0) + datetime.timedelta(1)
tmp.append((start, start <= end, is_free))
tmp.append((end, start > end, is_free))
tmp.sort()
# 0: date
# 1: is_start
# 2: is_free
sequence = []
free_count = 0
work_count = 0
last = None
for date, is_start, is_free in tmp:
if is_start:
if is_free:
if not free_count and not work_count:
last = date
free_count += 1
else:
if not work_count:
if free_count: sequence.append((last, date, True))
last = date
work_count += 1
else:
if is_free:
assert(free_count > 0)
free_count -= 1
if not free_count and not work_count:
sequence.append((last, date, True))
else:
assert(work_count > 0)
work_count -= 1
if not work_count: sequence.append((last, date, False))
if free_count: last = date
return tuple(sequence)
#@-node:_add_to_time_spans
#@+node:to_timedelta
def to_timedelta(src, cal=None, is_duration=False):
"""
converts a string to a datetime.timedelta. If cal is specified
    it will be used for getting the working times. If is_duration=True
working times will not be considered. Valid units are
d for Days
w for Weeks
m for Months
y for Years
H for Hours
M for Minutes
"""
cal = cal or _default_calendar
if isinstance(src, datetime.timedelta):
        return datetime.timedelta(src.days, seconds=src.seconds)
if isinstance(src, (long, int, float)):
src = "%sM" % str(src)
if not isinstance(src, basestring):
raise ValueError("%s is not a duration" % (repr(src)))
src = src.strip()
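    # plain durations are measured in calendar time (24h days, 7-day weeks);
    # otherwise the calendar's working time figures apply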
if is_duration:
d_p_w = 7
d_p_m = 30
d_p_y = 360
d_w_h = 24
else:
d_p_w = cal.working_days_per_week
d_p_m = cal.working_days_per_month
d_p_y = cal.working_days_per_year
d_w_h = cal.working_hours_per_day
def convert_minutes(minutes):
minutes = int(minutes)
hours = minutes / 60
minutes = minutes % 60
days = hours / d_w_h
hours = hours % d_w_h
return [ days, 0, 0, 0, minutes, hours ]
def convert_days(value):
days = int(value)
value -= days
value *= d_w_h
hours = int(value)
value -= hours
value *= 60
minutes = round(value)
return [ days, 0, 0, 0, minutes, hours ]
sum_args = [ 0, 0, 0, 0, 0, 0 ]
split = src.split(" ")
for s in split:
mo = TIME_DELTA_PATTERN.match(s)
if not mo:
raise ValueError(src +
" is not a valid duration: valid"
" units are: d w m y M H")
unit = mo.group(3)
val = float(mo.group(1))
if unit == 'd':
args = convert_days(val)
elif unit == 'w':
args = convert_days(val * d_p_w)
elif unit == 'm':
args = convert_days(val * d_p_m)
elif unit == 'y':
args = convert_days(val * d_p_y)
elif unit == 'M':
args = convert_minutes(val)
elif unit == 'H':
args = convert_minutes(val * 60)
sum_args = [ a + b for a, b in zip(sum_args, args) ]
sum_args = tuple(sum_args)
return datetime.timedelta(*sum_args)
#@-node:to_timedelta
#@+node:timedelta_to_str
def timedelta_to_str(delta, format, cal=None, is_duration=False):
cal = cal or _default_calendar
if is_duration:
d_p_w = 7
d_p_m = 30
d_p_y = 365
d_w_h = 24
else:
d_p_w = cal.working_days_per_week
d_p_m = cal.working_days_per_month
d_p_y = cal.working_days_per_year
d_w_h = cal.working_hours_per_day
has_years = format.find("%y") > -1
has_minutes = format.find("%M") > -1
has_hours = format.find("%H") > -1 or has_minutes
has_days = format.find("%d") > -1
has_weeks = format.find("%w") > -1
has_months = format.find("%m") > -1
result = format
days = delta.days
d_r = (days, format)
minutes = delta.seconds / 60
def rebase(d_r, cond1, cond2, letter, divisor):
#rebase the days
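        # substitute %<letter> in the format: whole units when a smaller unit
        # follows (cond2), otherwise a fractional value; consumed days are
        # subtracted from the running total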
if not cond1: return d_r
days, result = d_r
if cond2:
val = days / divisor
if not val:
result = re.sub("{[^{]*?%" + letter + "[^}]*?}", "", result)
result = result.replace("%" + letter, str(val))
days %= divisor
else:
result = result.replace("%" + letter,
locale.format("%.2f",
(float(days) / divisor)))
return (days, result)
d_r = rebase(d_r, has_years, has_months or has_weeks or has_days, "y", d_p_y)
d_r = rebase(d_r, has_months, has_weeks or has_days, "m", d_p_m)
d_r = rebase(d_r, has_weeks, has_days, "w", d_p_w)
days, result = d_r
if not has_days:
minutes += days * d_w_h * 60
days = 0
if has_hours:
if not days:
result = re.sub("{[^{]*?%d[^}]*?}", "", result)
result = result.replace("%d", str(days))
else:
result = result.replace("%d",
"%.2f" % (days + float(minutes)
/ (d_w_h * 60)))
if has_hours:
if has_minutes:
val = minutes / 60
if not val:
result = re.sub("{[^{]*?%H[^}]*?}", "", result)
result = result.replace("%H", str(val))
minutes %= 60
else:
result = result.replace("%H", "%.2f" % (float(minutes) / 60))
if not minutes:
result = re.sub("{[^{]*?%M[^}]*?}", "", result)
result = result.replace("%M", str(minutes))
result = result.replace("{", "")
result = result.replace("}", "")
return result.strip()
#@-node:timedelta_to_str
#@+node:strftime
def strftime(dt, format):
"""
an extended version of strftime, that introduces some new
directives:
%IW iso week number
%IY iso year
%IB full month name appropriate to iso week
%ib abbreviated month name appropriate to iso week
%im month as decimal number appropriate to iso week
"""
iso = dt.isocalendar()
if iso[0] != dt.year:
iso_date = dt.replace(day=1, month=1)
format = format \
.replace("%IB", iso_date.strftime("%B"))\
.replace("%ib", iso_date.strftime("%b"))\
.replace("%im", iso_date.strftime("%m"))
else:
format = format \
.replace("%IB", "%B")\
.replace("%ib", "%b")\
.replace("%im", "%m")
format = format \
.replace("%IW", str(iso[1]))\
.replace("%IY", str(iso[0]))\
return dt.strftime(format)
#@-node:strftime
#@+node:union
def union(*calendars):
"""
returns a calendar that unifies all working times
"""
#@ << check arguments >>
#@+node:<< check arguments >>
if len(calendars) == 1:
calendars = calendars[0]
#@nonl
#@-node:<< check arguments >>
#@nl
#@ << intersect vacations >>
#@+node:<< intersect vacations >>
free_time = []
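    # a span counts as vacation in the union only while it is free in every calendar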
for c in calendars:
for start, end, is_free in c.time_spans:
if is_free:
free_time.append((start, False))
free_time.append((end, True))
count = len(calendars)
open = 0
time_spans = []
free_time.sort()
for date, is_end in free_time:
if is_end:
if open == count:
time_spans.append((start, date, True))
open -= 1
else:
open += 1
start = date
#@-node:<< intersect vacations >>
#@nl
#@ << unify extra worktime >>
#@+node:<< unify extra worktime >>
for c in calendars:
for start, end, is_free in c.time_spans:
if not is_free:
time_spans = _add_to_time_spans(time_spans, start, end)
#@nonl
#@-node:<< unify extra worktime >>
#@nl
#@ << unify working times >>
#@+node:<< unify working times >>
working_times = {}
for d in range(0, 7):
times = []
for c in calendars:
for start, end in c.working_times.get(d, []):
times.append((start, False))
times.append((end, True))
times.sort()
open = 0
ti = []
start = None
for time, is_end in times:
if not is_end:
if not start: start = time
open += 1
else:
open -= 1
if not open:
ti.append((start, time))
start = None
if ti:
working_times[d] = ti
#@-node:<< unify working times >>
#@nl
#@ << create result calendar >>
#@+node:<< create result calendar >>
result = Calendar()
result.working_times = working_times
result.time_spans = time_spans
result._recalc_working_time()
result._build_mapping()
#@nonl
#@-node:<< create result calendar >>
#@nl
return result
#@nonl
#@-node:union
#@+node:class _CalendarItem
class _CalendarItem(int):
#@ << class _CalendarItem declarations >>
#@+node:<< class _CalendarItem declarations >>
__slots__ = ()
calender = None
#@-node:<< class _CalendarItem declarations >>
#@nl
#@ @+others
#@+node:__new__
def __new__(cls, val):
try:
return int.__new__(cls, val)
except OverflowError:
return int.__new__(cls, sys.maxint)
#@-node:__new__
#@+node:round
def round(self, round_up=True):
m_t_u = self.calendar.minimum_time_unit
minutes = int(self)
base = (minutes / m_t_u) * m_t_u
minutes %= m_t_u
round_up = round_up and minutes > 0 or minutes > m_t_u / 2
if round_up: base += m_t_u
return self.__class__(base)
#@-node:round
#@-others
#@-node:class _CalendarItem
#@+node:class _Minutes
class _Minutes(_CalendarItem):
#@ << class _Minutes declarations >>
#@+node:<< class _Minutes declarations >>
__slots__ = ()
STR_FORMAT = "{%dd}{ %HH}{ %MM}"
#@-node:<< class _Minutes declarations >>
#@nl
#@ @+others
#@+node:__new__
def __new__(cls, src=0, is_duration=False):
"""
        converts a timedelta into working minutes.
"""
if isinstance(src, cls) or type(src) is int:
return _CalendarItem.__new__(cls, src)
cal = cls.calendar
if not isinstance(src, datetime.timedelta):
src = to_timedelta(src, cal, is_duration)
d_w_h = is_duration and 24 or cal.working_hours_per_day
src = src.days * d_w_h * 60 + src.seconds / 60
return _CalendarItem.__new__(cls, src)
#@-node:__new__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(int(self), int(self.__class__(other)))
#@-node:__cmp__
#@+node:__add__
def __add__(self, other):
try:
return self.__class__(int(self) + int(self.__class__(other)))
except:
return NotImplemented
#@-node:__add__
#@+node:__sub__
def __sub__(self, other):
try:
return self.__class__(int(self) - int(self.__class__(other)))
except:
return NotImplemented
#@-node:__sub__
#@+node:to_timedelta
def to_timedelta(self, is_duration=False):
d_w_h = is_duration and 24 or self.calendar.working_hours_per_day
minutes = int(self)
hours = minutes / 60
minutes = minutes % 60
days = hours / d_w_h
hours = hours % d_w_h
return datetime.timedelta(days, hours=hours, minutes=minutes)
#@nonl
#@-node:to_timedelta
#@+node:strftime
def strftime(self, format=None, is_duration=False):
td = self.to_timedelta(is_duration)
return timedelta_to_str(td, format or self.STR_FORMAT,
self.calendar, is_duration)
#@nonl
#@-node:strftime
#@-others
#@-node:class _Minutes
#@+node:class _WorkingDateBase
class _WorkingDateBase(_CalendarItem):
"""
    A datetime which has only valid values within the
    working times of a specific calendar
"""
#@ << class _WorkingDateBase declarations >>
#@+node:<< class _WorkingDateBase declarations >>
timetuple = True
STR_FORMAT = "%x %H:%M"
_minutes = _Minutes
__slots__ = ()
#@-node:<< class _WorkingDateBase declarations >>
#@nl
#@ @+others
#@+node:__new__
def __new__(cls, src):
#cls.__bases__[0] is the base of
#the calendar specific StartDate and EndDate
if isinstance(src, cls.__bases__[0]) or type(src) in (int, float):
return _CalendarItem.__new__(cls, src)
src = cls.calendar.from_datetime(to_datetime(src))
return _CalendarItem.__new__(cls, src)
#@-node:__new__
#@+node:__repr__
def __repr__(self):
return self.strftime()
#@-node:__repr__
#@+node:to_datetime
def to_datetime(self):
return self.to_starttime()
#@-node:to_datetime
#@+node:to_starttime
def to_starttime(self):
return self.calendar.to_starttime(self)
#@-node:to_starttime
#@+node:to_endtime
def to_endtime(self):
return self.calendar.to_endtime(self)
#@-node:to_endtime
#@+node:__cmp__
def __cmp__(self, other):
return cmp(int(self), int(self.__class__(other)))
#@-node:__cmp__
#@+node:__add__
def __add__(self, other):
try:
return self.__class__(int(self) + int(self._minutes(other)))
except ValueError, e:
raise e
except:
return NotImplemented
#@-node:__add__
#@+node:__sub__
def __sub__(self, other):
if isinstance(other, (datetime.timedelta, str, _Minutes)):
try:
other = self._minutes(other)
except:
pass
if isinstance(other, self._minutes):
return self.__class__(int(self) - int(other))
try:
return self._minutes(int(self) - int(self.__class__(other)))
except:
return NotImplemented
#@-node:__sub__
#@+node:strftime
def strftime(self, format=None):
return strftime(self.to_datetime(), format or self.STR_FORMAT)
#@-node:strftime
#@-others
#@-node:class _WorkingDateBase
#@+node:class Calendar
class Calendar(object):
"""
A calendar to specify working times and vacations.
    The calendar's epoch starts at 1.1.1979
"""
#@ << declarations >>
#@+node:<< declarations >>
# january the first must be a monday
EPOCH = datetime.datetime(1979, 1, 1)
minimum_time_unit = DEFAULT_MINIMUM_TIME_UNIT
working_days_per_week = DEFAULT_WORKING_DAYS_PER_WEEK
working_days_per_month = DEFAULT_WORKING_DAYS_PER_MONTH
working_days_per_year = DEFAULT_WORKING_DAYS_PER_YEAR
working_hours_per_day = DEFAULT_WORKING_HOURS_PER_DAY
now = EPOCH
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self):
self.time_spans = ()
self._dt_num_can = ()
self._num_dt_can = ()
self.working_times = { }
self._recalc_working_time()
self._make_classes()
#@-node:__init__
#@+node:__or__
def __or__(self, other):
if isinstance(other, Calendar):
return union(self, other)
return NotImplemented
#@nonl
#@-node:__or__
#@+node:clone
def clone(self):
result = Calendar()
result.working_times = self.working_times.copy()
result.time_spans = self.time_spans
result._recalc_working_time()
result._build_mapping()
return result
#@nonl
#@-node:clone
#@+node:set_working_days
def set_working_days(self, day_range, trange, *further_tranges):
"""
        Sets the working days of a calendar.
        day_range is a string of day abbreviations like 'mon, tue'.
        trange and further_tranges are time range strings like
        '8:00-10:00'.
"""
time_ranges = [ trange ] + list(further_tranges)
time_ranges = filter(bool, map(to_time_range, time_ranges))
days = _to_days(day_range)
for k in days.keys():
self.working_times[k] = time_ranges
self._recalc_working_time()
self._build_mapping()
#@-node:set_working_days
#@+node:set_vacation
def set_vacation(self, value):
"""
Sets vacation time.
value is either a datetime literal or
        a sequence of items, each of which can be
        a datetime literal or a pair of datetime literals
"""
self.time_spans = _add_to_time_spans(self.time_spans, value, True)
self._build_mapping()
#@-node:set_vacation
#@+node:set_extra_work
def set_extra_work(self, value):
"""
Sets extra working time
value is either a datetime literal or
        a sequence of items, each of which can be
        a datetime literal or a pair of datetime literals
"""
self.time_spans = _add_to_time_spans(self.time_spans, value, False)
self._build_mapping()
#@-node:set_extra_work
#@+node:from_datetime
def from_datetime(self, value):
assert(isinstance(value, datetime.datetime))
delta = value - self.EPOCH
days = delta.days
minutes = delta.seconds / 60
# calculate the weektime
weeks = days / 7
wtime = self.week_time * weeks
# calculate the daytime
days %= 7
dtime = sum(self.day_times[:days])
# calculate the minute time
slots = self.working_times.get(days, DEFAULT_WORKING_DAYS[days])
mtime = 0
for start, end in slots:
if minutes > end:
mtime += end - start
else:
if minutes > start:
mtime += minutes - start
break
result = wtime + dtime + mtime
# map exceptional timespans
dt_num_can = self._dt_num_can
pos = bisect.bisect(dt_num_can, (value,)) - 1
if pos >= 0:
start, end, nstart, nend, cend = dt_num_can[pos]
if value < end:
if nstart < nend:
delta = value - start
delta = delta.days * 24 * 60 + delta.seconds / 60
result = nstart + delta
else:
result = nstart
else:
result += (nend - cend) # == (result - cend) + nend
return result
#@-node:from_datetime
#@+node:split_time
def split_time(self, value):
#map exceptional timespans
num_dt_can = self._num_dt_can
pos = bisect.bisect(num_dt_can, (value, sys.maxint)) - 1
if pos >= 0:
nstart, nend, start, end, cend = num_dt_can[pos]
if value < nend:
value = start + datetime.timedelta(minutes=value - nstart)
delta = value - self.EPOCH
return delta.days / 7, delta.days % 7, delta.seconds / 60, -1
else:
value += (cend - nend) # (value - nend + cend)
#calculate the weeks since the epoch
weeks = value / self.week_time
value %= self.week_time
#calculate the remaining days
days = 0
for day_time in self.day_times:
if value < day_time: break
value -= day_time
days += 1
#calculate the remaining minutes
minutes = 0
slots = self.working_times.get(days, DEFAULT_WORKING_DAYS[days])
index = 0
for start, end in slots:
delta = end - start
if delta > value:
minutes = start + value
break
else:
value -= delta
index += 1
return weeks, days, minutes, index
#@-node:split_time
#@+node:to_starttime
def to_starttime(self, value):
weeks, days, minutes, index = self.split_time(value)
return self.EPOCH + datetime.timedelta(weeks=weeks,
days=days,
minutes=minutes)
#@-node:to_starttime
#@+node:to_endtime
def to_endtime(self, value):
return self.to_starttime(value - 1) + datetime.timedelta(minutes=1)
#@-node:to_endtime
#@+node:get_working_times
def get_working_times(self, day):
return self.working_times.get(day, DEFAULT_WORKING_DAYS[day])
#@-node:get_working_times
#@+node:_build_mapping
def _build_mapping(self):
self._dt_num_can = self._num_dt_can = ()
dt_num_can = []
num_dt_can = []
delta = self.Minutes()
for start, end, is_free in self.time_spans:
cstart = self.StartDate(start)
cend = self.EndDate(end)
nstart = cstart + delta
if not is_free:
d = end - start
d = d.days * 24 * 60 + d.seconds / 60
nend = nstart + d
else:
nend = nstart
delta += (nend - nstart) - (cend - cstart)
dt_num_can.append((start, end, nstart, nend, cend))
num_dt_can.append((nstart, nend, start, end, cend))
self._dt_num_can = tuple(dt_num_can)
self._num_dt_can = tuple(num_dt_can)
#@-node:_build_mapping
#@+node:_recalc_working_time
def _recalc_working_time(self):
def slot_sum_time(day):
slots = self.working_times.get(day, DEFAULT_WORKING_DAYS[day])
return sum(map(lambda slot: slot[1] - slot[0], slots))
self.day_times = map(slot_sum_time, range(0, 7))
self.week_time = sum(self.day_times)
#@-node:_recalc_working_time
#@+node:_make_classes
def _make_classes(self):
        # ensure that the classes are instance specific
class minutes(_Minutes):
calendar = self
__slots__ = ()
class db(_WorkingDateBase):
calendar = self
_minutes = minutes
__slots__ = ()
class wdt(db): __slots__ = ()
class edt(db):
__slots__ = ()
def to_datetime(self):
return self.to_endtime()
self.Minutes, self.StartDate, self.EndDate = minutes, wdt, edt
self.WorkingDate = self.StartDate
#@-node:_make_classes
#@-others
_default_calendar = Calendar()
WorkingDate = _default_calendar.WorkingDate
StartDate = _default_calendar.StartDate
EndDate = _default_calendar.EndDate
Minutes = _default_calendar.Minutes
#@-node:class Calendar
#@-others
if __name__ == '__main__':
cal = Calendar()
start = EndDate("10.1.2005")
delay = Minutes("4H")
start2 = cal.StartDate(start)
start3 = cal.StartDate("10.1.2005")
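    # Illustrative sketch of working-time arithmetic (commented out; the
    # date/duration literals above come from the original demo, the calls
    # below are assumptions based on this module's API):
    #   cal.set_working_days("mon, tue, wed, thu, fri", "8:00-12:00", "13:00-17:00")
    #   end = start + delay        # EndDate shifted by 4 working hours
    #   span = start3 - start2     # difference expressed as Minutes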
#@-node:@file pcalendar.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vaygr/ansible | lib/ansible/module_utils/k8s/raw.py | 30 | 8686 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
from ansible.module_utils.k8s.helper import COMMON_ARG_SPEC, AUTH_ARG_SPEC, OPENSHIFT_ARG_SPEC
from ansible.module_utils.k8s.common import KubernetesAnsibleModule, OpenShiftAnsibleModuleMixin, to_snake
try:
from openshift.helper.exceptions import KubernetesException
except ImportError:
# Exception handled in common
pass
class KubernetesRawModule(KubernetesAnsibleModule):
def __init__(self, *args, **kwargs):
mutually_exclusive = [
('resource_definition', 'src'),
]
KubernetesAnsibleModule.__init__(self, *args,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
**kwargs)
self.kind = self.params.pop('kind')
self.api_version = self.params.pop('api_version')
self.resource_definition = self.params.pop('resource_definition')
self.src = self.params.pop('src')
if self.src:
self.resource_definition = self.load_resource_definition(self.src)
if self.resource_definition:
self.api_version = self.resource_definition.get('apiVersion')
self.kind = self.resource_definition.get('kind')
self.api_version = self.api_version.lower()
self.kind = to_snake(self.kind)
if not self.api_version:
self.fail_json(
msg=("Error: no api_version specified. Use the api_version parameter, or provide it as part of a ",
"resource_definition.")
)
if not self.kind:
self.fail_json(
msg="Error: no kind specified. Use the kind parameter, or provide it as part of a resource_definition"
)
self.helper = self.get_helper(self.api_version, self.kind)
@property
def argspec(self):
argspec = copy.deepcopy(COMMON_ARG_SPEC)
argspec.update(copy.deepcopy(AUTH_ARG_SPEC))
return argspec
def execute_module(self):
if self.resource_definition:
resource_params = self.resource_to_parameters(self.resource_definition)
self.params.update(resource_params)
self.authenticate()
state = self.params.pop('state', None)
force = self.params.pop('force', False)
name = self.params.get('name')
namespace = self.params.get('namespace')
existing = None
self.remove_aliases()
return_attributes = dict(changed=False, result=dict())
if self.helper.base_model_name_snake.endswith('list'):
k8s_obj = self._read(name, namespace)
return_attributes['result'] = k8s_obj.to_dict()
self.exit_json(**return_attributes)
try:
existing = self.helper.get_object(name, namespace)
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.message),
error=exc.value.get('status'))
if state == 'absent':
if not existing:
# The object already does not exist
self.exit_json(**return_attributes)
else:
# Delete the object
if not self.check_mode:
try:
self.helper.delete_object(name, namespace)
except KubernetesException as exc:
self.fail_json(msg="Failed to delete object: {0}".format(exc.message),
error=exc.value.get('status'))
return_attributes['changed'] = True
self.exit_json(**return_attributes)
else:
if not existing:
k8s_obj = self._create(namespace)
return_attributes['result'] = k8s_obj.to_dict()
return_attributes['changed'] = True
self.exit_json(**return_attributes)
if existing and force:
k8s_obj = None
request_body = self.helper.request_body_from_params(self.params)
if not self.check_mode:
try:
k8s_obj = self.helper.replace_object(name, namespace, body=request_body)
except KubernetesException as exc:
self.fail_json(msg="Failed to replace object: {0}".format(exc.message),
error=exc.value.get('status'))
return_attributes['result'] = k8s_obj.to_dict()
return_attributes['changed'] = True
self.exit_json(**return_attributes)
# Check if existing object should be patched
k8s_obj = copy.deepcopy(existing)
try:
self.helper.object_from_params(self.params, obj=k8s_obj)
except KubernetesException as exc:
self.fail_json(msg="Failed to patch object: {0}".format(exc.message))
match, diff = self.helper.objects_match(self.helper.fix_serialization(existing), k8s_obj)
if match:
return_attributes['result'] = existing.to_dict()
self.exit_json(**return_attributes)
# Differences exist between the existing obj and requested params
if not self.check_mode:
try:
k8s_obj = self.helper.patch_object(name, namespace, k8s_obj)
except KubernetesException as exc:
self.fail_json(msg="Failed to patch object: {0}".format(exc.message))
return_attributes['result'] = k8s_obj.to_dict()
return_attributes['changed'] = True
self.exit_json(**return_attributes)
def _create(self, namespace):
request_body = None
k8s_obj = None
try:
request_body = self.helper.request_body_from_params(self.params)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.message))
if not self.check_mode:
try:
k8s_obj = self.helper.create_object(namespace, body=request_body)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.message),
error=exc.value.get('status'))
return k8s_obj
def _read(self, name, namespace):
k8s_obj = None
try:
k8s_obj = self.helper.get_object(name, namespace)
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object',
error=exc.value.get('status'))
return k8s_obj
class OpenShiftRawModule(OpenShiftAnsibleModuleMixin, KubernetesRawModule):
@property
def argspec(self):
args = super(OpenShiftRawModule, self).argspec
args.update(copy.deepcopy(OPENSHIFT_ARG_SPEC))
return args
def _create(self, namespace):
if self.kind.lower() == 'project':
return self._create_project()
return KubernetesRawModule._create(self, namespace)
def _create_project(self):
new_obj = None
k8s_obj = None
try:
new_obj = self.helper.object_from_params(self.params)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.message))
try:
k8s_obj = self.helper.create_project(metadata=new_obj.metadata,
display_name=self.params.get('display_name'),
description=self.params.get('description'))
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object',
error=exc.value.get('status'))
return k8s_obj
| gpl-3.0 |
ubc/edx-platform | lms/lib/comment_client/user.py | 144 | 6343 | from .utils import merge_dict, perform_request, CommentClientRequestError
import models
import settings
class User(models.Model):
accessible_fields = [
'username', 'follower_ids', 'upvoted_ids', 'downvoted_ids',
'id', 'external_id', 'subscribed_user_ids', 'children', 'course_id',
'group_id', 'subscribed_thread_ids', 'subscribed_commentable_ids',
'subscribed_course_ids', 'threads_count', 'comments_count',
'default_sort_key'
]
updatable_fields = ['username', 'external_id', 'default_sort_key']
initializable_fields = updatable_fields
metric_tag_fields = ['course_id']
base_url = "{prefix}/users".format(prefix=settings.PREFIX)
default_retrieve_params = {'complete': True}
type = 'user'
@classmethod
def from_django_user(cls, user):
return cls(id=str(user.id),
external_id=str(user.id),
username=user.username)
def follow(self, source):
params = {'source_type': source.type, 'source_id': source.id}
response = perform_request(
'post',
_url_for_subscription(self.id),
params,
metric_action='user.follow',
metric_tags=self._metric_tags + ['target.type:{}'.format(source.type)],
)
def unfollow(self, source):
params = {'source_type': source.type, 'source_id': source.id}
response = perform_request(
'delete',
_url_for_subscription(self.id),
params,
metric_action='user.unfollow',
metric_tags=self._metric_tags + ['target.type:{}'.format(source.type)],
)
def vote(self, voteable, value):
if voteable.type == 'thread':
url = _url_for_vote_thread(voteable.id)
elif voteable.type == 'comment':
url = _url_for_vote_comment(voteable.id)
else:
raise CommentClientRequestError("Can only vote / unvote for threads or comments")
params = {'user_id': self.id, 'value': value}
response = perform_request(
'put',
url,
params,
metric_action='user.vote',
metric_tags=self._metric_tags + ['target.type:{}'.format(voteable.type)],
)
voteable._update_from_response(response)
def unvote(self, voteable):
if voteable.type == 'thread':
url = _url_for_vote_thread(voteable.id)
elif voteable.type == 'comment':
url = _url_for_vote_comment(voteable.id)
else:
raise CommentClientRequestError("Can only vote / unvote for threads or comments")
params = {'user_id': self.id}
response = perform_request(
'delete',
url,
params,
metric_action='user.unvote',
metric_tags=self._metric_tags + ['target.type:{}'.format(voteable.type)],
)
voteable._update_from_response(response)
def active_threads(self, query_params={}):
if not self.course_id:
raise CommentClientRequestError("Must provide course_id when retrieving active threads for the user")
url = _url_for_user_active_threads(self.id)
params = {'course_id': self.course_id.to_deprecated_string()}
params = merge_dict(params, query_params)
response = perform_request(
'get',
url,
params,
metric_action='user.active_threads',
metric_tags=self._metric_tags,
paged_results=True,
)
return response.get('collection', []), response.get('page', 1), response.get('num_pages', 1)
def subscribed_threads(self, query_params={}):
if not self.course_id:
raise CommentClientRequestError("Must provide course_id when retrieving subscribed threads for the user")
url = _url_for_user_subscribed_threads(self.id)
params = {'course_id': self.course_id.to_deprecated_string()}
params = merge_dict(params, query_params)
response = perform_request(
'get',
url,
params,
metric_action='user.subscribed_threads',
metric_tags=self._metric_tags,
paged_results=True
)
return response.get('collection', []), response.get('page', 1), response.get('num_pages', 1)
def _retrieve(self, *args, **kwargs):
url = self.url(action='get', params=self.attributes)
retrieve_params = self.default_retrieve_params.copy()
retrieve_params.update(kwargs)
if self.attributes.get('course_id'):
retrieve_params['course_id'] = self.course_id.to_deprecated_string()
if self.attributes.get('group_id'):
retrieve_params['group_id'] = self.group_id
try:
response = perform_request(
'get',
url,
retrieve_params,
metric_action='model.retrieve',
metric_tags=self._metric_tags,
)
except CommentClientRequestError as e:
if e.status_code == 404:
# attempt to gracefully recover from a previous failure
# to sync this user to the comments service.
self.save()
response = perform_request(
'get',
url,
retrieve_params,
metric_action='model.retrieve',
metric_tags=self._metric_tags,
)
else:
raise
self._update_from_response(response)
def _url_for_vote_comment(comment_id):
return "{prefix}/comments/{comment_id}/votes".format(prefix=settings.PREFIX, comment_id=comment_id)
def _url_for_vote_thread(thread_id):
return "{prefix}/threads/{thread_id}/votes".format(prefix=settings.PREFIX, thread_id=thread_id)
def _url_for_subscription(user_id):
return "{prefix}/users/{user_id}/subscriptions".format(prefix=settings.PREFIX, user_id=user_id)
def _url_for_user_active_threads(user_id):
return "{prefix}/users/{user_id}/active_threads".format(prefix=settings.PREFIX, user_id=user_id)
def _url_for_user_subscribed_threads(user_id):
return "{prefix}/users/{user_id}/subscribed_threads".format(prefix=settings.PREFIX, user_id=user_id)
| agpl-3.0 |
morucci/repoxplorer | repoxplorer/auth/__init__.py | 1 | 7152 | # Copyright 2019, Matthieu Huin
# Copyright 2019, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various authentication engines supported by RepoXplorer."""
import base64
import json
import jwt
from urllib.parse import urljoin
import requests
from pecan import conf
from repoxplorer.exceptions import UnauthorizedException
from repoxplorer import index
from repoxplorer.index import users
class BaseAuthEngine(object):
"""The base auth engine class."""
def is_configured(self) -> bool:
"""Activate the users REST endpoint if authentication is configured."""
return False
def authorize(self, request, uid=None) -> str:
"""Make sure the authenticated user is allowed an action."""
raise UnauthorizedException("Not implemented")
def provision_user(self, request) -> None:
"""If needed, the user can be provisioned based on the user info passed
by the Identity Provider."""
return
class CAuthEngine(BaseAuthEngine):
"""Cauth relies on Apache + mod_auth_authtkt to set a Remote-User header.
    User provisioning is done out of band by Cauth itself, calling the
PUT endpoint on the users API."""
def is_configured(self):
return conf.get('users_endpoint', False)
def authorize(self, request, uid=None):
"""Make sure the request is authorized.
Returns the authorized user's uid or raises if unauthorized."""
if not request.remote_user:
request.remote_user = request.headers.get('Remote-User')
if not request.remote_user:
request.remote_user = request.headers.get('X-Remote-User')
if request.remote_user == '(null)':
if request.headers.get('Authorization'):
auth_header = request.headers.get('Authorization').split()[1]
request.remote_user = base64.b64decode(
auth_header).split(':')[0]
if (request.remote_user == "admin" and
request.headers.get('Admin-Token')):
sent_admin_token = request.headers.get('Admin-Token')
            # If remote-user is admin and an admin-token is passed,
            # authorize if the token is correct
if sent_admin_token == conf.get('admin_token'):
return 'admin'
else:
# If uid targeted by the request is the same
# as the requester then authorize
if uid and uid == request.remote_user:
return uid
if uid and uid != request.remote_user:
raise UnauthorizedException("Admin action only")
raise UnauthorizedException("unauthorized")
class OpenIDConnectEngine(BaseAuthEngine):
"""Expects a Bearer token sent through the 'Authorization' header.
The token is verified against a JWK, pulled from the well-known
configuration of the OIDC provider.
The claims will be used to provision users if authorization is
successful."""
config = conf.get('oidc', {})
def is_configured(self):
return self.config.get('issuer_url', False)
def _get_issuer_info(self):
issuer_url = self.config.get('issuer_url')
verify_ssl = self.config.get('verify_ssl', True)
issuer_info = requests.get(
urljoin(issuer_url, '.well-known/openid-configuration'),
verify=verify_ssl)
if issuer_info.status_code > 399:
raise UnauthorizedException(
"Cannot fetch OpenID provider's configuration")
return issuer_info.json()
def _get_signing_key(self, jwks_uri, key_id):
verify_ssl = self.config.get('verify_ssl', True)
certs = requests.get(jwks_uri, verify=verify_ssl)
if certs.status_code > 399:
raise UnauthorizedException("Cannot fetch JWKS")
for k in certs.json()['keys']:
if k['kid'] == key_id:
return (jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(k)),
k['alg'])
raise UnauthorizedException("Key %s not found" % key_id)
def _get_raw_token(self, request):
if request.headers.get('Authorization', None) is None:
raise UnauthorizedException('Missing "Authorization" header')
auth_header = request.headers.get('Authorization', None)
if not auth_header.lower().startswith('bearer '):
raise UnauthorizedException('Invalid "Authorization" header')
token = auth_header[len('bearer '):]
return token
def authorize(self, request, uid=None):
token = self._get_raw_token(request)
issuer_info = self._get_issuer_info()
unverified_headers = jwt.get_unverified_header(token)
key_id = unverified_headers.get('kid', None)
if key_id is None:
raise UnauthorizedException("Missing key id in token")
jwks_uri = issuer_info.get('jwks_uri')
if jwks_uri is None:
raise UnauthorizedException("Missing JWKS URI in config")
key, algo = self._get_signing_key(jwks_uri, key_id)
try:
claims = jwt.decode(token, key, algorithms=algo,
issuer=issuer_info['issuer'],
audience=self.config['audience'])
except Exception as e:
raise UnauthorizedException('Invalid access token: %s' % e)
if claims['preferred_username'] == self.config.get('admin_username',
'admin'):
return 'admin'
if uid and uid == claims['preferred_username']:
return uid
if uid and uid != claims['preferred_username']:
raise UnauthorizedException("Only the admin ")
raise UnauthorizedException('unauthorized')
def provision_user(self, request):
raw_token = self._get_raw_token(request)
# verified before so it's totally okay
claims = jwt.decode(raw_token, verify=False)
# TODO assuming the presence of claims, but a specific scope might be
# needed.
# These are expected to be standard though, see
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
email = claims['email']
uid = claims['preferred_username']
name = claims['name']
_users = users.Users(index.Connector(index_suffix='users'))
u = _users.get(uid)
infos = {'uid': uid,
'name': name,
'default-email': email,
'emails': [{'email': email}]}
if u:
_users.update(infos)
else:
_users.create(infos)
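# Usage sketch (illustrative only; "request" is assumed to be a pecan request
# object, the engine names come from this module):
#   engine = OpenIDConnectEngine()
#   if engine.is_configured():
#       uid = engine.authorize(request)    # raises UnauthorizedException
#       engine.provision_user(request)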
| apache-2.0 |
ajose01/rethinkdb | test/interface/metadata_persistence.py | 29 | 2500 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import sys, os, time
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()
beforeMetaData = None
afterMetaData = None
files = None
# == start first instance of server
print("Starting server (%.2fs)" % (time.time() - startTime))
with driver.Process(console_output=True, output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as server:
files = server.files
print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime))
conn = r.connect(host=server.host, port=server.driver_port)
print("Creating db/table %s/%s (%.2fs)" % (dbName, tableName, time.time() - startTime))
if dbName not in r.db_list().run(conn):
r.db_create(dbName).run(conn)
if tableName in r.db(dbName).table_list().run(conn):
r.db(dbName).table_drop(tableName).run(conn)
r.db(dbName).table_create(tableName).run(conn)
print("Collecting metadata for first run (%.2fs)" % (time.time() - startTime))
beforeMetaData = r.db('rethinkdb').table('server_config').get(server.uuid).run(conn)
print("Shutting down server (%.2fs)" % (time.time() - startTime))
print("Restarting server with same files (%.2fs)" % (time.time() - startTime))
with driver.Process(files=files, console_output=True, command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as server:
print("Establishing second ReQL connection (%.2fs)" % (time.time() - startTime))
conn = r.connect(host=server.host, port=server.driver_port)
print("Collecting metadata for second run (%.2fs)" % (time.time() - startTime))
afterMetaData = r.db('rethinkdb').table('server_config').get(server.uuid).run(conn)
assert afterMetaData == beforeMetaData, "The server metadata did not match between runs:\n%s\nvs.\n%s" % (str(beforeMetaData), str(afterMetaData))
print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
| agpl-3.0 |
sebrandon1/neutron | neutron/tests/unit/agent/linux/test_ip_link_support.py | 38 | 7364 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.agent.linux import ip_link_support as ip_link
from neutron.tests import base
class TestIpLinkSupport(base.BaseTestCase):
IP_LINK_HELP = """Usage: ip link add [link DEV] [ name ] NAME
[ txqueuelen PACKETS ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ] [index IDX ]
[ numtxqueues QUEUE_COUNT ]
[ numrxqueues QUEUE_COUNT ]
type TYPE [ ARGS ]
ip link delete DEV type TYPE [ ARGS ]
ip link set { dev DEVICE | group DEVGROUP } [ { up | down } ]
[ arp { on | off } ]
[ dynamic { on | off } ]
[ multicast { on | off } ]
[ allmulticast { on | off } ]
[ promisc { on | off } ]
[ trailers { on | off } ]
[ txqueuelen PACKETS ]
[ name NEWNAME ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
[ netns PID ]
[ netns NAME ]
[ alias NAME ]
[ vf NUM [ mac LLADDR ]
[ vlan VLANID [ qos VLAN-QOS ] ]
[ rate TXRATE ] ]
[ spoofchk { on | off} ] ]
[ state { auto | enable | disable} ] ]
[ master DEVICE ]
[ nomaster ]
ip link show [ DEVICE | group GROUP ] [up]
TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
can | bridge | bond | ipoib | ip6tnl | ipip | sit |
vxlan | gre | gretap | ip6gre | ip6gretap | vti }
"""
IP_LINK_HELP_NO_STATE = """Usage: ip link add link DEV [ name ] NAME
[ txqueuelen PACKETS ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
type TYPE [ ARGS ]
ip link delete DEV type TYPE [ ARGS ]
ip link set DEVICE [ { up | down } ]
[ arp { on | off } ]
[ dynamic { on | off } ]
[ multicast { on | off } ]
[ allmulticast { on | off } ]
[ promisc { on | off } ]
[ trailers { on | off } ]
[ txqueuelen PACKETS ]
[ name NEWNAME ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
[ netns PID ]
[ alias NAME ]
[ vf NUM [ mac LLADDR ]
[ vlan VLANID [ qos VLAN-QOS ] ]
[ rate TXRATE ] ]
ip link show [ DEVICE ]
TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | can }
"""
IP_LINK_HELP_NO_SPOOFCHK = IP_LINK_HELP_NO_STATE
IP_LINK_HELP_NO_VF = """Usage: ip link set DEVICE { up | down |
arp { on | off } |
dynamic { on | off } |
multicast { on | off } |
allmulticast { on | off } |
promisc { on | off } |
trailers { on | off } |
txqueuelen PACKETS |
name NEWNAME |
address LLADDR | broadcast LLADDR |
mtu MTU }
ip link show [ DEVICE ]
"""
def _test_capability(self, capability, subcapability=None,
expected=True, stdout="", stderr=""):
with mock.patch("neutron.agent.linux.utils.execute") as mock_exec:
mock_exec.return_value = (stdout, stderr)
vf_section = ip_link.IpLinkSupport.get_vf_mgmt_section()
capable = ip_link.IpLinkSupport.vf_mgmt_capability_supported(
vf_section, capability, subcapability)
self.assertEqual(expected, capable)
mock_exec.assert_called_once_with(['ip', 'link', 'help'],
check_exit_code=False,
return_stderr=True,
log_fail_as_error=False)
def test_vf_mgmt(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
stderr=self.IP_LINK_HELP)
def test_execute_with_stdout(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
stdout=self.IP_LINK_HELP)
def test_vf_mgmt_no_state(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
expected=False,
stderr=self.IP_LINK_HELP_NO_STATE)
def test_vf_mgmt_no_spoofchk(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
expected=False,
stderr=self.IP_LINK_HELP_NO_SPOOFCHK)
def test_vf_mgmt_no_vf(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
expected=False,
stderr=self.IP_LINK_HELP_NO_VF)
def test_vf_mgmt_unknown_capability(self):
self._test_capability(
"state1",
expected=False,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_sub_capability(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_sub_capability_mismatch(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS,
expected=False,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_sub_capability_invalid(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
"qos1",
expected=False,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_error(self):
with mock.patch("neutron.agent.linux.utils.execute") as mock_exec:
mock_exec.side_effect = Exception()
self.assertRaises(
ip_link.UnsupportedIpLinkCommand,
ip_link.IpLinkSupport.get_vf_mgmt_section)
| apache-2.0 |
collex100/odoo | addons/base_setup/res_config.py | 261 | 5089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
_name = 'base.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_multi_company': fields.boolean('Manage multiple companies',
help='Work in multi-company environments, with appropriate security access between companies.\n'
'-This installs the module multi_company.'),
'module_share': fields.boolean('Allow documents sharing',
help="""Share or embbed any screen of Odoo."""),
'module_portal': fields.boolean('Activate the customer portal',
help="""Give your customers access to their documents."""),
'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
'module_base_import': fields.boolean("Allow users to import data from CSV files"),
'module_google_drive': fields.boolean('Attach Google documents to any record',
help="""This installs the module google_docs."""),
'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
help="""This installs the module google_calendar."""),
'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
}
_defaults= {
'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
}
def open_company(self, cr, uid, ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
return {
'type': 'ir.actions.act_window',
'name': 'Your Company',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'res.company',
'res_id': user.company_id.id,
'target': 'current',
}
def _change_header(self, header,font):
""" Replace default fontname use in header and setfont tag """
default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font,header)
return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font,default_para)
def set_base_defaults(self, cr, uid, ids, context=None):
ir_model_data = self.pool.get('ir.model.data')
wizard = self.browse(cr, uid, ids, context)[0]
if wizard.font:
user = self.pool.get('res.users').browse(cr, uid, uid, context)
font_name = wizard.font.name
user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
return {}
def act_discover_fonts(self, cr, uid, ids, context=None):
return self.pool.get("res.font").font_scan(cr, uid, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm.
class sale_config_settings(osv.osv_memory):
_name = 'sale.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_web_linkedin': fields.boolean('Get contacts automatically from linkedIn',
help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
'module_crm': fields.boolean('CRM'),
'module_sale' : fields.boolean('SALE'),
'module_mass_mailing': fields.boolean(
'Manage mass mailing campaigns',
help='Get access to statistics with your mass mailing, manage campaigns.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
ATIX-AG/ansible | lib/ansible/modules/monitoring/datadog_monitor.py | 25 | 13345 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Sebastian Kornehl <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
author: "Sebastian Kornehl (@skornehl)"
requirements: [datadog]
options:
api_key:
description: ["Your DataDog API key."]
required: true
app_key:
description: ["Your DataDog app key."]
required: true
state:
description: ["The designated state of the monitor."]
required: true
choices: ['present', 'absent', 'mute', 'unmute']
tags:
description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
version_added: "2.2"
type:
description:
- "The type of the monitor."
            - The 'event alert' is available starting at Ansible 2.1
choices: ['metric alert', 'service check', 'event alert']
query:
description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
name:
description: ["The name of the alert."]
required: true
message:
description:
- A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same
              '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e. '[[' and ']]'.
silenced:
description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
default: ""
notify_no_data:
description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
type: bool
default: 'no'
no_data_timeframe:
description:
- The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric
alerts or 2 minutes for service checks.
required: false
default: 2x timeframe for metric, 2 minutes for service
timeout_h:
description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
renotify_interval:
description:
- The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's
not resolved.
escalation_message:
description:
- A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval
is None
notify_audit:
description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
type: bool
default: 'no'
thresholds:
description:
- A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have
              multiple thresholds, we don't define them directly in the query.
default: {'ok': 1, 'critical': 1, 'warning': 1}
locked:
description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
type: bool
default: 'no'
version_added: "2.2"
require_full_window:
description:
- A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for
sparse metrics, otherwise some evaluations will be skipped.
version_added: "2.3"
new_host_delay:
description: ["A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
This gives the host time to fully initialize."]
version_added: "2.4"
id:
description: ["The id of the alert. If set, will be used instead of the name to locate the alert."]
version_added: "2.3"
'''
EXAMPLES = '''
# Create a metric monitor
- datadog_monitor:
type: "metric alert"
name: "Test monitor"
state: "present"
query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Deletes a monitor
- datadog_monitor:
name: "Test monitor"
state: "absent"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Mutes a monitor
- datadog_monitor:
name: "Test monitor"
state: "mute"
silenced: '{"*":None}'
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Unmutes a monitor
- datadog_monitor:
name: "Test monitor"
state: "unmute"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
import traceback
# Import Datadog
try:
from datadog import initialize, api
HAS_DATADOG = True
except:
HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
app_key=dict(required=True, no_log=True),
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
name=dict(required=True),
query=dict(required=False),
message=dict(required=False, default=None),
silenced=dict(required=False, default=None, type='dict'),
notify_no_data=dict(required=False, default=False, type='bool'),
no_data_timeframe=dict(required=False, default=None),
timeout_h=dict(required=False, default=None),
renotify_interval=dict(required=False, default=None),
escalation_message=dict(required=False, default=None),
notify_audit=dict(required=False, default=False, type='bool'),
thresholds=dict(required=False, type='dict', default=None),
tags=dict(required=False, type='list', default=None),
locked=dict(required=False, default=False, type='bool'),
require_full_window=dict(required=False, default=None, type='bool'),
new_host_delay=dict(required=False, default=None),
id=dict(required=False)
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg='datadogpy required for this module')
options = {
'api_key': module.params['api_key'],
'app_key': module.params['app_key']
}
initialize(**options)
# Check if api_key and app_key is correct or not
# if not, then fail here.
response = api.Monitor.get_all()
if isinstance(response, dict):
msg = response.get('errors', None)
if msg:
module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
if module.params['state'] == 'present':
install_monitor(module)
elif module.params['state'] == 'absent':
delete_monitor(module)
elif module.params['state'] == 'mute':
mute_monitor(module)
elif module.params['state'] == 'unmute':
unmute_monitor(module)
def _fix_template_vars(message):
if message:
return message.replace('[[', '{{').replace(']]', '}}')
return message
def _get_monitor(module):
if module.params['id'] is not None:
monitor = api.Monitor.get(module.params['id'])
if 'errors' in monitor:
module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
return monitor
else:
monitors = api.Monitor.get_all()
for monitor in monitors:
if monitor['name'] == module.params['name']:
return monitor
return {}
def _post_monitor(module, options):
try:
kwargs = dict(type=module.params['type'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.create(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
else:
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def _equal_dicts(a, b, ignore_keys):
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_monitor(module, monitor, options):
try:
kwargs = dict(id=monitor['id'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.update(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
module.exit_json(changed=False, msg=msg)
else:
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def install_monitor(module):
options = {
"silenced": module.params['silenced'],
"notify_no_data": module.boolean(module.params['notify_no_data']),
"no_data_timeframe": module.params['no_data_timeframe'],
"timeout_h": module.params['timeout_h'],
"renotify_interval": module.params['renotify_interval'],
"escalation_message": module.params['escalation_message'],
"notify_audit": module.boolean(module.params['notify_audit']),
"locked": module.boolean(module.params['locked']),
"require_full_window": module.params['require_full_window'],
"new_host_delay": module.params['new_host_delay']
}
if module.params['type'] == "service check":
options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
if module.params['type'] == "metric alert" and module.params['thresholds'] is not None:
options["thresholds"] = module.params['thresholds']
monitor = _get_monitor(module)
if not monitor:
_post_monitor(module, options)
else:
_update_monitor(module, monitor, options)
def delete_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.exit_json(changed=False)
try:
msg = api.Monitor.delete(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def mute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
msg = api.Monitor.mute(id=monitor['id'])
else:
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def unmute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif not monitor['options']['silenced']:
module.exit_json(changed=False)
try:
msg = api.Monitor.unmute(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
Arafatk/sympy | sympy/polys/densebasic.py | 92 | 36015 | """Basic tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.core import igcd
from sympy import oo
from sympy.polys.monomials import monomial_min, monomial_div
from sympy.polys.orderings import monomial_key
from sympy.core.compatibility import range
import random
def poly_LC(f, K):
"""
Return leading coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_LC
>>> poly_LC([], ZZ)
0
>>> poly_LC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
1
"""
if not f:
return K.zero
else:
return f[0]
def poly_TC(f, K):
"""
Return trailing coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_TC
>>> poly_TC([], ZZ)
0
>>> poly_TC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
3
"""
if not f:
return K.zero
else:
return f[-1]
dup_LC = dmp_LC = poly_LC
dup_TC = dmp_TC = poly_TC
def dmp_ground_LC(f, u, K):
"""
Return the ground leading coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_LC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_LC(f, 2, ZZ)
1
"""
while u:
f = dmp_LC(f, K)
u -= 1
return dup_LC(f, K)
def dmp_ground_TC(f, u, K):
"""
Return the ground trailing coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_TC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_TC(f, 2, ZZ)
3
"""
while u:
f = dmp_TC(f, K)
u -= 1
return dup_TC(f, K)
def dmp_true_LT(f, u, K):
"""
Return the leading term ``c * x_1**n_1 ... x_k**n_k``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_true_LT
>>> f = ZZ.map([[4], [2, 0], [3, 0, 0]])
>>> dmp_true_LT(f, 1, ZZ)
((2, 0), 4)
"""
monom = []
while u:
monom.append(len(f) - 1)
f, u = f[0], u - 1
if not f:
monom.append(0)
else:
monom.append(len(f) - 1)
return tuple(monom), dup_LC(f, K)
def dup_degree(f):
"""
Return the leading degree of ``f`` in ``K[x]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_degree
>>> f = ZZ.map([1, 2, 0, 3])
>>> dup_degree(f)
3
"""
if not f:
return -oo
return len(f) - 1
def dmp_degree(f, u):
"""
Return the leading degree of ``f`` in ``x_0`` in ``K[X]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree
>>> dmp_degree([[[]]], 2)
-oo
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree(f, 1)
1
"""
if dmp_zero_p(f, u):
return -oo
else:
return len(f) - 1
def _rec_degree_in(g, v, i, j):
"""Recursive helper function for :func:`dmp_degree_in`."""
if i == j:
return dmp_degree(g, v)
v, i = v - 1, i + 1
return max([ _rec_degree_in(c, v, i, j) for c in g ])
def dmp_degree_in(f, j, u):
"""
Return the leading degree of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_in
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree_in(f, 0, 1)
1
>>> dmp_degree_in(f, 1, 1)
2
"""
if not j:
return dmp_degree(f, u)
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_degree_in(f, u, 0, j)
def _rec_degree_list(g, v, i, degs):
"""Recursive helper for :func:`dmp_degree_list`."""
degs[i] = max(degs[i], dmp_degree(g, v))
if v > 0:
v, i = v - 1, i + 1
for c in g:
_rec_degree_list(c, v, i, degs)
def dmp_degree_list(f, u):
"""
Return a list of degrees of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_list
>>> f = ZZ.map([[1], [1, 2, 3]])
>>> dmp_degree_list(f, 1)
(1, 2)
"""
degs = [-oo]*(u + 1)
_rec_degree_list(f, u, 0, degs)
return tuple(degs)
def dup_strip(f):
"""
Remove leading zeros from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.densebasic import dup_strip
>>> dup_strip([0, 0, 1, 2, 3, 0])
[1, 2, 3, 0]
"""
if not f or f[0]:
return f
i = 0
for cf in f:
if cf:
break
else:
i += 1
return f[i:]
def dmp_strip(f, u):
"""
Remove leading zeros from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_strip
>>> dmp_strip([[], [0, 1, 2], [1]], 1)
[[0, 1, 2], [1]]
"""
if not u:
return dup_strip(f)
if dmp_zero_p(f, u):
return f
i, v = 0, u - 1
for c in f:
if not dmp_zero_p(c, v):
break
else:
i += 1
if i == len(f):
return dmp_zero(u)
else:
return f[i:]
def _rec_validate(f, g, i, K):
"""Recursive helper for :func:`dmp_validate`."""
if type(g) is not list:
if K is not None and not K.of_type(g):
raise TypeError("%s in %s in not of type %s" % (g, f, K.dtype))
return set([i - 1])
elif not g:
return set([i])
else:
j, levels = i + 1, set([])
for c in g:
levels |= _rec_validate(f, c, i + 1, K)
return levels
def _rec_strip(g, v):
"""Recursive helper for :func:`_rec_strip`."""
if not v:
return dup_strip(g)
w = v - 1
return dmp_strip([ _rec_strip(c, w) for c in g ], v)
def dmp_validate(f, K=None):
"""
Return the number of levels in ``f`` and recursively strip it.
Examples
========
>>> from sympy.polys.densebasic import dmp_validate
>>> dmp_validate([[], [0, 1, 2], [1]])
([[1, 2], [1]], 1)
>>> dmp_validate([[1], 1])
Traceback (most recent call last):
...
ValueError: invalid data structure for a multivariate polynomial
"""
levels = _rec_validate(f, f, 0, K)
u = levels.pop()
if not levels:
return _rec_strip(f, u), u
else:
raise ValueError(
"invalid data structure for a multivariate polynomial")
def dup_reverse(f):
"""
Compute ``x**n * f(1/x)``, i.e.: reverse ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_reverse
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_reverse(f)
[3, 2, 1]
"""
return dup_strip(list(reversed(f)))
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_copy([1, 2, 3, 0])
[1, 2, 3, 0]
"""
return list(f)
def dmp_copy(f, u):
"""
Create a new copy of a polynomial ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_copy
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_copy(f, 1)
[[1], [1, 2]]
"""
if not u:
return list(f)
v = u - 1
return [ dmp_copy(c, v) for c in f ]
def dup_to_tuple(f):
"""
Convert `f` into a tuple.
This is needed for hashing. This is similar to dup_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_to_tuple
    >>> f = ZZ.map([1, 2, 3, 0])
    >>> dup_to_tuple(f)
    (1, 2, 3, 0)
"""
return tuple(f)
def dmp_to_tuple(f, u):
"""
Convert `f` into a nested tuple of tuples.
This is needed for hashing. This is similar to dmp_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_to_tuple
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_to_tuple(f, 1)
((1,), (1, 2))
"""
if not u:
return tuple(f)
v = u - 1
return tuple(dmp_to_tuple(c, v) for c in f)
def dup_normal(f, K):
"""
Normalize univariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_normal
>>> dup_normal([0, 1.5, 2, 3], ZZ)
[1, 2, 3]
"""
return dup_strip([ K.normal(c) for c in f ])
def dmp_normal(f, u, K):
"""
Normalize a multivariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_normal
>>> dmp_normal([[], [0, 1.5, 2]], 1, ZZ)
[[1, 2]]
"""
if not u:
return dup_normal(f, K)
v = u - 1
return dmp_strip([ dmp_normal(c, v, K) for c in f ], u)
def dup_convert(f, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_convert
>>> R, x = ring("x", ZZ)
>>> dup_convert([R(1), R(2)], R.to_domain(), ZZ)
[1, 2]
>>> dup_convert([ZZ(1), ZZ(2)], ZZ, R.to_domain())
[1, 2]
"""
if K0 is not None and K0 == K1:
return f
else:
return dup_strip([ K1.convert(c, K0) for c in f ])
def dmp_convert(f, u, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_convert
>>> R, x = ring("x", ZZ)
>>> dmp_convert([[R(1)], [R(2)]], 1, R.to_domain(), ZZ)
[[1], [2]]
>>> dmp_convert([[ZZ(1)], [ZZ(2)]], 1, ZZ, R.to_domain())
[[1], [2]]
"""
if not u:
return dup_convert(f, K0, K1)
if K0 is not None and K0 == K1:
return f
v = u - 1
return dmp_strip([ dmp_convert(c, v, K0, K1) for c in f ], u)
def dup_from_sympy(f, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_sympy
>>> dup_from_sympy([S(1), S(2)], ZZ) == [ZZ(1), ZZ(2)]
True
"""
return dup_strip([ K.from_sympy(c) for c in f ])
def dmp_from_sympy(f, u, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_sympy
>>> dmp_from_sympy([[S(1)], [S(2)]], 1, ZZ) == [[ZZ(1)], [ZZ(2)]]
True
"""
if not u:
return dup_from_sympy(f, K)
v = u - 1
return dmp_strip([ dmp_from_sympy(c, v, K) for c in f ], u)
def dup_nth(f, n, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_nth
>>> f = ZZ.map([1, 2, 3])
>>> dup_nth(f, 0, ZZ)
3
>>> dup_nth(f, 4, ZZ)
0
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
return f[dup_degree(f) - n]
def dmp_nth(f, n, u, K):
"""
    Return the ``n``-th coefficient of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nth
>>> f = ZZ.map([[1], [2], [3]])
>>> dmp_nth(f, 0, 1, ZZ)
[3]
>>> dmp_nth(f, 4, 1, ZZ)
[]
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return dmp_zero(u - 1)
else:
return f[dmp_degree(f, u) - n]
def dmp_ground_nth(f, N, u, K):
"""
    Return the ground ``n``-th coefficient of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_nth
>>> f = ZZ.map([[1], [2, 3]])
>>> dmp_ground_nth(f, (0, 1), 1, ZZ)
2
"""
v = u
for n in N:
if n < 0:
raise IndexError("`n` must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
d = dmp_degree(f, v)
if d == -oo:
d = -1
f, v = f[d - n], v - 1
return f
def dmp_zero_p(f, u):
"""
Return ``True`` if ``f`` is zero in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero_p
>>> dmp_zero_p([[[[[]]]]], 4)
True
>>> dmp_zero_p([[[[[1]]]]], 4)
False
"""
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
return not f
def dmp_zero(u):
"""
Return a multivariate zero.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero
>>> dmp_zero(4)
[[[[[]]]]]
"""
r = []
for i in range(u):
r = [r]
return r
def dmp_one_p(f, u, K):
"""
Return ``True`` if ``f`` is one in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one_p
>>> dmp_one_p([[[ZZ(1)]]], 2, ZZ)
True
"""
return dmp_ground_p(f, K.one, u)
def dmp_one(u, K):
"""
Return a multivariate one over ``K``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one
>>> dmp_one(2, ZZ)
[[[1]]]
"""
return dmp_ground(K.one, u)
def dmp_ground_p(f, c, u):
"""
Return True if ``f`` is constant in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground_p
>>> dmp_ground_p([[[3]]], 3, 2)
True
>>> dmp_ground_p([[[4]]], None, 2)
True
"""
if c is not None and not c:
return dmp_zero_p(f, u)
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
if c is None:
return len(f) <= 1
else:
return f == [c]
def dmp_ground(c, u):
"""
Return a multivariate constant.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground
>>> dmp_ground(3, 5)
[[[[[[3]]]]]]
>>> dmp_ground(1, -1)
1
"""
if not c:
return dmp_zero(u)
for i in range(u + 1):
c = [c]
return c
def dmp_zeros(n, u, K):
"""
Return a list of multivariate zeros.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_zeros
>>> dmp_zeros(3, 2, ZZ)
[[[[]]], [[[]]], [[[]]]]
>>> dmp_zeros(3, -1, ZZ)
[0, 0, 0]
"""
if not n:
return []
if u < 0:
return [K.zero]*n
else:
return [ dmp_zero(u) for i in range(n) ]
def dmp_grounds(c, n, u):
"""
Return a list of multivariate constants.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_grounds
>>> dmp_grounds(ZZ(4), 3, 2)
[[[[4]]], [[[4]]], [[[4]]]]
>>> dmp_grounds(ZZ(4), 3, -1)
[4, 4, 4]
"""
if not n:
return []
if u < 0:
return [c]*n
else:
return [ dmp_ground(c, u) for i in range(n) ]
def dmp_negative_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is negative.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_negative_p
>>> dmp_negative_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
False
>>> dmp_negative_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
True
"""
return K.is_negative(dmp_ground_LC(f, u, K))
def dmp_positive_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is positive.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_positive_p
>>> dmp_positive_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
True
>>> dmp_positive_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
False
"""
return K.is_positive(dmp_ground_LC(f, u, K))
def dup_from_dict(f, K):
"""
Create a ``K[x]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_dict
>>> dup_from_dict({(0,): ZZ(7), (2,): ZZ(5), (4,): ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
>>> dup_from_dict({}, ZZ)
[]
"""
if not f:
return []
n, h = max(f.keys()), []
if type(n) is int:
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
else:
(n,) = n
for k in range(n, -1, -1):
h.append(f.get((k,), K.zero))
return dup_strip(h)
def dup_from_raw_dict(f, K):
"""
Create a ``K[x]`` polynomial from a raw ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_raw_dict
>>> dup_from_raw_dict({0: ZZ(7), 2: ZZ(5), 4: ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
"""
if not f:
return []
n, h = max(f.keys()), []
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
return dup_strip(h)
def dmp_from_dict(f, u, K):
"""
Create a ``K[X]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_dict
>>> dmp_from_dict({(0, 0): ZZ(3), (0, 1): ZZ(2), (2, 1): ZZ(1)}, 1, ZZ)
[[1, 0], [], [2, 3]]
>>> dmp_from_dict({}, 0, ZZ)
[]
"""
if not u:
return dup_from_dict(f, K)
if not f:
return dmp_zero(u)
coeffs = {}
for monom, coeff in f.items():
head, tail = monom[0], monom[1:]
if head in coeffs:
coeffs[head][tail] = coeff
else:
coeffs[head] = { tail: coeff }
n, v, h = max(coeffs.keys()), u - 1, []
for k in range(n, -1, -1):
coeff = coeffs.get(k)
if coeff is not None:
h.append(dmp_from_dict(coeff, v, K))
else:
h.append(dmp_zero(v))
return dmp_strip(h, u)
def dup_to_dict(f, K=None, zero=False):
"""
Convert ``K[x]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_dict
>>> dup_to_dict([1, 0, 5, 0, 7])
{(0,): 7, (2,): 5, (4,): 1}
>>> dup_to_dict([])
{}
"""
if not f and zero:
return {(0,): K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[(k,)] = f[n - k]
return result
def dup_to_raw_dict(f, K=None, zero=False):
"""
Convert a ``K[x]`` polynomial to a raw ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_raw_dict
>>> dup_to_raw_dict([1, 0, 5, 0, 7])
{0: 7, 2: 5, 4: 1}
"""
if not f and zero:
return {0: K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[k] = f[n - k]
return result
def dmp_to_dict(f, u, K=None, zero=False):
"""
    Convert a ``K[X]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dmp_to_dict
>>> dmp_to_dict([[1, 0], [], [2, 3]], 1)
{(0, 0): 3, (0, 1): 2, (2, 1): 1}
>>> dmp_to_dict([], 0)
{}
"""
if not u:
return dup_to_dict(f, K, zero=zero)
if dmp_zero_p(f, u) and zero:
return {(0,)*(u + 1): K.zero}
n, v, result = dmp_degree(f, u), u - 1, {}
if n == -oo:
n = -1
for k in range(0, n + 1):
h = dmp_to_dict(f[n - k], v)
for exp, coeff in h.items():
result[(k,) + exp] = coeff
return result
def dmp_swap(f, i, j, u, K):
"""
Transform ``K[..x_i..x_j..]`` to ``K[..x_j..x_i..]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_swap
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_swap(f, 0, 1, 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_swap(f, 1, 2, 2, ZZ)
[[[1], [2, 0]], [[]]]
>>> dmp_swap(f, 0, 2, 2, ZZ)
[[[1, 0]], [[2, 0], []]]
"""
if i < 0 or j < 0 or i > u or j > u:
raise IndexError("0 <= i < j <= %s expected" % u)
elif i == j:
return f
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
H[exp[:i] + (exp[j],) +
exp[i + 1:j] +
(exp[i],) + exp[j + 1:]] = coeff
return dmp_from_dict(H, u, K)
def dmp_permute(f, P, u, K):
"""
Return a polynomial in ``K[x_{P(1)},..,x_{P(n)}]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_permute
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_permute(f, [1, 0, 2], 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_permute(f, [1, 2, 0], 2, ZZ)
[[[1], []], [[2, 0], []]]
"""
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
new_exp = [0]*len(exp)
for e, p in zip(exp, P):
new_exp[p] = e
H[tuple(new_exp)] = coeff
return dmp_from_dict(H, u, K)
def dmp_nest(f, l, K):
"""
Return a multivariate value nested ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nest
>>> dmp_nest([[ZZ(1)]], 2, ZZ)
[[[[1]]]]
"""
if not isinstance(f, list):
return dmp_ground(f, l)
for i in range(l):
f = [f]
return f
def dmp_raise(f, l, u, K):
"""
Return a multivariate polynomial raised ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_raise
>>> f = ZZ.map([[], [1, 2]])
>>> dmp_raise(f, 2, 1, ZZ)
[[[[]]], [[[1]], [[2]]]]
"""
if not l:
return f
if not u:
if not f:
return dmp_zero(l)
k = l - 1
return [ dmp_ground(c, k) for c in f ]
v = u - 1
return [ dmp_raise(c, l, v, K) for c in f ]
def dup_deflate(f, K):
"""
Map ``x**m`` to ``y`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_deflate
>>> f = ZZ.map([1, 0, 0, 1, 0, 0, 1])
>>> dup_deflate(f, ZZ)
(3, [1, 1, 1])
"""
if dup_degree(f) <= 0:
return 1, f
g = 0
for i in range(len(f)):
if not f[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, f
return g, f[::g]
def dmp_deflate(f, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> dmp_deflate(f, 1, ZZ)
((2, 3), [[1, 2], [3, 4]])
"""
if dmp_zero_p(f, u):
return (1,)*(u + 1), f
F = dmp_to_dict(f, u)
B = [0]*(u + 1)
for M in F.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, f
H = {}
for A, coeff in F.items():
N = [ a // b for a, b in zip(A, B) ]
H[tuple(N)] = coeff
return B, dmp_from_dict(H, u, K)
def dup_multi_deflate(polys, K):
"""
Map ``x**m`` to ``y`` in a set of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_multi_deflate
>>> f = ZZ.map([1, 0, 2, 0, 3])
>>> g = ZZ.map([4, 0, 0])
>>> dup_multi_deflate((f, g), ZZ)
(2, ([1, 2, 3], [4, 0]))
"""
G = 0
for p in polys:
if dup_degree(p) <= 0:
return 1, polys
g = 0
for i in range(len(p)):
if not p[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, polys
G = igcd(G, g)
return G, tuple([ p[::G] for p in polys ])
def dmp_multi_deflate(polys, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a set of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_multi_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> g = ZZ.map([[1, 0, 2], [], [3, 0, 4]])
>>> dmp_multi_deflate((f, g), 1, ZZ)
((2, 1), ([[1, 0, 0, 2], [3, 0, 0, 4]], [[1, 0, 2], [3, 0, 4]]))
"""
if not u:
M, H = dup_multi_deflate(polys, K)
return (M,), H
F, B = [], [0]*(u + 1)
for p in polys:
f = dmp_to_dict(p, u)
if not dmp_zero_p(p, u):
for M in f.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
F.append(f)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, polys
H = []
for f in F:
h = {}
for A, coeff in f.items():
N = [ a // b for a, b in zip(A, B) ]
h[tuple(N)] = coeff
H.append(dmp_from_dict(h, u, K))
return B, tuple(H)
def dup_inflate(f, m, K):
"""
Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_inflate
>>> f = ZZ.map([1, 1, 1])
>>> dup_inflate(f, 3, ZZ)
[1, 0, 0, 1, 0, 0, 1]
"""
if m <= 0:
raise IndexError("'m' must be positive, got %s" % m)
if m == 1 or not f:
return f
result = [f[0]]
for coeff in f[1:]:
result.extend([K.zero]*(m - 1))
result.append(coeff)
return result
def _rec_inflate(g, M, v, i, K):
"""Recursive helper for :func:`dmp_inflate`."""
if not v:
return dup_inflate(g, M[i], K)
if M[i] <= 0:
raise IndexError("all M[i] must be positive, got %s" % M[i])
w, j = v - 1, i + 1
g = [ _rec_inflate(c, M, w, j, K) for c in g ]
result = [g[0]]
for coeff in g[1:]:
for _ in range(1, M[i]):
result.append(dmp_zero(w))
result.append(coeff)
return result
def dmp_inflate(f, M, u, K):
"""
Map ``y_i`` to ``x_i**k_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inflate
>>> f = ZZ.map([[1, 2], [3, 4]])
>>> dmp_inflate(f, (2, 3), 1, ZZ)
[[1, 0, 0, 2], [], [3, 0, 0, 4]]
"""
if not u:
return dup_inflate(f, M[0], K)
if all(m == 1 for m in M):
return f
else:
return _rec_inflate(f, M, u, 0, K)
def dmp_exclude(f, u, K):
"""
Exclude useless levels from ``f``.
Return the levels excluded, the new excluded ``f``, and the new ``u``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_exclude
>>> f = ZZ.map([[[1]], [[1], [2]]])
>>> dmp_exclude(f, 2, ZZ)
([2], [[1], [1, 2]], 1)
"""
if not u or dmp_ground_p(f, None, u):
return [], f, u
J, F = [], dmp_to_dict(f, u)
for j in range(0, u + 1):
for monom in F.keys():
if monom[j]:
break
else:
J.append(j)
if not J:
return [], f, u
f = {}
for monom, coeff in F.items():
monom = list(monom)
for j in reversed(J):
del monom[j]
f[tuple(monom)] = coeff
u -= len(J)
return J, dmp_from_dict(f, u, K), u
def dmp_include(f, J, u, K):
"""
Include useless levels in ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_include
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_include(f, [2], 1, ZZ)
[[[1]], [[1], [2]]]
"""
if not J:
return f
F, f = dmp_to_dict(f, u), {}
for monom, coeff in F.items():
monom = list(monom)
for j in J:
monom.insert(j, 0)
f[tuple(monom)] = coeff
u += len(J)
return dmp_from_dict(f, u, K)
def dmp_inject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X][Y]`` to ``K[X,Y]``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inject
>>> R, x,y = ring("x,y", ZZ)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain())
([[[1]], [[1], [2]]], 2)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain(), front=True)
([[[1]], [[1, 2]]], 2)
"""
f, h = dmp_to_dict(f, u), {}
v = K.ngens - 1
for f_monom, g in f.items():
g = g.to_dict()
for g_monom, c in g.items():
if front:
h[g_monom + f_monom] = c
else:
h[f_monom + g_monom] = c
w = u + v + 1
return dmp_from_dict(h, w, K.dom), w
def dmp_eject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X,Y]`` to ``K[X][Y]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_eject
>>> dmp_eject([[[1]], [[1], [2]]], 2, ZZ['x', 'y'])
[1, x + 2]
"""
f, h = dmp_to_dict(f, u), {}
n = K.ngens
v = u - K.ngens + 1
for monom, c in f.items():
if front:
g_monom, f_monom = monom[:n], monom[n:]
else:
g_monom, f_monom = monom[-n:], monom[:-n]
if f_monom in h:
h[f_monom][g_monom] = c
else:
h[f_monom] = {g_monom: c}
for monom, c in h.items():
h[monom] = K(c)
return dmp_from_dict(h, v - 1, K)
def dup_terms_gcd(f, K):
"""
Remove GCD of terms from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_terms_gcd
>>> f = ZZ.map([1, 0, 1, 0, 0])
>>> dup_terms_gcd(f, ZZ)
(2, [1, 0, 1])
"""
if dup_TC(f, K) or not f:
return 0, f
i = 0
for c in reversed(f):
if not c:
i += 1
else:
break
return i, f[:-i]
def dmp_terms_gcd(f, u, K):
"""
Remove GCD of terms from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_terms_gcd
>>> f = ZZ.map([[1, 0], [1, 0, 0], [], []])
>>> dmp_terms_gcd(f, 1, ZZ)
((2, 1), [[1], [1, 0]])
"""
if dmp_ground_TC(f, u, K) or dmp_zero_p(f, u):
return (0,)*(u + 1), f
F = dmp_to_dict(f, u)
G = monomial_min(*list(F.keys()))
if all(g == 0 for g in G):
return G, f
f = {}
for monom, coeff in F.items():
f[monomial_div(monom, G)] = coeff
return G, dmp_from_dict(f, u, K)
def _rec_list_terms(g, v, monom):
"""Recursive helper for :func:`dmp_list_terms`."""
d, terms = dmp_degree(g, v), []
if not v:
for i, c in enumerate(g):
if not c:
continue
terms.append((monom + (d - i,), c))
else:
w = v - 1
for i, c in enumerate(g):
terms.extend(_rec_list_terms(c, w, monom + (d - i,)))
return terms
def dmp_list_terms(f, u, K, order=None):
"""
List all non-zero terms from ``f`` in the given order ``order``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_list_terms
>>> f = ZZ.map([[1, 1], [2, 3]])
>>> dmp_list_terms(f, 1, ZZ)
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
>>> dmp_list_terms(f, 1, ZZ, order='grevlex')
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
"""
def sort(terms, O):
return sorted(terms, key=lambda term: O(term[0]), reverse=True)
terms = _rec_list_terms(f, u, ())
if not terms:
return [((0,)*(u + 1), K.zero)]
if order is None:
return terms
else:
return sort(terms, monomial_key(order))
def dup_apply_pairs(f, g, h, args, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dup_apply_pairs([1, 2, 3], [3, 2, 1], h, (1,), ZZ)
[4, 5, 6]
"""
n, m = len(f), len(g)
if n != m:
if n > m:
g = [K.zero]*(n - m) + g
else:
f = [K.zero]*(m - n) + f
result = []
for a, b in zip(f, g):
result.append(h(a, b, *args))
return dup_strip(result)
def dmp_apply_pairs(f, g, h, args, u, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dmp_apply_pairs([[1], [2, 3]], [[3], [2, 1]], h, (1,), 1, ZZ)
[[4], [5, 6]]
"""
if not u:
return dup_apply_pairs(f, g, h, args, K)
n, m, v = len(f), len(g), u - 1
if n != m:
if n > m:
g = dmp_zeros(n - m, v, K) + g
else:
f = dmp_zeros(m - n, v, K) + f
result = []
for a, b in zip(f, g):
result.append(dmp_apply_pairs(a, b, h, args, v, K))
return dmp_strip(result, u)
def dup_slice(f, m, n, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[x]``. """
k = len(f)
if k >= m:
M = k - m
else:
M = 0
if k >= n:
N = k - n
else:
N = 0
f = f[N:M]
if not f:
return []
else:
return f + [K.zero]*m
def dmp_slice(f, m, n, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[X]``. """
return dmp_slice_in(f, m, n, 0, u, K)
def dmp_slice_in(f, m, n, j, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``x_j`` in ``K[X]``. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not u:
return dup_slice(f, m, n, K)
f, g = dmp_to_dict(f, u), {}
for monom, coeff in f.items():
k = monom[j]
if k < m or k >= n:
monom = monom[:j] + (0,) + monom[j + 1:]
if monom in g:
g[monom] += coeff
else:
g[monom] = coeff
return dmp_from_dict(g, u, K)
def dup_random(n, a, b, K):
"""
Return a polynomial of degree ``n`` with coefficients in ``[a, b]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_random
>>> dup_random(3, -10, 10, ZZ) #doctest: +SKIP
[-2, -8, 9, -4]
"""
f = [ K.convert(random.randint(a, b)) for _ in range(0, n + 1) ]
while not f[0]:
f[0] = K.convert(random.randint(a, b))
return f
| bsd-3-clause |
ALISCIFP/tensorflow-resnet-segmentation | convert_mhd2jpg_png_LUNA16.py | 1 | 3214 | #!/usr/bin/env python
# This script belongs to https://github.com/
# This script converts LUNA16 .mhd volumes into 3-channel .jpg slices and .png label masks.
__author__ = "Zengming Shen,Email:[email protected]"
import os,glob
import argparse
import numpy as np
import SimpleITK as sitk
from PIL import Image
import cv2
import scipy.misc
DATA_DIRECTORY = '/home/zack/Data/LUNA16/'
OUT_DIRECTORY = "/home/zack/Data/LUNA16/"
def mhd2ndarray(data_file):
itkimg = sitk.ReadImage(data_file)
img=sitk.GetArrayFromImage(itkimg)
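    # SimpleITK yields a (slices, height, width) array; reorder it to
    # (height, width, slices) so the slice index is the last axis.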
img = np.transpose(img,(1,2,0))
return img
def ndarry2jpg_png(data_file,out_dir,subsetIndex,flist):
data_path,fn = os.path.split(data_file)
# img_gt_file= data_path+"output/yes_lesion_no_rescale/seg/"+fn
img_gt_file = data_file.replace("subset"+str(subsetIndex),"output/yes_lesion_no_rescale/subset"+str(subsetIndex)+"/seg")
img = mhd2ndarray(data_file)
img_gt = mhd2ndarray(img_gt_file)
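    # Pad one slice of air (-3024 HU) on both ends of the stack so that every
    # slice i can be bundled with its two neighbours into a 3-channel image below.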
img_pad=np.lib.pad(img, ((0, 0),(0,0),(1,1)), 'constant', constant_values=(-3024, -3024))
# img_pos = img_pad-img_pad.min()
# img_pad = img_pos*(255.0/img_pos.max())
for i in xrange(0,img.shape[2]):
img3c = img_pad[:,:,i:i+3]
try:
scipy.misc.imsave(os.path.join(out_dir+"JPEGImages/subset"+str(subsetIndex),fn+"_"+str(i)+".jpg"), img3c)
except ValueError:
print fn
pass
# im = Image.fromarray(img3c)
# im.save(os.path.join(out_dir+"JPEGImages/subset"+str(subsetIndex),fn+"_"+str(i)+"_"+".jpg"))
cv2.imwrite(os.path.join(out_dir+"PNGImages/subset"+str(subsetIndex),fn+"_"+str(i)+".png"),img_gt[:,:,i])
flist.write("/JPEGImages/subset"+str(subsetIndex)+"/"+fn+"_"+str(i)+".jpg "+"/PNGImages/subset"+str(subsetIndex)+"/"+fn+"_"+str(i)+".png\n")
def convert(data_dir,out_dir):
ftrain = open(data_dir + "dataset/train.txt", 'a')
fval = open(data_dir + "dataset/val.txt", 'w')
for i in xrange(3,10):
print "converting subset "+str(i)
os.chdir(data_dir + "subset" + str(i))
if not os.path.exists(data_dir + "JPEGImages/subset" + str(i)):
os.mkdir(data_dir + "JPEGImages/subset" + str(i))
if not os.path.exists(data_dir + "PNGImages/subset" + str(i)):
os.mkdir(data_dir + "PNGImages/subset" + str(i))
for file in glob.glob("*.mhd"):
if i<8:
ndarry2jpg_png(os.path.join(data_dir + "subset" + str(i),file), out_dir, i,ftrain)
else:
ndarry2jpg_png(os.path.join(data_dir + "subset" + str(i),file), out_dir, i,fval)
ftrain.close()
fval.close()
print "done."
def main():
    parser = argparse.ArgumentParser(description="mhd to jpg-png file converter")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the ILD dataset.")
parser.add_argument("--out-dir", type=str, default=OUT_DIRECTORY,
help="Path to the directory containing the ILD dataset in jpg and png format.")
args = parser.parse_args()
convert(args.data_dir,args.out_dir)
if __name__ == '__main__':
main()
| mit |
theguardian/KodiDB | cherrypy/_cpdispatch.py | 39 | 24149 | """CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info, other
request attributes, and the application architecture. The core calls the
dispatcher as early as possible, passing it a 'path_info' argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import string
import sys
import types
try:
classtype = (type, types.ClassType)
except AttributeError:
classtype = type
import cherrypy
from cherrypy._cpcompat import set
class PageHandler(object):
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
def __call__(self):
try:
return self.callable(*self.args, **self.kwargs)
except TypeError:
x = sys.exc_info()[1]
try:
test_callable_spec(self.callable, self.args, self.kwargs)
except cherrypy.HTTPError:
raise sys.exc_info()[1]
except:
raise x
raise
def test_callable_spec(callable, callable_args, callable_kwargs):
"""
Inspect callable and test to see if the given args are suitable for it.
When an error occurs during the handler's invoking stage there are 2
erroneous cases:
1. Too many parameters passed to a function which doesn't define
one of *args or **kwargs.
2. Too little parameters are passed to the function.
There are 3 sources of parameters to a cherrypy handler.
1. query string parameters are passed as keyword parameters to the handler.
2. body parameters are also passed as keyword parameters.
3. when partial matching occurs, the final path atoms are passed as
positional args.
Both the query string and path atoms are part of the URI. If they are
incorrect, then a 404 Not Found should be raised. Conversely the body
    parameters are part of the request; if they are invalid, a 400 Bad Request
    should be raised.
"""
show_mismatched_params = getattr(
cherrypy.serving.request, 'show_mismatched_params', False)
try:
(args, varargs, varkw, defaults) = inspect.getargspec(callable)
except TypeError:
if isinstance(callable, object) and hasattr(callable, '__call__'):
(args, varargs, varkw, defaults) = inspect.getargspec(callable.__call__)
else:
# If it wasn't one of our own types, re-raise
# the original error
raise
if args and args[0] == 'self':
args = args[1:]
arg_usage = dict([(arg, 0,) for arg in args])
vararg_usage = 0
varkw_usage = 0
extra_kwargs = set()
for i, value in enumerate(callable_args):
try:
arg_usage[args[i]] += 1
except IndexError:
vararg_usage += 1
for key in callable_kwargs.keys():
try:
arg_usage[key] += 1
except KeyError:
varkw_usage += 1
extra_kwargs.add(key)
# figure out which args have defaults.
args_with_defaults = args[-len(defaults or []):]
for i, val in enumerate(defaults or []):
# Defaults take effect only when the arg hasn't been used yet.
if arg_usage[args_with_defaults[i]] == 0:
arg_usage[args_with_defaults[i]] += 1
missing_args = []
multiple_args = []
for key, usage in arg_usage.items():
if usage == 0:
missing_args.append(key)
elif usage > 1:
multiple_args.append(key)
if missing_args:
# In the case where the method allows body arguments
# there are 3 potential errors:
# 1. not enough query string parameters -> 404
# 2. not enough body parameters -> 400
# 3. not enough path parts (partial matches) -> 404
#
# We can't actually tell which case it is,
# so I'm raising a 404 because that covers 2/3 of the
# possibilities
#
# In the case where the method does not allow body
# arguments it's definitely a 404.
message = None
if show_mismatched_params:
message="Missing parameters: %s" % ",".join(missing_args)
raise cherrypy.HTTPError(404, message=message)
# the extra positional arguments come from the path - 404 Not Found
if not varargs and vararg_usage > 0:
raise cherrypy.HTTPError(404)
body_params = cherrypy.serving.request.body.params or {}
body_params = set(body_params.keys())
qs_params = set(callable_kwargs.keys()) - body_params
if multiple_args:
if qs_params.intersection(set(multiple_args)):
# If any of the multiple parameters came from the query string then
# it's a 404 Not Found
error = 404
else:
# Otherwise it's a 400 Bad Request
error = 400
message = None
if show_mismatched_params:
message="Multiple values for parameters: "\
"%s" % ",".join(multiple_args)
raise cherrypy.HTTPError(error, message=message)
if not varkw and varkw_usage > 0:
# If there were extra query string parameters, it's a 404 Not Found
extra_qs_params = set(qs_params).intersection(extra_kwargs)
if extra_qs_params:
message = None
if show_mismatched_params:
message="Unexpected query string "\
"parameters: %s" % ", ".join(extra_qs_params)
raise cherrypy.HTTPError(404, message=message)
        # If there were any extra body parameters, it's a 400 Bad Request
extra_body_params = set(body_params).intersection(extra_kwargs)
if extra_body_params:
message = None
if show_mismatched_params:
message="Unexpected body parameters: "\
"%s" % ", ".join(extra_body_params)
raise cherrypy.HTTPError(400, message=message)
try:
import inspect
except ImportError:
test_callable_spec = lambda callable, args, kwargs: None
class LateParamPageHandler(PageHandler):
"""When passing cherrypy.request.params to the page handler, we do not
want to capture that dict too early; we want to give tools like the
decoding tool a chance to modify the params dict in-between the lookup
of the handler and the actual calling of the handler. This subclass
takes that into account, and allows request.params to be 'bound late'
(it's more complicated than that, but that's the effect).
"""
def _get_kwargs(self):
kwargs = cherrypy.serving.request.params.copy()
if self._kwargs:
kwargs.update(self._kwargs)
return kwargs
def _set_kwargs(self, kwargs):
self._kwargs = kwargs
kwargs = property(_get_kwargs, _set_kwargs,
doc='page handler kwargs (with '
'cherrypy.request.params copied in)')
if sys.version_info < (3, 0):
punctuation_to_underscores = string.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, str) or len(t) != 256:
raise ValueError("The translate argument must be a str of len 256.")
else:
punctuation_to_underscores = str.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, dict):
raise ValueError("The translate argument must be a dict.")
class Dispatcher(object):
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
The tree is rooted at cherrypy.request.app.root, and each hierarchical
component in the path_info argument is matched to a corresponding nested
attribute of the root object. Matching handlers must have an 'exposed'
attribute which evaluates to True. The special method name "index"
matches a URI which ends in a slash ("/"). The special method name
"default" may match a portion of the path_info (but only when no longer
substring of the path_info matches some other object).
This is the default, built-in dispatcher for CherryPy.
"""
dispatch_method_name = '_cp_dispatch'
"""
The name of the dispatch method that nodes may optionally implement
to provide their own dynamic dispatch algorithm.
"""
def __init__(self, dispatch_method_name=None,
translate=punctuation_to_underscores):
validate_translator(translate)
self.translate = translate
if dispatch_method_name:
self.dispatch_method_name = dispatch_method_name
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
func, vpath = self.find_handler(path_info)
if func:
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.NotFound()
def find_handler(self, path):
"""Return the appropriate page handler, plus any virtual path.
This will return two objects. The first will be a callable,
which can be used to generate page output. Any parameters from
the query string or request body will be sent to that callable
as keyword arguments.
The callable is found by traversing the application's tree,
starting from cherrypy.request.app.root, and matching path
components to successive objects in the tree. For example, the
URL "/path/to/handler" might return root.path.to.handler.
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
and were not used when looking up the handler.
These virtual path components are passed to the handler as
positional arguments.
"""
request = cherrypy.serving.request
app = request.app
root = app.root
dispatch_name = self.dispatch_method_name
# Get config for the root object/path.
fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
fullpath_len = len(fullpath)
segleft = fullpath_len
nodeconf = {}
if hasattr(root, "_cp_config"):
nodeconf.update(root._cp_config)
if "/" in app.config:
nodeconf.update(app.config["/"])
object_trail = [['root', root, nodeconf, segleft]]
node = root
iternames = fullpath[:]
while iternames:
name = iternames[0]
# map to legal Python identifiers (e.g. replace '.' with '_')
objname = name.translate(self.translate)
nodeconf = {}
subnode = getattr(node, objname, None)
pre_len = len(iternames)
if subnode is None:
dispatch = getattr(node, dispatch_name, None)
if dispatch and hasattr(dispatch, '__call__') and not \
getattr(dispatch, 'exposed', False) and \
pre_len > 1:
#Don't expose the hidden 'index' token to _cp_dispatch
#We skip this if pre_len == 1 since it makes no sense
#to call a dispatcher when we have no tokens left.
index_name = iternames.pop()
subnode = dispatch(vpath=iternames)
iternames.append(index_name)
else:
#We didn't find a path, but keep processing in case there
#is a default() handler.
iternames.pop(0)
else:
#We found the path, remove the vpath entry
iternames.pop(0)
segleft = len(iternames)
if segleft > pre_len:
#No path segment was removed. Raise an error.
raise cherrypy.CherryPyException(
"A vpath segment was added. Custom dispatchers may only "
+ "remove elements. While trying to process "
+ "{0} in {1}".format(name, fullpath)
)
elif segleft == pre_len:
#Assume that the handler used the current path segment, but
#did not pop it. This allows things like
#return getattr(self, vpath[0], None)
iternames.pop(0)
segleft -= 1
node = subnode
if node is not None:
# Get _cp_config attached to this node.
if hasattr(node, "_cp_config"):
nodeconf.update(node._cp_config)
# Mix in values from app.config for this path.
existing_len = fullpath_len - pre_len
if existing_len != 0:
curpath = '/' + '/'.join(fullpath[0:existing_len])
else:
curpath = ''
new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
for seg in new_segs:
curpath += '/' + seg
if curpath in app.config:
nodeconf.update(app.config[curpath])
object_trail.append([name, node, nodeconf, segleft])
def set_conf():
"""Collapse all object_trail config into cherrypy.request.config."""
base = cherrypy.config.copy()
# Note that we merge the config from each node
# even if that node was None.
for name, obj, conf, segleft in object_trail:
base.update(conf)
if 'tools.staticdir.dir' in conf:
base['tools.staticdir.section'] = '/' + '/'.join(fullpath[0:fullpath_len - segleft])
return base
# Try successive objects (reverse order)
num_candidates = len(object_trail) - 1
for i in range(num_candidates, -1, -1):
name, candidate, nodeconf, segleft = object_trail[i]
if candidate is None:
continue
# Try a "default" method on the current leaf.
if hasattr(candidate, "default"):
defhandler = candidate.default
if getattr(defhandler, 'exposed', False):
# Insert any extra _cp_config from the default handler.
conf = getattr(defhandler, "_cp_config", {})
object_trail.insert(i+1, ["default", defhandler, conf, segleft])
request.config = set_conf()
# See http://www.cherrypy.org/ticket/613
request.is_index = path.endswith("/")
return defhandler, fullpath[fullpath_len - segleft:-1]
# Uncomment the next line to restrict positional params to "default".
# if i < num_candidates - 2: continue
# Try the current leaf.
if getattr(candidate, 'exposed', False):
request.config = set_conf()
if i == num_candidates:
# We found the extra ".index". Mark request so tools
# can redirect if path_info has no trailing slash.
request.is_index = True
else:
# We're not at an 'index' handler. Mark request so tools
# can redirect if path_info has NO trailing slash.
# Note that this also includes handlers which take
# positional parameters (virtual paths).
request.is_index = False
return candidate, fullpath[fullpath_len - segleft:-1]
# We didn't find anything
request.config = set_conf()
return None, []
class MethodDispatcher(Dispatcher):
"""Additional dispatch based on cherrypy.request.method.upper().
Methods named GET, POST, etc will be called on an exposed class.
The method names must be all caps; the appropriate Allow header
will be output showing all capitalized method names as allowable
HTTP verbs.
Note that the containing class must be exposed, not the methods.
"""
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
resource, vpath = self.find_handler(path_info)
if resource:
# Set Allow header
avail = [m for m in dir(resource) if m.isupper()]
if "GET" in avail and "HEAD" not in avail:
avail.append("HEAD")
avail.sort()
cherrypy.serving.response.headers['Allow'] = ", ".join(avail)
# Find the subhandler
meth = request.method.upper()
func = getattr(resource, meth, None)
if func is None and meth == "HEAD":
func = getattr(resource, "GET", None)
if func:
# Grab any _cp_config on the subhandler.
if hasattr(func, "_cp_config"):
request.config.update(func._cp_config)
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.HTTPError(405)
else:
request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False):
"""
Routes dispatcher
Set full_result to True if you wish the controller
and the action to be passed on to the page handler
parameters. By default they won't be.
"""
import routes
self.full_result = full_result
self.controllers = {}
self.mapper = routes.Mapper()
self.mapper.controller_scan = self.controllers.keys
def connect(self, name, route, controller, **kwargs):
self.controllers[name] = controller
self.mapper.connect(name, route, controller=name, **kwargs)
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def __call__(self, path_info):
"""Set handler and config for the current request."""
func = self.find_handler(path_info)
if func:
cherrypy.serving.request.handler = LateParamPageHandler(func)
else:
cherrypy.serving.request.handler = cherrypy.NotFound()
def find_handler(self, path_info):
"""Find the right page handler, and set request.config."""
import routes
request = cherrypy.serving.request
config = routes.request_config()
config.mapper = self.mapper
if hasattr(request, 'wsgi_environ'):
config.environ = request.wsgi_environ
config.host = request.headers.get('Host', None)
config.protocol = request.scheme
config.redirect = self.redirect
result = self.mapper.match(path_info)
config.mapper_dict = result
params = {}
if result:
params = result.copy()
if not self.full_result:
params.pop('controller', None)
params.pop('action', None)
request.params.update(params)
# Get config for the root object/path.
request.config = base = cherrypy.config.copy()
curpath = ""
def merge(nodeconf):
if 'tools.staticdir.dir' in nodeconf:
nodeconf['tools.staticdir.section'] = curpath or "/"
base.update(nodeconf)
app = request.app
root = app.root
if hasattr(root, "_cp_config"):
merge(root._cp_config)
if "/" in app.config:
merge(app.config["/"])
# Mix in values from app.config.
atoms = [x for x in path_info.split("/") if x]
if atoms:
last = atoms.pop()
else:
last = None
for atom in atoms:
curpath = "/".join((curpath, atom))
if curpath in app.config:
merge(app.config[curpath])
handler = None
if result:
controller = result.get('controller')
controller = self.controllers.get(controller, controller)
if controller:
if isinstance(controller, classtype):
controller = controller()
# Get config from the controller.
if hasattr(controller, "_cp_config"):
merge(controller._cp_config)
action = result.get('action')
if action is not None:
handler = getattr(controller, action, None)
# Get config from the handler
if hasattr(handler, "_cp_config"):
merge(handler._cp_config)
else:
handler = controller
# Do the last path atom here so it can
# override the controller's _cp_config.
if last:
curpath = "/".join((curpath, last))
if curpath in app.config:
merge(app.config[curpath])
return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
from cherrypy.lib import xmlrpcutil
def xmlrpc_dispatch(path_info):
path_info = xmlrpcutil.patched_path(path_info)
return next_dispatcher(path_info)
return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains):
"""
Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
website structure. For example::
http://www.domain.example -> root
http://www.domain2.example -> root/domain2/
http://www.domain2.example:443 -> root/secure
can be accomplished via the following config::
[/]
request.dispatch = cherrypy.dispatch.VirtualHost(
**{'www.domain2.example': '/domain2',
'www.domain2.example:443': '/secure',
})
next_dispatcher
The next dispatcher object in the dispatch chain.
The VirtualHost dispatcher adds a prefix to the URL and calls
another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
use_x_forwarded_host
If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
``**domains``
A dict of {host header value: virtual prefix} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding "virtual prefix"
value will be prepended to the URL path before calling the
next dispatcher. Note that you often need separate entries
for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
from cherrypy.lib import httputil
def vhost_dispatch(path_info):
request = cherrypy.serving.request
header = request.headers.get
domain = header('Host', '')
if use_x_forwarded_host:
domain = header("X-Forwarded-Host", domain)
prefix = domains.get(domain, "")
if prefix:
path_info = httputil.urljoin(prefix, path_info)
result = next_dispatcher(path_info)
# Touch up staticdir config. See http://www.cherrypy.org/ticket/614.
section = request.config.get('tools.staticdir.section')
if section:
section = section[len(prefix):]
request.config['tools.staticdir.section'] = section
return result
return vhost_dispatch
| gpl-2.0 |
criccomini/airflow | airflow/example_dags/example_skip_dag.py | 14 | 1865 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import airflow
from airflow.operators.dummy_operator import DummyOperator
from airflow.models import DAG
from airflow.exceptions import AirflowSkipException
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
# Create some placeholder operators
class DummySkipOperator(DummyOperator):
ui_color = '#e8b7e4'
def execute(self, context):
raise AirflowSkipException
dag = DAG(dag_id='example_skip_dag', default_args=args)
def create_test_pipeline(suffix, trigger_rule, dag):
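    """Build one pipeline per trigger rule: a task that always skips and a task
    that always succeeds both feed a join task using ``trigger_rule``, followed
    by a final task."""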
skip_operator = DummySkipOperator(task_id='skip_operator_{}'.format(suffix), dag=dag)
always_true = DummyOperator(task_id='always_true_{}'.format(suffix), dag=dag)
join = DummyOperator(task_id=trigger_rule, dag=dag, trigger_rule=trigger_rule)
join.set_upstream(skip_operator)
join.set_upstream(always_true)
final = DummyOperator(task_id='final_{}'.format(suffix), dag=dag)
final.set_upstream(join)
create_test_pipeline('1', 'all_success', dag)
create_test_pipeline('2', 'one_success', dag)
| apache-2.0 |
poryfly/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
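        # Closed-form kernel ridge solve: find dual_coef_ such that
        # (K + alpha * I) @ dual_coef_ = y (Cholesky-based, see class docstring).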
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
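        # Predictions are the kernel between new and training samples applied
        # to the dual coefficients: y_hat = K(X, X_fit_) @ dual_coef_.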
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
jeffmarcom/checkbox | checkbox_gtk/hyper_text_view.py | 2 | 4689 | #
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gtk, GObject, Pango, Gdk
class HyperTextView(Gtk.TextView):
__gtype_name__ = "HyperTextView"
__gsignals__ = {"anchor-clicked": (GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE, (str, str, int))}
__gproperties__ = {
"link": (GObject.TYPE_PYOBJECT, "link color", "link color of TextView", GObject.PARAM_READWRITE),
"active":(GObject.TYPE_PYOBJECT, "active color", "active color of TextView", GObject.PARAM_READWRITE),
"hover": (GObject.TYPE_PYOBJECT, "link:hover color", "link:hover color of TextView", GObject.PARAM_READWRITE),
}
def do_get_property(self, prop):
try:
return getattr(self, prop.name)
except AttributeError:
raise AttributeError("unknown property %s" % prop.name)
def do_set_property(self, prop, val):
if prop.name in list(self.__gproperties__.keys()):
setattr(self, prop.name, val)
else:
raise AttributeError("unknown property %s" % prop.name)
def __init__(self, buffer=None):
super(HyperTextView, self).__init__(buffer=buffer)
self.link = {"foreground": "blue", "underline": Pango.Underline.SINGLE}
self.active = {"foreground": "red", "underline": Pango.Underline.SINGLE}
self.hover = {"foreground": "dark blue", "underline": Pango.Underline.SINGLE}
self.set_editable(False)
self.set_cursor_visible(False)
self.__tags = []
self.connect("motion-notify-event", self._motion)
self.connect("focus-out-event", lambda w, e: self.get_buffer().get_tag_table().foreach(self.__tag_reset, e.window))
def insert(self, text, _iter=None):
b = self.get_buffer()
if _iter is None:
_iter = b.get_end_iter()
b.insert(_iter, text)
def insert_with_anchor(self, text, anchor=None, _iter=None):
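        # Insert ``text`` styled as a link; ``anchor`` (defaulting to the text
        # itself) is reported back through the "anchor-clicked" signal together
        # with the mouse button.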
b = self.get_buffer()
if _iter is None:
_iter = b.get_end_iter()
if anchor is None:
anchor = text
tag = b.create_tag(None, **self.get_property("link"))
tag.set_data("is_anchor", True)
tag.connect("event", self._tag_event, text, anchor)
self.__tags.append(tag)
b.insert_with_tags(_iter, text, tag)
def _motion(self, view, ev):
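        # Track the pointer: style the anchor tag under the cursor with the
        # "hover" colours and reset every other anchor tag.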
window = ev.window
_, x, y, _ = window.get_pointer()
x, y = view.window_to_buffer_coords(Gtk.TextWindowType.TEXT, x, y)
tags = view.get_iter_at_location(x, y).get_tags()
for tag in tags:
if tag.get_data("is_anchor"):
for t in set(self.__tags) - set([tag]):
self.__tag_reset(t, window)
self.__set_anchor(window, tag, Gdk.Cursor.new(Gdk.CursorType.HAND2), self.get_property("hover"))
break
else:
tag_table = self.get_buffer().get_tag_table()
tag_table.foreach(self.__tag_reset, window)
def _tag_event(self, tag, view, ev, _iter, text, anchor):
_type = ev.type
if _type == Gdk.EventType.MOTION_NOTIFY:
return
elif _type in [Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE]:
button = ev.button
cursor = Gdk.Cursor.new(Gdk.CursorType.HAND2)
if _type == Gdk.EventType.BUTTON_RELEASE:
self.emit("anchor-clicked", text, anchor, button.button)
self.__set_anchor(ev.window, tag, cursor, self.get_property("hover"))
elif button in [1, 2]:
self.__set_anchor(ev.window, tag, cursor, self.get_property("active"))
def __tag_reset(self, tag, window):
if tag.get_data("is_anchor"):
self.__set_anchor(window, tag, None, self.get_property("link"))
def __set_anchor(self, window, tag, cursor, prop):
window.set_cursor(cursor)
for key, val in prop.items():
if val is not None:
tag.set_property(key, val)
GObject.type_register(HyperTextView)
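# Usage sketch (not part of the original module; kept as comments so importing stays
# side-effect free). A HyperTextView mixes plain and clickable text and reports clicks
# through the "anchor-clicked" signal:
#
#   view = HyperTextView()
#   view.insert("See the ")
#   view.insert_with_anchor("release notes", "http://example.com/notes")
#   view.connect("anchor-clicked", lambda w, text, anchor, button: print(text, anchor))
#   win = Gtk.Window(); win.add(view); win.show_all(); Gtk.main()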
| gpl-3.0 |
reyha/zulip | zerver/views/pointer.py | 11 | 1988 | from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from six import text_type
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.lib.utils import statsd, generate_random_token
from zerver.models import UserProfile, Message, UserMessage
def get_pointer_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
# type: () -> text_type
return generate_random_token(32)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
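# Illustrative response payload for get_profile_backend (values made up; json_success
# additionally wraps the payload with its result/msg fields):
#   {"pointer": 1453, "client_id": "<32-char random token>", "max_message_id": 1500}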
| apache-2.0 |
shellyw19-meet/meet2017y1final-proj | showcase.py | 1 | 13266 | ### Shelly's code
import turtle
import random
import time
SIZE_X=1300
SIZE_Y=750
turtle.setup(SIZE_X,SIZE_Y)
UP_EDGE = SIZE_Y/2
DOWN_EDGE = -SIZE_Y/2
RIGHT_EDGE = SIZE_X/2
LEFT_EDGE = -SIZE_X/2
#how far the plane moves each step
SQUARE_SIZE=40
pos_list=[]
turtle.tracer(1,0)
#def first_screen():
w = turtle.clone()
turtle.bgcolor("dodgerblue")
turtle.pencolor("yellow")
turtle.pensize(4)
turtle.penup()
turtle.goto(350,350)
turtle.pendown()
turtle.goto(-350,350)
turtle.goto(-350,-350)
turtle.goto(350,-350)
turtle.goto(350,350)
turtle.penup()
turtle.goto(0,0)
turtle.hideturtle()
bb = turtle.clone()
bb.goto(100,100)
up_gif = turtle.clone()
up_gif.hideturtle()
turtle.register_shape("arrowup.gif")
up_gif.shape("arrowup.gif")
down_gif = turtle.clone()
down_gif.hideturtle()
turtle.register_shape("arrowdown.gif")
down_gif.shape("arrowdown.gif")
left_gif = turtle.clone()
left_gif.hideturtle()
turtle.register_shape("arrowleft.gif")
left_gif.shape("arrowleft.gif")
right_gif = turtle.clone()
right_gif.hideturtle()
turtle.register_shape("arrowright.gif")
right_gif.shape("arrowright.gif")
turtle.pencolor("black")
turtle.ht()
w.ht()
w.pu()
w.goto(-115, 300)
w.write("To go UP Press: ", font = ("Ariel",20,"normal"))
w.goto(-120,-210)
w.write("To go DOWN Press: ", font = ("Ariel",20,"normal"))
w.goto(-325,63)
w.write("To go LEFT Press: ", font = ("Ariel",20,"normal"))
w.goto(80,63)
w.write("To go RIGHT Press: ", font = ("Ariel",20,"normal"))
up_gif.hideturtle()
up_gif.goto(0,238)
up_gif.stamp()
down_gif.hideturtle()
down_gif.goto(0,-275)
down_gif.stamp()
left_gif.hideturtle()
left_gif.goto(-275,0)
left_gif.stamp()
right_gif.hideturtle()
right_gif.goto(275,0)
right_gif.stamp()
w.pencolor("aliceblue")
w.goto(-290,-130)
w.write("the game will start in 5 seconds", font = ("Ariel",25,"normal","bold"))
s_score = turtle.clone()
s_score.pencolor('yellow')
u_score = turtle.clone()
u_score.pencolor('yellow')
e_score = turtle.clone()
e_score.pencolor('yellow')
k_score = turtle.clone()
k_score.pencolor('yellow')
##################
start_time = 60##choose how much time until the game ends
##################
kenya=turtle.clone()
egypt=turtle.clone()
uganda=turtle.clone()
syria=turtle.clone()
kenya.penup()
turtle.register_shape("kenya3.gif")
kenya.shape('kenya3.gif')
egypt.penup()
turtle.register_shape("EGYPT1.gif")
egypt.shape('EGYPT1.gif')
uganda.penup()
turtle.register_shape("uganda3.gif")
uganda.shape('uganda3.gif')
syria.penup()
turtle.register_shape("syria2.gif")
syria.shape('syria2.gif')
UP_ARROW='Up'
LEFT_ARROW='Left'
RIGHT_ARROW='Right'
DOWN_ARROW='Down'
TIME_STEP=100
SPACEBAR='spacebar'
UP=0
DOWN=1
LEFT=2
RIGHT=3
turn=4
direction=UP
end_time= time.time()+120
plane=turtle.clone()
#the shape of the plane
turtle.register_shape("photoplane1.gif")
plane.shape("photoplane1.gif")
turtle.hideturtle()
new_pos = plane.pos()
new_x_pos = new_pos[0]
new_y_pos = new_pos[1]
pizza = turtle.clone()
hamburger = turtle.clone()
water = turtle.clone()
cola = turtle.clone()
def game():
global new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, LEFT_EDGE, RIGHT_EDGE, SIZE_X, SIZE_Y, start_time
##################################
#to Carmi
############
#first_screen()
turtle.bgcolor('dodgerblue')
time.sleep(5)
w.clear()
up_gif.clear()
down_gif.clear()
right_gif.clear()
left_gif.clear()
turtle.pencolor("yellow")
turtle.pensize(4)
turtle.penup()
turtle.goto(350,350)
turtle.pendown()
turtle.goto(-350,350)
turtle.goto(-350,-350)
turtle.goto(350,-350)
turtle.goto(350,350)
turtle.penup()
turtle.goto(0,0)
#####################################################
#maya's code
kenya.showturtle()
kenya.goto(-200,200)
egypt.showturtle()
egypt.goto(-200,00)
uganda.showturtle()
uganda.goto(-200,-160)
syria.showturtle()
syria.goto(100,-160)
###############################################
pizza.hideturtle()
turtle.register_shape("Pizza.gif")
pizza.shape("Pizza.gif")
hamburger.hideturtle()
turtle.register_shape("burger_sandwich2.gif")
hamburger.shape("burger_sandwich2.gif")
water.hideturtle()
turtle.register_shape("water4.gif")
water.shape("water4.gif")
cola.hideturtle()
turtle.register_shape("cocacola7.gif")
cola.shape("cocacola7.gif")
##
##hamburger = "burger_sandwich.gif"
##
##turtle.register_shape(hamburger)
##turtle.shape(hamburger)
##
##water = "water.gif"
##
##turtle.register_shape(water)
##turtle.shape(water)
##
##cola = "cocacola.gif"
##
##turtle.register_shape(cola)
##turtle.shape(cola)
pizza.hideturtle()
pizza.goto(280,280)
a = pizza.stamp()
pizza.showturtle()
cola.hideturtle()
cola.goto(200,280)
cola.stamp()
cola.showturtle()
hamburger.hideturtle()
hamburger.goto(120,280)
print(hamburger.pos())
hamburger.stamp()
hamburger.showturtle()
water.hideturtle()
water.goto(40,280)
print(water.pos())
water.stamp()
water.showturtle()
turtle.penup()
plane.showturtle()
    timer()#this starts the countdown timer
c_food_s()
c_food_u()
c_food_e()
c_food_k()
############################################################################
turtle.onkeypress(game, "space")
turtle.listen()
############################################################################################
#####eliass code
#carmis code
food_list = ["hamburger", "pizza", "cola", "water"]
def r_food():
rand_index = random.randint(0, 3)
return food_list[rand_index]
def up():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=UP
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_y_pos < UP_EDGE: #and new_y_pos > DOWN_EDGE and new_x_pos < RIGHT_EDGE and new_x_pos > LEFT_EDGE:
move_plane()
print("you pressed up ")
def down():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=DOWN
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_y_pos > DOWN_EDGE: #and new_y_pos < UP_EDGEand new_x_pos < RIGHT_EDGE and new_x_pos > LEFT_EDGE:
move_plane()
print("you pressed DOWN ")
def right():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=RIGHT
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_x_pos < RIGHT_EDGE: # and new_y_pos < UP_EDGE and new_y_pos > DOWN_EDGE and new_x_pos > LEFT_EDGE:
move_plane()
print("you pressed RIGHT ")
def left():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=LEFT
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_x_pos > LEFT_EDGE: #and new_y_pos < UP_EDGE and new_y_pos > DOWN_EDGE and new_x_pos < RIGHT_EDGE:
move_plane()
print("you pressed LEFT ")
def turn():
global direction
direction=turn
turtle.right(90)
turtle.onkeypress(up,UP_ARROW)
turtle.onkeypress(right,RIGHT_ARROW)
turtle.onkeypress(left,LEFT_ARROW)
turtle.onkeypress(down,DOWN_ARROW)
turtle.listen()
turtle.goto(200,0)#this moves the turtle to x=200 before writing the timer
def timer():#the game timer
global start_time
turtle.goto(-330,310)
turtle.pencolor("navy")
start_time = start_time-1
print(start_time)
turtle.clear()
turtle.write(start_time,font = ("Ariel",23,"normal","bold"))
if start_time==0:
plane.clear()
hamburger.clear()
pizza.clear()
water.clear()
cola.clear()
uganda.clear()
kenya.clear()
egypt.clear()
syria.clear()
score_1.clear()
bb.clear()
u_score.clear()
s_score.clear()
k_score.clear()
e_score.clear()
pizza.clearstamps()
pizza.hideturtle()
cola.clearstamps()
cola.hideturtle()
hamburger.clearstamps()
hamburger.hideturtle()
water.clearstamps()
water.hideturtle()
syria.clearstamps()
syria.hideturtle()
uganda.clearstamps()
uganda.hideturtle()
kenya.clearstamps()
kenya.hideturtle()
egypt.clearstamps()
egypt.hideturtle()
plane.clearstamps()
plane.hideturtle()
turtle.clear()
turtle.bgcolor("dodgerblue")
turtle.pencolor("yellow")
turtle.hideturtle()
turtle.pensize(4)
turtle.penup()
turtle.goto(350,350)
turtle.pendown()
turtle.goto(-350,350)
turtle.goto(-350,-350)
turtle.goto(350,-350)
turtle.goto(350,350)
turtle.penup()
turtle.goto(0,0)
turtle.hideturtle()
turtle.goto(-235,170)
turtle.pencolor("navy")
turtle.write("You ran out of time!", font = ("Ariel",35,"normal"))
turtle.goto(-150,50)
turtle.pencolor("floralwhite")
turtle.write("Your score was: " + str(score), font = ("Ariel",25,"normal"))
turtle.goto(-320,-162)
turtle.pencolor("darkslategray")
turtle.write("GAME OVER :(" , font = ("Ariel",62,"normal","bold"))
time.sleep(5)
quit()
print("you run out of time ")
turtle.ontimer(timer,1000)
def c_food_s():
global s_score, syria_food
syria_food = r_food()
s_score.goto(173,-145)
s_score.clear()
s_score.write('We want ' + syria_food, font = ("Ariel",11,"normal"))
#return syria_food
def c_food_u():
global u_score, uganda_food
uganda_food = r_food()
u_score.goto(-135,-145)
u_score.clear()
u_score.write('We want ' + uganda_food, font = ("Ariel",11,"normal"))
#return uganda_food
def c_food_e():
global e_score, egypt_food
egypt_food = r_food()
e_score.goto(-135,10)
e_score.clear()
e_score.write('We want ' + egypt_food, font = ("Ariel",11,"normal"))
#return egypt_food
def c_food_k():
global k_score, kenya_food
kenya_food = r_food()
k_score.goto(-135,230)
k_score.clear()
k_score.write('We want ' + kenya_food, font = ("Ariel",11,"normal"))
#return kenya_food
score = 0
plane_food = 'aa'
score_1 = turtle.clone()
score_1.color('white')
score_1.goto(300,0)
def move_plane():#how the plane moves
global plane_food, score
my_pos=plane.pos()
x_pos=my_pos[0]
y_pos=my_pos[1]
if direction==RIGHT:
plane.goto(x_pos+SQUARE_SIZE,y_pos)
if direction==DOWN:
plane.goto(x_pos,y_pos-SQUARE_SIZE)
if direction==LEFT:
plane.goto(x_pos-SQUARE_SIZE,y_pos)
if direction==UP:
plane.goto(x_pos,y_pos+SQUARE_SIZE)
if plane.pos() == hamburger.pos():
bb.clear()
plane_food = 'hamburger'
bb.write('you picked up hamburger',font = ("Ariel",20,"normal"))
if plane.pos()== cola.pos():
bb.clear()
plane_food = 'cola'
bb.write('you picked up cola',font = ("Ariel",20,"normal"))
if plane.pos() == pizza.pos():
bb.clear()
plane_food = 'pizza'
bb.write('you picked up pizza',font = ("Ariel",20,"normal"))
if plane.pos() == water.pos():
bb.clear()
plane_food = 'water'
bb.write('you picked up water',font = ("Ariel",20,"normal"))
print("syria food: " + syria_food)
print("plane_food: " + plane_food)
print("plane_pos: " + str(plane.pos()))
#if plane.pos() == syria.pos() and plane_food == syria_food:
if ((plane.pos()[0] - syria.pos()[0])**2 + (plane.pos()[1] - syria.pos()[1])**2)**0.5 < 50 and plane_food == syria_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_s()
#if plane.pos() == uganda.pos() and plane_food == uganda_food:
if ((plane.pos()[0] - uganda.pos()[0])**2 + (plane.pos()[1] - uganda.pos()[1])**2)**0.5 < 50 and plane_food == uganda_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_u()
#if plane.pos() == egypt.pos() and plane_food == egypt_food:
if ((plane.pos()[0] - egypt.pos()[0])**2 + (plane.pos()[1] - egypt.pos()[1])**2)**0.5 < 50 and plane_food == egypt_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_e()
#if plane.pos() == kenya.pos() and plane_food == kenya_food:
if ((plane.pos()[0] - kenya.pos()[0])**2 + (plane.pos()[1] - kenya.pos()[1])**2)**0.5 < 50 and plane_food == kenya_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_k()
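#NOTE (editor sketch, not used by the game): each delivery check above is the same
#Euclidean-distance test, sqrt((x1-x2)**2 + (y1-y2)**2) < 50 pixels. A hypothetical
#helper that would express it once:
#def is_near(a, b, radius=50):
#    return ((a[0] - b[0])**2 + (a[1] - b[1])**2)**0.5 < radius
#e.g. is_near(plane.pos(), syria.pos()) and plane_food == syria_food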
game()
clear_list = []
turtle.goto(0, 0)
##
##turtle.ontimer(c_food_u ,1200)
##turtle.ontimer(c_food_s, 900)
##turtle.ontimer(c_food_e ,1500)
##turtle.ontimer(c_food_k ,1700)
| mit |
synconics/odoo | addons/account_payment/__init__.py | 436 | 1279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import account_payment
import wizard
import account_move_line
import account_invoice
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ensemblr/llvm-project-boilerplate | include/llvm/utils/lit/lit/util.py | 3 | 9754 | import errno
import itertools
import math
import os
import platform
import signal
import subprocess
import sys
import threading
def to_bytes(str):
# Encode to UTF-8 to get binary data.
return str.encode('utf-8')
def to_string(bytes):
if isinstance(bytes, str):
return bytes
return to_bytes(bytes)
def convert_string(bytes):
try:
return to_string(bytes.decode('utf-8'))
except AttributeError: # 'str' object has no attribute 'decode'.
return str(bytes)
except UnicodeError:
return str(bytes)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(capture(['sysctl', '-n', 'hw.ncpu']))
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
# With more than 32 processes, process creation often fails with
# "Too many open files". FIXME: Check if there's a better fix.
return min(ncpus, 32)
return 1 # Default
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
def capture(args, env=None):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output. Raises a CalledProcessError if the command
exits with a non-zero status."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
out = convert_string(out)
err = convert_string(err)
if p.returncode != 0:
raise subprocess.CalledProcessError(cmd=args,
returncode=p.returncode,
output="{}\n{}".format(out, err))
return out
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return p
return None
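# Example (illustrative): which('clang') might return '/usr/bin/clang' on a typical
# Linux install, and returns None when the command is not found on any search path.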
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False
return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def printHistogram(items, title = 'Items'):
items.sort(key = lambda item: item[1])
maxValue = max([v for _,v in items])
# Select first "nice" bar height that produces more than 10 bars.
power = int(math.ceil(math.log(maxValue, 10)))
for inc in itertools.cycle((5, 2, 2.5, 1)):
barH = inc * 10**power
N = int(math.ceil(maxValue / barH))
if N > 10:
break
elif inc == 1:
power -= 1
histo = [set() for i in range(N)]
for name,v in items:
bin = min(int(N * v/maxValue), N-1)
histo[bin].add(name)
barW = 40
hr = '-' * (barW + 34)
print('\nSlowest %s:' % title)
print(hr)
for name,value in items[-20:]:
print('%.2fs: %s' % (value, name))
print('\n%s Times:' % title)
print(hr)
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3-pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
print("[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
'Percentage'.center(barW),
'Count'.center(cDigits*2 + 1)))
print(hr)
for i,row in enumerate(histo):
pct = float(len(row)) / len(items)
w = int(barW * pct)
print("[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]" % (
pDigits, pfDigits, i*barH, pDigits, pfDigits, (i+1)*barH,
'*'*w, ' '*(barW-w), cDigits, len(row), cDigits, len(items)))
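# Usage sketch (illustrative, not called by lit itself): items are (name, seconds) pairs.
#   printHistogram([('test_a.ll', 0.12), ('test_b.ll', 1.75), ('test_c.ll', 9.30)],
#                  title='Tests')
# prints the slowest entries followed by a bucketed ASCII histogram of their times.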
class ExecuteCommandTimeoutException(Exception):
def __init__(self, msg, out, err, exitCode):
assert isinstance(msg, str)
assert isinstance(out, str)
assert isinstance(err, str)
assert isinstance(exitCode, int)
self.msg = msg
self.out = out
self.err = err
self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
"""
Execute command ``command`` (list of arguments or string)
with
* working directory ``cwd`` (str), use None to use the current
working directory
* environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use None to pass
no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
* ``err`` (str) is the standard error of running the command
* ``exitCode`` (int) is the exitCode of running the command
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# need to use a reference to a mutable object rather than a plain
# bool. In Python 3 we could use the "nonlocal" keyword but we need
# to support Python 2 as well.
hitTimeOut = [False]
try:
if timeout > 0:
def killProcess():
# We may be invoking a shell so we need to kill the
# process and all its children.
hitTimeOut[0] = True
killProcessAndChildren(p.pid)
timerObject = threading.Timer(timeout, killProcess)
timerObject.start()
out,err = p.communicate(input=input)
exitCode = p.wait()
finally:
if timerObject != None:
timerObject.cancel()
# Ensure the resulting output is always of string type.
out = convert_string(out)
err = convert_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
msg='Reached timeout of {} seconds'.format(timeout),
out=out,
err=err,
exitCode=exitCode
)
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
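# Usage sketch (assumes a POSIX-style 'echo' binary on PATH):
#   out, err, rc = executeCommand(['echo', 'hello'], timeout=5)
#   # rc == 0, out == 'hello\n', err == ''
# If the command runs longer than `timeout` seconds it is killed and
# ExecuteCommandTimeoutException is raised instead of returning.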
def usePlatformSdkOnDarwin(config, lit_config):
# On Darwin, support relocatable SDKs by providing Clang with a
# default system root path.
if 'darwin' in config.target_triple:
try:
cmd = subprocess.Popen(['xcrun', '--show-sdk-path', '--sdk', 'macosx'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out = out.strip()
res = cmd.wait()
except OSError:
res = -1
if res == 0 and out:
sdk_path = out
lit_config.note('using SDKROOT: %r' % sdk_path)
config.environment['SDKROOT'] = sdk_path
def killProcessAndChildren(pid):
"""
This function kills a process with ``pid`` and all its
running children (recursively). It is currently implemented
using the psutil module which provides a simple platform
neutral implementation.
TODO: Reimplement this without using psutil so we can
remove our dependency on it.
"""
import psutil
try:
psutilProc = psutil.Process(pid)
# Handle the different psutil API versions
try:
# psutil >= 2.x
children_iterator = psutilProc.children(recursive=True)
except AttributeError:
# psutil 1.x
children_iterator = psutilProc.get_children(recursive=True)
for child in children_iterator:
try:
child.kill()
except psutil.NoSuchProcess:
pass
psutilProc.kill()
except psutil.NoSuchProcess:
pass
| mit |
snava10/sqlRunner | websqlrunner/websqlrunner/views.py | 1 | 3365 | import datetime
import re
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import redirect
from .core.sqlRunner import *
from .core.SqlRunnerThread import *
from .forms import SqlScriptForm
from .forms import RunForm
from .models import SqlScript
from .models import Run
def homepage(request):
if request.method == "POST":
print(request.FILES)
if request.FILES:
print("Files arrived to the server")
form = SqlScriptForm(request.POST, request.FILES)
if form.is_valid():
print("Valid")
sqlscript = form.save(commit=False)
sqlscript.createdby = request.user
sqlscript.save()
return redirect(scripts)
else:
form = SqlScriptForm()
return render(request, "homepage.html", { "form": form })
def scripts(request):
scripts = SqlScript.objects.all()
context = { "scripts" : scripts }
return render(request, "scripts.html", context)
def runs(request):
run_models = Run.objects.all()
context = { "run_models": run_models }
return render(request, "runs.html", context)
def create_run(request, script_id):
script = SqlScript.objects.get(pk=script_id)
    form = RunForm(initial={"script": script})
context = { "form" : form, "filename" : script.file.name.split('/')[-1] }
return render(request, "run.html", context)
def run(request, script_id):
script = SqlScript.objects.get(pk=script_id)
if request.method == "POST":
form = RunForm(request.POST)
if form.is_valid():
run_model = form.save(commit=False)
run_model.date = datetime.datetime.now()
run_model.user = request.user
run_model.status = "R"
run_model.script = script
run_model.save()
            #trigger the script execution
run_script(script, run_model)
#redirect to the list of runs
return redirect(runs)
else:
return render(request, "run.html", { "form": form, "filename": script.get_file_name() })
form = RunForm()
return render(request, "run.html", { "form": form, "filename": script.get_file_name() })
def run_script(script, run_model):
def success(context):
if context:
run_id = context["runid"]
rmodel = Run.objects.get(pk=run_id)
rmodel.status = "S"
rmodel.save()
def failed(context):
if context:
run_id = context["runid"]
rmodel = Run.objects.get(pk=run_id)
rmodel.status = "F"
rmodel.save()
sql = script.file.read()
conn_strings = list(map(str.strip, run_model.connstrings.split('\n')))
thread_count = 1
threads = []
for conn_string in conn_strings:
sql_runner = SqlRunner.from_sql_server_connection_string(conn_string)
runner_thread = SqlRunnerThread.from_sqlrunner(sql_runner, sql, "thread-%d" % thread_count,
"thread-%d" % thread_count,thread_count)
threads.append(runner_thread)
runner_thread.success_function = success
runner_thread.failed_function = failed
runner_thread.context = { "runid": run_model.id }
        runner_thread.start()
        thread_count += 1
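# Assumption about data shape (inferred from the split('\n') above): Run.connstrings
# holds one SQL Server connection string per line, e.g.
#   Server=db1.example.com;Database=Orders;User Id=app;Password=secret
#   Server=db2.example.com;Database=Orders;User Id=app;Password=secret
# and run_script starts one SqlRunnerThread per line.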
| apache-2.0 |
liamf/suds | suds/bindings/binding.py | 191 | 19047 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for (WS) SOAP bindings.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace
from suds.sax.parser import Parser
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sudsobject import Factory, Object
from suds.mx import Content
from suds.mx.literal import Literal as MxLiteral
from suds.umx.basic import Basic as UmxBasic
from suds.umx.typed import Typed as UmxTyped
from suds.bindings.multiref import MultiRef
from suds.xsd.query import TypeQuery, ElementQuery
from suds.xsd.sxbasic import Element as SchemaElement
from suds.options import Options
from suds.plugin import PluginContainer
from copy import deepcopy
log = getLogger(__name__)
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
class Binding:
"""
    The soap binding class used to process outgoing and incoming
soap messages per the WSDL port binding.
@cvar replyfilter: The reply filter function.
@type replyfilter: (lambda s,r: r)
@ivar wsdl: The wsdl.
@type wsdl: L{suds.wsdl.Definitions}
@ivar schema: The collective schema contained within the wsdl.
@type schema: L{xsd.schema.Schema}
@ivar options: A dictionary options.
@type options: L{Options}
"""
replyfilter = (lambda s,r: r)
def __init__(self, wsdl):
"""
@param wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.multiref = MultiRef()
def schema(self):
return self.wsdl.schema
def options(self):
return self.wsdl.options
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxTyped(self.schema())
else:
return UmxBasic()
def marshaller(self):
"""
Get the appropriate XML encoder.
@return: An L{MxLiteral} marshaller.
@rtype: L{MxLiteral}
"""
return MxLiteral(self.schema(), self.options().xstq)
def param_defs(self, method):
"""
Get parameter definitions.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
        @param method: A service method.
@type method: I{service.Method}
@return: A collection of parameter definitions
@rtype: [I{pdef},..]
"""
raise Exception, 'not implemented'
def get_message(self, method, args, kwargs):
"""
Get the soap message for the specified method, args and soapheaders.
This is the entry point for creating the outbound soap message.
@param method: The method being invoked.
@type method: I{service.Method}
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The soap envelope.
@rtype: L{Document}
"""
content = self.headercontent(method)
header = self.header(content)
content = self.bodycontent(method, args, kwargs)
body = self.body(content)
env = self.envelope(header, body)
if self.options().prefixes:
body.normalizePrefixes()
env.promotePrefixes()
else:
env.refitPrefixes()
return Document(env)
def get_reply(self, method, reply):
"""
Process the I{reply} for the specified I{method} by sax parsing the I{reply}
and then unmarshalling into python object(s).
@param method: The name of the invoked method.
@type method: str
@param reply: The reply XML received after invoking the specified method.
@type reply: str
        @return: The unmarshalled reply. The returned value is an L{Object} or a
            I{list} depending on whether the service returns a single object or a
collection.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
replyroot = sax.parse(string=reply)
plugins = PluginContainer(self.options().plugins)
plugins.message.parsed(reply=replyroot)
soapenv = replyroot.getChild('Envelope')
soapenv.promotePrefixes()
soapbody = soapenv.getChild('Body')
self.detect_fault(soapbody)
soapbody = self.multiref.process(soapbody)
nodes = self.replycontent(method, soapbody)
rtypes = self.returned_types(method)
if len(rtypes) > 1:
result = self.replycomposite(rtypes, nodes)
return (replyroot, result)
if len(rtypes) == 1:
if rtypes[0].unbounded():
result = self.replylist(rtypes[0], nodes)
return (replyroot, result)
if len(nodes):
unmarshaller = self.unmarshaller()
resolved = rtypes[0].resolve(nobuiltin=True)
result = unmarshaller.process(nodes[0], resolved)
return (replyroot, result)
return (replyroot, None)
def detect_fault(self, body):
"""
Detect I{hidden} soapenv:Fault element in the soap body.
@param body: The soap envelope body.
@type body: L{Element}
@raise WebFault: When found.
"""
fault = body.getChild('Fault', envns)
if fault is None:
return
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, fault)
return self
def replylist(self, rt, nodes):
"""
        Construct a I{list} reply. This method is called when it has been detected
that the reply is a list.
@param rt: The return I{type}.
@type rt: L{suds.xsd.sxbase.SchemaObject}
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: A list of I{unmarshalled} objects.
@rtype: [L{Object},...]
"""
result = []
resolved = rt.resolve(nobuiltin=True)
unmarshaller = self.unmarshaller()
for node in nodes:
sobject = unmarshaller.process(node, resolved)
result.append(sobject)
return result
def replycomposite(self, rtypes, nodes):
"""
Construct a I{composite} reply. This method is called when it has been
detected that the reply has multiple root nodes.
@param rtypes: A list of known return I{types}.
@type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: The I{unmarshalled} composite object.
@rtype: L{Object},...
"""
dictionary = {}
for rt in rtypes:
dictionary[rt.name] = rt
unmarshaller = self.unmarshaller()
composite = Factory.object('reply')
for node in nodes:
tag = node.name
rt = dictionary.get(tag, None)
if rt is None:
if node.get('id') is None:
raise Exception('<%s/> not mapped to message part' % tag)
else:
continue
resolved = rt.resolve(nobuiltin=True)
sobject = unmarshaller.process(node, resolved)
value = getattr(composite, tag, None)
if value is None:
if rt.unbounded():
value = []
setattr(composite, tag, value)
value.append(sobject)
else:
setattr(composite, tag, sobject)
else:
if not isinstance(value, list):
value = [value,]
setattr(composite, tag, value)
value.append(sobject)
return composite
def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True, an
exception is raised. Otherwise, the I{unmarshalled} fault L{Object} is
returned. This method is called when the server raises a I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
return (faultroot, p.detail)
def mkparam(self, method, pdef, object):
"""
Builds a parameter for the specified I{method} using the parameter
definition (pdef) and the specified value (object).
@param method: A method name.
@type method: str
@param pdef: A parameter definition.
@type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The parameter value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
content = \
Content(tag=pdef[0],
value=object,
type=pdef[1],
real=pdef[1].resolve())
return marshaller.process(content)
def mkheader(self, method, hdef, object):
"""
Builds a soapheader for the specified I{method} using the header
definition (hdef) and the specified value (object).
@param method: A method name.
@type method: str
@param hdef: A header definition.
@type hdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The header value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkheader(method, hdef, item))
return tags
content = Content(tag=hdef[0], value=object, type=hdef[1])
return marshaller.process(content)
def envelope(self, header, body):
"""
        Build the B{<Envelope/>} for a soap outbound message.
@param header: The soap message B{header}.
@type header: L{Element}
@param body: The soap message B{body}.
@type body: L{Element}
@return: The soap envelope containing the body and header.
@rtype: L{Element}
"""
env = Element('Envelope', ns=envns)
env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
env.append(header)
env.append(body)
return env
def header(self, content):
"""
        Build the B{<Header/>} for a soap outbound message.
@param content: The header content.
@type content: L{Element}
@return: the soap body fragment.
@rtype: L{Element}
"""
header = Element('Header', ns=envns)
header.append(content)
return header
def bodycontent(self, method, args, kwargs):
"""
Get the content for the soap I{body} node.
@param method: A service method.
@type method: I{service.Method}
@param args: method parameter values
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The xml content for the <body/>
@rtype: [L{Element},..]
"""
raise Exception, 'not implemented'
def headercontent(self, method):
"""
Get the content for the soap I{Header} node.
@param method: A service method.
@type method: I{service.Method}
        @return: The xml content for the <Header/>
@rtype: [L{Element},..]
"""
n = 0
content = []
wsse = self.options().wsse
if wsse is not None:
content.append(wsse.xml())
headers = self.options().soapheaders
if not isinstance(headers, (tuple,list,dict)):
headers = (headers,)
if len(headers) == 0:
return content
pts = self.headpart_types(method)
if isinstance(headers, (tuple,list)):
for header in headers:
if isinstance(header, Element):
content.append(deepcopy(header))
continue
if len(pts) == n: break
h = self.mkheader(method, pts[n], header)
ns = pts[n][1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
n += 1
else:
for pt in pts:
header = headers.get(pt[0])
if header is None:
continue
h = self.mkheader(method, pt, header)
ns = pt[1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
return content
def replycontent(self, method, body):
"""
Get the reply body content.
@param method: A service method.
@type method: I{service.Method}
@param body: The soap body
@type body: L{Element}
@return: the body content
@rtype: [L{Element},...]
"""
raise Exception, 'not implemented'
def body(self, content):
"""
        Build the B{<Body/>} for a soap outbound message.
@param content: The body content.
@type content: L{Element}
@return: the soap body fragment.
@rtype: L{Element}
"""
body = Element('Body', ns=envns)
body.append(content)
return body
def bodypart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
parts = method.soap.input.body.parts
else:
parts = method.soap.output.body.parts
for p in parts:
if p.element is not None:
query = ElementQuery(p.element)
else:
query = TypeQuery(p.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if p.type is not None:
pt = PartElement(p.name, pt)
if input:
if pt.name is None:
result.append((p.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def headpart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
headers = method.soap.input.headers
else:
headers = method.soap.output.headers
for header in headers:
part = header.part
if part.element is not None:
query = ElementQuery(part.element)
else:
query = TypeQuery(part.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if part.type is not None:
pt = PartElement(part.name, pt)
if input:
if pt.name is None:
result.append((part.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def returned_types(self, method):
"""
Get the L{xsd.sxbase.SchemaObject} returned by the I{method}.
@param method: A service method.
@type method: I{service.Method}
@return: The name of the type return by the method.
@rtype: [I{rtype},..]
"""
result = []
for rt in self.bodypart_types(method, input=False):
result.append(rt)
return result
class PartElement(SchemaElement):
"""
A part used to represent a message part when the part
    references a schema type and thus is assumed to be an element.
@ivar resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
def __init__(self, name, resolved):
"""
@param name: The part name.
@type name: str
@param resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
root = Element('element', ns=Namespace.xsdns)
SchemaElement.__init__(self, resolved.schema, root)
self.__resolved = resolved
self.name = name
self.form_qualified = False
def implany(self):
return self
def optional(self):
return True
def namespace(self, prefix=None):
return Namespace.default
def resolve(self, nobuiltin=False):
if nobuiltin and self.__resolved.builtin():
return self
else:
return self.__resolved
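# Rough usage sketch (illustrative only; names are assumptions): concrete bindings such
# as the document/literal binding subclass Binding and implement bodycontent()/
# replycontent(), after which a call roughly flows as:
#   binding = SomeConcreteBinding(wsdl)
#   soapenv = binding.get_message(method, args, kwargs)   # build outbound envelope
#   reply_xml = transport_send(str(soapenv))              # hypothetical transport call
#   root, result = binding.get_reply(method, reply_xml)   # unmarshal the response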
| lgpl-3.0 |
wenjoy/homePage | node_modules/geetest/node_modules/request/node_modules/karma/node_modules/optimist/node_modules/tap/node_modules/yamlish/yamlish-py/test/__init__.py | 161 | 3430 | # -*- coding: utf-8 -*- IGNORE:C0111
from __future__ import absolute_import, print_function, unicode_literals
import logging
import yamlish
import yaml
import tempfile
import textwrap
INPUT = 1
OUTPUT = 2
if yamlish.py3k:
unicode = str
#logging.basicConfig(level=logging.DEBUG)
def _generate_test_name(source):
"""
Clean up human-friendly test name into a method name.
"""
out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()
return "test_%s" % out
def _create_input_test(test_src, tested_function, options=None):
"""
Decorate tested function to be used as a method for TestCase.
"""
def do_test_expected(self):
"""
Execute a test by calling a tested_function on test_src data.
"""
self.maxDiff = None
got = ""
if 'error' in test_src:
self.assertRaises(test_src['error'], tested_function,
test_src['in'], options)
else:
want = test_src['out']
got = tested_function(test_src['in'], options)
logging.debug('got = type %s', type(got))
logging.debug("test_src['out'] = %s",
unicode(test_src['out']))
self.assertEqual(got, want, """Result matches
expected = %s
observed = %s
""" % (want, got))
return do_test_expected
def _create_output_test(test_src, tested_function, options=None):
"""
Decorate tested function to be used as a method for TestCase.
"""
def do_test_expected(self):
"""
Execute a test by calling a tested_function on test_src data.
"""
self.maxDiff = None
# We currently don't throw any exceptions in Writer, so this
        # is always false
if 'error' in test_src:
self.assertRaises(test_src['error'], yamlish.dumps,
test_src['in'], options)
else:
logging.debug("out:\n%s", textwrap.dedent(test_src['out']))
want = yaml.load(textwrap.dedent(test_src['out']))
logging.debug("want:\n%s", want)
with tempfile.NamedTemporaryFile() as test_file:
tested_function(test_src['in'], test_file)
test_file.seek(0)
got_str = test_file.read()
logging.debug("got_str = %s", got_str)
got = yaml.load(got_str)
self.assertEqual(got, want, "Result matches")
return do_test_expected
def generate_testsuite(test_data, test_case_shell, test_fce, direction=INPUT,
options=None):
"""
Generate tests from the test data, class to build upon and function
to use for testing.
"""
for in_test in test_data:
if ('skip' in in_test) and in_test['skip']:
logging.debug("test %s skipped!", in_test['name'])
continue
name = _generate_test_name(in_test['name'])
if direction == INPUT:
test_method = _create_input_test(in_test, test_fce,
options=options)
elif direction == OUTPUT:
test_method = _create_output_test(in_test, test_fce,
options=options)
test_method.__name__ = str('test_%s' % name)
setattr(test_case_shell, test_method.__name__, test_method)
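# Usage sketch (hypothetical test module; the exact reader signature is an assumption):
#   class TestInput(unittest.TestCase):
#       pass
#   data = [{'name': 'simple scalar', 'in': '--- 42', 'out': 42}]
#   generate_testsuite(data, TestInput, yamlish.load, direction=INPUT)
# attaches a test_simple_scalar method to TestInput that feeds 'in' to the reader
# and compares the result with 'out'.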
| mit |
pcarrier/linux | tools/perf/scripts/python/compaction-times.py | 958 | 7950 | # report time spent in compaction
# Licensed under the terms of the GNU GPL License version 2
# testing:
# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
import os
import sys
import re
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
class popt:
DISP_DFL = 0
DISP_PROC = 1
DISP_PROC_VERBOSE=2
class topt:
DISP_TIME = 0
DISP_MIG = 1
DISP_ISOLFREE = 2
DISP_ISOLMIG = 4
DISP_ALL = 7
class comm_filter:
def __init__(self, re):
self.re = re
def filter(self, pid, comm):
m = self.re.search(comm)
return m == None or m.group() == ""
class pid_filter:
def __init__(self, low, high):
self.low = (0 if low == "" else int(low))
self.high = (0 if high == "" else int(high))
def filter(self, pid, comm):
return not (pid >= self.low and (self.high == 0 or pid <= self.high))
def set_type(t):
global opt_disp
opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
def ns(sec, nsec):
return (sec * 1000000000) + nsec
def time(ns):
return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
class pair:
def __init__(self, aval, bval, alabel = None, blabel = None):
self.alabel = alabel
self.blabel = blabel
self.aval = aval
self.bval = bval
def __add__(self, rhs):
self.aval += rhs.aval
self.bval += rhs.bval
return self
def __str__(self):
return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
class cnode:
def __init__(self, ns):
self.ns = ns
self.migrated = pair(0, 0, "moved", "failed")
self.fscan = pair(0,0, "scanned", "isolated")
self.mscan = pair(0,0, "scanned", "isolated")
def __add__(self, rhs):
self.ns += rhs.ns
self.migrated += rhs.migrated
self.fscan += rhs.fscan
self.mscan += rhs.mscan
return self
def __str__(self):
prev = 0
s = "%s " % time(self.ns)
if (opt_disp & topt.DISP_MIG):
s += "migration: %s" % self.migrated
prev = 1
if (opt_disp & topt.DISP_ISOLFREE):
s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
prev = 1
if (opt_disp & topt.DISP_ISOLMIG):
s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
return s
def complete(self, secs, nsecs):
self.ns = ns(secs, nsecs) - self.ns
def increment(self, migrated, fscan, mscan):
if (migrated != None):
self.migrated += migrated
if (fscan != None):
self.fscan += fscan
if (mscan != None):
self.mscan += mscan
class chead:
heads = {}
val = cnode(0);
fobj = None
@classmethod
def add_filter(cls, filter):
cls.fobj = filter
@classmethod
def create_pending(cls, pid, comm, start_secs, start_nsecs):
filtered = 0
try:
head = cls.heads[pid]
filtered = head.is_filtered()
except KeyError:
if cls.fobj != None:
filtered = cls.fobj.filter(pid, comm)
head = cls.heads[pid] = chead(comm, pid, filtered)
if not filtered:
head.mark_pending(start_secs, start_nsecs)
@classmethod
def increment_pending(cls, pid, migrated, fscan, mscan):
head = cls.heads[pid]
if not head.is_filtered():
if head.is_pending():
head.do_increment(migrated, fscan, mscan)
else:
sys.stderr.write("missing start compaction event for pid %d\n" % pid)
@classmethod
def complete_pending(cls, pid, secs, nsecs):
head = cls.heads[pid]
if not head.is_filtered():
if head.is_pending():
head.make_complete(secs, nsecs)
else:
sys.stderr.write("missing start compaction event for pid %d\n" % pid)
@classmethod
def gen(cls):
if opt_proc != popt.DISP_DFL:
for i in cls.heads:
yield cls.heads[i]
@classmethod
def str(cls):
return cls.val
def __init__(self, comm, pid, filtered):
self.comm = comm
self.pid = pid
self.val = cnode(0)
self.pending = None
self.filtered = filtered
self.list = []
def __add__(self, rhs):
self.ns += rhs.ns
self.val += rhs.val
return self
def mark_pending(self, secs, nsecs):
self.pending = cnode(ns(secs, nsecs))
def do_increment(self, migrated, fscan, mscan):
self.pending.increment(migrated, fscan, mscan)
def make_complete(self, secs, nsecs):
self.pending.complete(secs, nsecs)
chead.val += self.pending
if opt_proc != popt.DISP_DFL:
self.val += self.pending
if opt_proc == popt.DISP_PROC_VERBOSE:
self.list.append(self.pending)
self.pending = None
def enumerate(self):
if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
for i, pelem in enumerate(self.list):
sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
def is_pending(self):
return self.pending != None
def is_filtered(self):
return self.filtered
def display(self):
if not self.is_filtered():
sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
def trace_end():
sys.stdout.write("total: %s\n" % chead.str())
for i in chead.gen():
i.display(),
i.enumerate()
def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, nr_migrated, nr_failed):
chead.increment_pending(common_pid,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
def compaction__mm_compaction_end(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, zone_start, migrate_start, free_start, zone_end,
sync, status):
chead.complete_pending(common_pid, common_secs, common_nsecs)
def compaction__mm_compaction_begin(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, zone_start, migrate_start, free_start, zone_end,
sync):
chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
def pr_help():
global usage
sys.stdout.write(usage)
sys.stdout.write("\n")
sys.stdout.write("-h display this help\n")
sys.stdout.write("-p display by process\n")
sys.stdout.write("-pv display by process (verbose)\n")
sys.stdout.write("-t display stall times only\n")
sys.stdout.write("-m display stats for migration\n")
sys.stdout.write("-fs display stats for free scanner\n")
sys.stdout.write("-ms display stats for migration scanner\n")
sys.stdout.write("-u display results in microseconds (default nanoseconds)\n")
comm_re = None
pid_re = None
pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
opt_proc = popt.DISP_DFL
opt_disp = topt.DISP_ALL
opt_ns = True
argc = len(sys.argv) - 1
if argc >= 1:
pid_re = re.compile(pid_regex)
for i, opt in enumerate(sys.argv[1:]):
if opt[0] == "-":
if opt == "-h":
pr_help()
exit(0);
elif opt == "-p":
opt_proc = popt.DISP_PROC
elif opt == "-pv":
opt_proc = popt.DISP_PROC_VERBOSE
elif opt == '-u':
opt_ns = False
elif opt == "-t":
set_type(topt.DISP_TIME)
elif opt == "-m":
set_type(topt.DISP_MIG)
elif opt == "-fs":
set_type(topt.DISP_ISOLFREE)
elif opt == "-ms":
set_type(topt.DISP_ISOLMIG)
else:
sys.exit(usage)
elif i == argc - 1:
m = pid_re.search(opt)
if m != None and m.group() != "":
if m.group(3) != None:
f = pid_filter(m.group(3), m.group(3))
else:
f = pid_filter(m.group(1), m.group(2))
else:
try:
comm_re=re.compile(opt)
except:
sys.stderr.write("invalid regex '%s'" % opt)
sys.exit(usage)
f = comm_filter(comm_re)
chead.add_filter(f)
| gpl-2.0 |
evax/ansible-modules-core | cloud/amazon/ec2_vol.py | 50 | 15330 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
aliases: []
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
aliases: []
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
aliases: []
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
aliases: []
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
aliases: []
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
aliases: []
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: "Lester Wade (@lwade)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 200
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
'''
import time
from distutils.version import LooseVersion
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
if not instance:
module.fail_json(msg = "Instance must be specified to get volumes")
try:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
        True if the installed boto library is recent enough (>= 2.29.0) to support encrypted EBS volumes, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_volume(module, ec2, zone):
name = module.params.get('name')
id = module.params.get('id')
instance = module.params.get('instance')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
if instance == 'None' or instance == '':
instance = None
volume = get_volume(module, ec2)
if volume:
if volume.attachment_state() is not None:
if instance is None:
return volume
adata = volume.attach_data
if adata.instance_id != instance:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (name or id, adata.instance_id))
else:
module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
(name or id, adata.instance_id, adata.device),
volume_id=id,
device=adata.device,
changed=False)
else:
try:
if boto_supports_volume_encryption():
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
ec2.create_tags([volume.id], {"Name": name})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
if device_name and instance:
try:
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None and instance:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
else:
device_name = '/dev/xvdf'
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def detach_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol or vol.attachment_state() is None:
module.exit_json(changed=False)
else:
vol.detach()
module.exit_json(changed=True)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(),
device_name = dict(),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
if instance == 'None' or instance == '':
instance = None
ec2 = ec2_connect(module)
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append({
'create_time': v.create_time,
'id': v.id,
'iops': v.iops,
'size': v.size,
'snapshot_id': v.snapshot_id,
'status': v.status,
'type': v.type,
'zone': v.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'status': attachment.status
}
})
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
    # Here we need to get the zone info for the instance. This covers the situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
if instance:
reservation = ec2.get_all_instances(instance_ids=instance)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and (id or snapshot):
module.fail_json(msg="Cannot specify volume_size together with id or snapshot")
if state == 'absent':
delete_volume(module, ec2)
if state == 'present':
volume = create_volume(module, ec2, zone)
if instance:
attach_volume(module, ec2, volume, inst)
else:
detach_volume(module, ec2)
module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
tarc/gyp | test/mac/gyptest-lto.py | 69 | 2050 | #!/usr/bin/env python
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LTO flags work.
"""
import TestGyp
import os
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'lto'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def ObjPath(srcpath, target):
# TODO: Move this into TestGyp if it's needed elsewhere.
if test.format == 'xcode':
return os.path.join(CHDIR, 'build', 'test.build', 'Default',
target + '.build', 'Objects-normal', 'x86_64',
srcpath + '.o')
elif 'ninja' in test.format: # ninja, xcode-ninja
return os.path.join(CHDIR, 'out', 'Default', 'obj',
target + '.' + srcpath + '.o')
elif test.format == 'make':
return os.path.join(CHDIR, 'out', 'Default', 'obj.target',
target, srcpath + '.o')
def ObjType(p, t_expected):
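    # Classify the object file using file(1): sources built with LTO should be
    # LLVM bitcode, while the assembly file should stay a regular Mach-O object.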
r = re.compile(r'nsyms\s+(\d+)')
o = subprocess.check_output(['file', p])
objtype = 'unknown'
if ': Mach-O ' in o:
objtype = 'mach-o'
elif ': LLVM bit-code ' in o:
objtype = 'llvm'
if objtype != t_expected:
print 'Expected %s, got %s' % (t_expected, objtype)
test.fail_test()
ObjType(ObjPath('cfile', 'lto'), 'llvm')
ObjType(ObjPath('ccfile', 'lto'), 'llvm')
ObjType(ObjPath('mfile', 'lto'), 'llvm')
ObjType(ObjPath('mmfile', 'lto'), 'llvm')
ObjType(ObjPath('asmfile', 'lto'), 'mach-o')
ObjType(ObjPath('cfile', 'lto_static'), 'llvm')
ObjType(ObjPath('ccfile', 'lto_static'), 'llvm')
ObjType(ObjPath('mfile', 'lto_static'), 'llvm')
ObjType(ObjPath('mmfile', 'lto_static'), 'llvm')
ObjType(ObjPath('asmfile', 'lto_static'), 'mach-o')
test.pass_test()
# TODO: Probably test for -object_path_lto too, else dsymutil won't be
# useful maybe?
| bsd-3-clause |
jjdicharry/godot | tools/scripts/makeargs.py | 50 | 1945 |
text="""
#define FUNC$numR(m_r,m_func,$argt)\\
virtual m_r m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
m_r ret;\\
command_queue.push_and_ret( visual_server, &VisualServer::m_func,$argp,&ret);\\
return ret;\\
} else {\\
return visual_server->m_func($argp);\\
}\\
}
#define FUNC$numRC(m_r,m_func,$argt)\\
virtual m_r m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
m_r ret;\\
command_queue.push_and_ret( visual_server, &VisualServer::m_func,$argp,&ret);\\
return ret;\\
} else {\\
return visual_server->m_func($argp);\\
}\\
}
#define FUNC$numS(m_func,$argt)\\
virtual void m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push_and_sync( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$numSC(m_func,$argt)\\
virtual void m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push_and_sync( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$num(m_func,$argt)\\
virtual void m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$numC(m_func,$argt)\\
virtual void m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
"""
for i in range(1,8):
tp=""
p=""
t=""
for j in range(i):
if (j>0):
tp+=", "
p+=", "
t+=", "
tp +=("m_arg"+str(j+1)+" p"+str(j+1))
p+=("p"+str(j+1))
t+=("m_arg"+str(j+1))
t = text.replace("$argtp",tp).replace("$argp",p).replace("$argt",t).replace("$num",str(i))
print(t)
| mit |
angelmtenor/IDSFC | L1_intro/H_olympics_medal_points.py | 1 | 1606 | import numpy as np
from pandas import DataFrame
def numpy_dot():
"""
Imagine a point system in which each country is awarded 4 points for each
gold medal, 2 points for each silver medal, and one point for each
bronze medal.
Using the numpy.dot function, create a new dataframe called
'olympic_points_df' that includes:
a) a column called 'country_name' with the country name
b) a column called 'points' with the total number of points the country
earned at the Sochi olympics.
You do not need to call the function in your code when running it in the
browser - the grader will do that automatically when you submit or test it.
"""
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
# YOUR CODE HERE
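    # Dot product of the 1x3 weight vector with the 3xN medal matrix gives one weighted score per country.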
points = np.dot([4, 2, 1], [gold, silver, bronze])
olympic_points_df = DataFrame({'country_name': countries, 'points': points})
return olympic_points_df
print(numpy_dot())
| mit |
Proteu5/LiteMonitor | LiteMonitor-0.3.py | 1 | 13212 | """
The MIT License (MIT)
Copyright (c) 2014 Proteu5
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Thanks To 'River' For Sparking This Entire Project
@QPython @Helloworld.py @Author: River @Date: 2012-12-31
"""
import json
import time
import base64
import os.path
import urllib2
from json import JSONDecoder
from functools import partial
import androidhelper
droid_V = androidhelper.Android()
droid = androidhelper.Android()
################################### LiteMonitor Configuration ###################################
#
# guiPrompt: Toggle Android GUI Prompt For Pool Selection ( 1 = On / 2 = Off )
#
# poolID: Set Pool Field Number If GUI Prompt Has Been Disabled
#
# mmpool: Insert http://www.mmpool.org's User API-URL
#
# dogeHashFaster: Insert http://doge.hashfaster.com's User API-URL
#
# mVar(i): Sets The 'MinerVariance' Alert Level For Your Ideal Hashrate Threshold
# Controls Auto Message For Hashrate: Ex: You Hash at 2,500kh/s, set to 2400 <>
# If Rate Drops Below Number & minerVar, It Triggers An AutoAlert Stream Of API-Data
# You Can Set minerVar To A High Number To Always Receive All API-Info
#
# userVibe: Selects one of a series of vibration patterns [Only Triggered w/ minerVar]
#
# ts: Time Control For Custom Vibration Configuration
#
# vrrrb: Duration Of Custom Vibration
#
# X2: ts & vrrrb Are Restricted Out Of Common-Sense <> Personally, a vib duration over
# 3 seconds (3000ms) seems too much. However, X2 isn't restricted should you need that option.
# By default X2 is set to 0 and falls back on a debug print should userVibe Option 5 be True
#
# tsX2: Unbound Time Delay [Note: Needs Uncomment]
#
# vrrrbX2: Unbound Vibration Duration [Note: Needs Uncomment]
#
# If you know a bit of Python I have commented just about everything; have some fun and explore.
# PoolID 3 does nothing yet but will soon; as of now it serves just as a nice template.
#
# Please note that below you will see code for an AboutMe / "Donation" file.
# It will auto-decrypt and print in the log. Inside is a way to get in touch with me,
# along with several donation addresses and a little backstory.
# If you wish to decode the file externally, def decode() can handle the operation;
# the decryption key appears just below the encrypted string.
#
# On Debugging: It Prints Out A Lot Of Data & I Mean A Lot!
# Most phones can handle it. It will never get in your way, and you will only see it in
# the debug .run.log file. However, if it does slow you down, SublimeText & other text editors
# have a nice 'Batch Replace' feature. This is v-0.3; I will officially remove them in later releases.
### Toggle GUI Pool ID Prompt ###
guiPrompt = 2
## Select Your Pool ##
poolID = 2
### Edit Between " " Keeping The Format ###
## 1 ##
mmpool = "http://mmpool.org/userstats/(UserName)"
mVar1 = 300000
## 2 ##
dogeHashFaster = "http://doge.hashfaster.com/index.php?page=api&action=getuserstatus&api_key=(API-Data)"
mVar2 = 10000
## 3 ##
null = "http://python.com"
mVar3 = 1000000
### Vibration Settings ###
## 1 = SOS | 2 = Pseudo SOS | 3 = Long Vibration | 4 = Short Vibration | 5 = Unlock Custom Vibration ##
userVibe = 1
ts = 1.5
vrrrb = 100
x2 = 0 ## Set 2272 to Use Secondary Pattern: Unrestricted
#tsX2 = 0.2
#vrrrbX2 = 100
## ts = Time Delay In Seconds [0.1 - 5] || vrrrb = Vibration Duration in ms [10 - 3000] ##
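## Example (illustrative values only): a custom double-buzz pattern uses option 5 ##
## userVibe = 5, ts = 0.5, vrrrb = 250, x2 = 2272, tsX2 = 0.2, vrrrbX2 = 100 (uncomment tsX2/vrrrbX2 below) ##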
############################################ LiteMonitor V-0.3 ############################################
if guiPrompt == 1:
line = droid.dialogGetInput("LiteMonitor v-0.3", "Please Enter Pool ID#")
s = (line.result)
i = int(s)
poolVar = i
else:
poolVar = poolID
### Pool Conditionals ###
if poolVar == 1:
api = urllib2.urlopen(mmpool)
elif poolVar == 2:
api = urllib2.urlopen(dogeHashFaster)
else:
api = urllib2.urlopen(null)
### Debug Log Reading Print ###
brace = ('----break for success----')
### See .Run.Log for Raw Data ###
### Edit This For Alert ###
if poolVar == 1:
minerVar = mVar1
elif poolVar == 2:
minerVar = mVar2
else:
minerVar = mVar3
### Customized Debug Messages ###
autoStatMsg = "ALERT: Low HashRate Detected"
H = "Hashrate:"
S = "DGM Shares:"
U = "PPS Paid:"
V = "Balances:"
I = "Pending BTC:"
### Coming Soon ###
#def alert(self):
### Decoding Encrypted String For More Info ###
### Prints Decoded Data File To Donation.bin after Function Below ###
def decode(key, enc):
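    # urlsafe-base64 decode, then shift every byte back by the matching key
    # character (a simple Vigenere-style scheme) to recover the plain text.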
dec = []
enc = base64.urlsafe_b64decode(enc)
for i in range(len(enc)):
key_c = key[i % len(key)]
dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)
dec.append(dec_c)
return "".join(dec)
### Buffer To Parse Json-API Data ###
def json_miner(fileobj, decoder=JSONDecoder(), buffersize=2048):
buffer = ''
for chunk in iter(partial(fileobj.read, buffersize), ''):
buffer += chunk
while buffer:
try:
result, index = decoder.raw_decode(buffer)
yield result
buffer = buffer[index:]
except ValueError:
# Not enough data to decode, read more
break
### Multi-Pool Class Data Support ###
class cPoolStat1(object):
def __init__(self):
self.b1 = 'balance'
self.b2 = 'coin'
self.b3 = 'bitcoin'
self.m1 = 'hashrate'
self.m2 = 'dgm'
self.m3 = 'pps'
self.m4 = 'balances' # TODO: Fix Parse
self.m5 = 'btc_pending'
class cPoolStat2(object):
def __init__(self):
self.m1a = 'getuserstatus'
self.m1b = 'data'
self.m1c = 'hashrate'
self.m1d = 'shares'
self.m2 = 'sharerate'
self.m3a = 'username'
self.m3b = 'valid'
self.m3c = 'invalid'
class cPoolStat3(object):
def __init__(self):
self.m1 = 'hashrate'
self.m2 = 'NULL'
self.m3 = 'pps'
self.m4 = 'balances'
self.m5 = 'NULL'
### Multi-Pool Class Message Support ###
class cPoolMSG1(object):
def __init__(self):
self.p1 = "[BTC-ALERT] PPS: %s"
self.p2 = "[BTC-ALERT] HashRate: %s"
self.p3 = "[BTC-ALERT] DGM: %s"
self.p4 = "[BTC-ALERT] Balances: %s"
self.p5 = "[BTC-ALERT] Pending BTC: %s"
class cPoolMSG2(object):
def __init__(self):
self.p1 = "[DOGE-ALERT] UserName: %s"
self.p2 = "[DOGE-ALERT] HashRate: %s"
self.p3 = "[DOGE-ALERT] ShareRate: %s"
self.p4 = "[DOGE-ALERT] ValidShares: %s"
self.p5 = "[DOGE-ALERT] InvalidShares: %s"
class cPoolMSG3(object):
def __init__(self):
self.p1 = "[NULL] PPS: %s"
self.p2 = "[BTC-ALERT] HashRate: %s"
self.p3 = "[BTC-ALERT] DGM: %s"
self.p4 = "[NULL] Balances: %s"
self.p5 = "[BTC-ALERT] Pending BTC: %s"
class cVibrateA:
def __init__(self):
droid_V.vibrate(100)
time.sleep(0.3)
droid_V.vibrate(100)
time.sleep(0.3)
droid_V.vibrate(100)
time.sleep(0.3)
droid_V.vibrate(250)
time.sleep(0.3)
droid_V.vibrate(250)
time.sleep(0.3)
droid_V.vibrate(250)
time.sleep(0.3)
droid_V.vibrate(100)
time.sleep(0.3)
droid_V.vibrate(100)
time.sleep(0.2)
droid_V.vibrate(100)
class cVibrateB:
def __init__(self):
droid_V.vibrate(500)
time.sleep(1)
droid_V.vibrate(1000)
time.sleep(1.2)
droid_V.vibrate(500)
class cVibrateC:
def __init__(self):
time.sleep(1)
droid_V.vibrate(2300)
class cVibrateD:
def __init__(self):
time.sleep(1)
droid_V.vibrate(900)
class cVibrateE:
def __init__(self):
if ts >= 6:
time.sleep(0.1)
else:
time.sleep(ts)
if vrrrb >= 3150:
droid_V.vibrate(22)
else:
droid_V.vibrate(vrrrb)
if x2 == 2272:
time.sleep(tsX2)
droid_V.vibrate(vrrrbX2)
else:
print 'x2 Code Error'
### Encrypted Donation File Information ###
### "Hey, I Want To Protect My Addresses, I Am A Bit Of A Security Nut... :)" ###
stringD = 'hOCRnpujUHzS1VmLp5nTmVV8ndWZhqLN0s_a0NiOPIGDm5uCrdPWUZPXmFagpMbFpcfFVK3N2ptVy6SqhJLSo6Wbk9mkmZ-eUJfRx1mblaPKUqSWVNannl9uhIbV04bZm9TLUqnRVNfGllLYoqOVWM_JqYLRppvY2qxVxZ-m0FHXo6CkkdyVo1CRnpqDqa6ioFDXk6OXmWtUWYPT09KGtNvSotDVpmGCpNDGkqXKU5mfptTNlsfTVJrT1JSpy56ehKXRU5qgqZifllCdqVbEx52omaPYl6hePoFUf6LWhM_KxsfVXoHWp5zJmdfVmqHTpmJQf7atUrXWpKbT2KdVioam2JaLX1mno91QnalQlJvHzJyXqJXJUneZqK6ZrKbFy8uGosrGpMbWpW9sPoSBc5vZgJujq8LLl5yBdoORmJaM2YZozJS4qZykc6toqaRzeHuYl3Kbq4HdmWyYba2Nfj1uhIaotamcUpKoq3fKdpakpJXIhaadcce1l7vFlqCqy2SGpnNtsZLRqpI8UJh8hHNqUIKx0nuvgmPcppp9jdObr6rd07jfy9PFdLXaha3YmayooX1vU1Z0h6ipbIKlfJjOvoSJ25OMpZW3fZCld8mUZ6FxZ421tqysp3vTZWiZPmtUWYfM09vNyYarUs7Eq1XFo8jGUabUU2mRpYHJqMfTrVayz5qd1lxXrVHDoFmlpOGcnFCRUJnSz6Wbm5WFpamlmMairWGErYbdytLOPIGDk6HZld3UUZXUl5tQrNCEn9HPnarT2FOW0JRX0ZLLoa2TmeZQnalQn63Rg4immaLGpp6fotRvWZvT28vcxtiCmNDVUpjRotjKn6fKl0BQWNTZotLQpqqQhpakz6CY2JrEnKWbpPFcUJGelFbY0aKsmaLYk6FQldGkpZzHxdrP0NSOUsrXUp7VVNjKnpeFlqWeq9bRm9DIYkCEhj1VgoSfxZ_NplmYn-pQk5ifn6nM0aBWgJnZl4KfosqoqKWQhNrOxoavocPMnpqCodPPmqbUpVakoMLYUtvQqVbHx6FV1ZWchJrVU6yTlt1QkZ6UUKnIxq6omV5vUlVdVLGmqKfJ2Zs='
decoded = decode('0x000006cc9640e2504a493ddffafb2ac25b4da12e3608ad2ba46df35b07d1b392', stringD)
print 'Decrypted string:', decoded
### Will Only Write Out Donation.bin Once, Unless Deleted ###
fname = "Donation.bin"
if os.path.isfile(fname): # checks to see if exists
print brace
else:
with open('Donation.bin', 'wb') as f: # It creates empty file
f.write(decoded) # It writes
f.close() # It closes
# It does nothing else
### Begin API-URL Reading ###
htmltxt = api.read()
f = open('miner.bin', 'wb')
f.write(htmltxt)
with open('miner.bin', 'rb') as f:
byte = f.read()
byte
f.close()
### Parse Class Conditionals ###
if poolVar == 1:
x = cPoolStat1()
elif poolVar == 2:
x = cPoolStat2()
else:
x = cPoolStat3()
### MSG Class Conditionals ###
if poolVar == 1:
q = cPoolMSG1()
elif poolVar == 2:
q = cPoolMSG2()
else:
q = cPoolMSG3()
### Opens Miner API-Data & Parse! ###
with open('miner.bin', 'r') as order:
for data in json_miner(order):
json_string = json.dumps(data,sort_keys=True,indent=2)
data = json.loads(json_string)
print json_string
print data
print brace
if poolVar == 1:
parent = data[x.m1]
elif poolVar == 2:
parent = data[x.m1a][x.m1b][x.m1c]
else:
parent = data[x.m1]
if poolVar == 1:
parent2 = data[x.m2]
elif poolVar == 2:
parent2 = data[x.m1a][x.m1b][x.m2]
else:
parent2 = data[x.m2]
if poolVar == 1:
parent3 = data[x.m3]
elif poolVar == 2:
parent3 = data[x.m1a][x.m1b][x.m1d][x.m3a]
else:
parent3 = data[x.m3]
if poolVar == 1:
#for coin in data['balances']:
parent4 = data[x.m4] #(coin['coin'], coin['balance']) //Support For Cleaner Balances In Works...
elif poolVar == 2:
parent4 = data[x.m1a][x.m1b][x.m1d][x.m3b]
else:
parent4 = data[x.m4]
if poolVar == 1:
parent5 = data[x.m5]
elif poolVar == 2:
parent5 = data[x.m1a][x.m1b][x.m1d][x.m3c]
else:
parent5 = data[x.m5]
print H, parent
### Droid Message No Low-Hash Trigger ###
droid = androidhelper.Android()
line = parent
s = "[API-Data] Hashrate: %s" % (line)
droid.LENGTH_LONG
droid.makeToast(s)
del droid # Deleting Droid Objects
### Droid Message Stream: After Trigger Check ###
stat = float(parent)
if stat <= minerVar: # Checks For Your Pre-Set Hashrate
#alert <--TODO: Future Additions
if userVibe == 1:
cVibrateA()
elif userVibe == 2:
cVibrateB()
elif userVibe == 3:
cVibrateC()
elif userVibe == 4:
cVibrateD()
elif userVibe == 5:
cVibrateE()
else:
            # cVibrateF is not defined; fall back to the short vibration pattern
            cVibrateD()
### Displays Droid Message Stream ###
print autoStatMsg # Pre-Set Alert Message
print U, parent3
print H, parent
print S, parent2
print V, parent4
print I, parent5
lineU = parent3
sU = (q.p1 % (lineU))
lineH = parent
sH = (q.p2 % (lineH))
lineS = parent2
sS = (q.p3 % (lineS))
lineV = parent4
sV = (q.p4 % (lineV))
lineI = parent5
sI = (q.p5 % (lineI))
sM = "[API-SYS] By: Proteu5"
droid2 = androidhelper.Android()
droid2.LENGTH_LONG
droid2.makeToast(sU)
del droid2
droid3 = androidhelper.Android()
droid3.LENGTH_LONG
droid3.makeToast(sH)
del droid3
droid4 = androidhelper.Android()
droid4.LENGTH_LONG
droid4.makeToast(sS)
del droid4
droid5 = androidhelper.Android()
droid5.LENGTH_LONG
droid5.makeToast(sV)
del droid5
droid6 = androidhelper.Android()
droid6.LENGTH_LONG
droid6.makeToast(sI)
del droid6
droid7 = androidhelper.Android()
droid7.LENGTH_LONG
droid7.makeToast(sM)
del droid7
| mit |
karban/field | resources/python/rope/base/oi/__init__.py | 112 | 1684 | """Rope object analysis and inference package
Rope makes some simplifying assumptions about a python program. It
assumes that a program only performs assignments and function calls.
Tracking assignments is simple and `PyName` objects handle that. The
main problem is function calls. Rope uses these two approaches for
obtaining call information:
* Static object analysis: `rope.base.pycore.PyCore.analyze_module()`
It can analyze modules to obtain information about functions. This
is done by analyzing function calls in a module or scope. Currently
SOA analyzes the scopes that are changed while saving or when the
user asks to analyze a module. That is mainly because static
analysis is time-consuming.
* Dynamic object analysis: `rope.base.pycore.PyCore.run_module()`
When you run a module or your testsuite, when DOA is enabled, it
collects information about parameters passed to and objects returned
from functions. The main problem with this approach is that it is
quite slow; not when looking up the information but when collecting
it.
An instance of `rope.base.oi.objectinfo.ObjectInfoManager` can be used
for accessing these information. It saves the data in a
`rope.base.oi.objectdb.ObjectDB` internally.
Now if our objectdb does not know anything about a function and we
need the value returned by it, static object inference, SOI, comes
into play. It analyzes function body and tries to infer the object
that is returned from it (we usually need the returned value for the
given parameter objects).
Rope might collect and store information for other `PyName`\s, too.
For instance rope stores the objects that builtin containers hold.
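A minimal usage sketch (the project path and module name are illustrative,
and exact signatures may vary between rope versions)::
    from rope.base.project import Project
    project = Project('/path/to/project')
    resource = project.get_resource('mod.py')
    # run static object analysis (SOA) for a single module
    project.pycore.analyze_module(resource)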
"""
| gpl-2.0 |
rgerkin/python-neo | neo/test/coretest/test_block.py | 1 | 32775 | # -*- coding: utf-8 -*-
"""
Tests of the neo.core.block.Block class
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
from datetime import datetime
from copy import deepcopy
import unittest
import numpy as np
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.block import Block
from neo.core.container import filterdata
from neo.core import SpikeTrain, Unit, AnalogSignal
from neo.test.tools import (assert_neo_object_is_compliant,
assert_same_sub_schema)
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, clone_object,
get_annotations, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = {str(x): TEST_ANNOTATIONS[x] for x in
range(len(TEST_ANNOTATIONS))}
def test__get_fake_values(self):
self.annotations['seed'] = 0
file_datetime = get_fake_value('file_datetime', datetime, seed=0)
rec_datetime = get_fake_value('rec_datetime', datetime, seed=1)
index = get_fake_value('index', int, seed=2)
name = get_fake_value('name', str, seed=3, obj=Block)
description = get_fake_value('description', str, seed=4, obj='Block')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'file_datetime': file_datetime,
'rec_datetime': rec_datetime,
'index': index,
'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(Block, annotate=False, seed=0)
res12 = get_fake_values('Block', annotate=False, seed=0)
res21 = get_fake_values(Block, annotate=True, seed=0)
res22 = get_fake_values('Block', annotate=True, seed=0)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = 'Block'
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
for child in res.children_recur:
del child.annotations['i']
del child.annotations['j']
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 1)
seg = res.segments[0]
self.assertEqual(seg.annotations, self.annotations)
self.assertEqual(len(res.channel_indexes), 1)
chx = res.channel_indexes[0]
self.assertEqual(chx.annotations, self.annotations)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.irregularlysampledsignals), 1)
self.assertEqual(len(seg.spiketrains), 1)
self.assertEqual(len(seg.events), 1)
self.assertEqual(len(seg.epochs), 1)
self.assertEqual(seg.analogsignals[0].annotations,
self.annotations)
self.assertEqual(seg.analogsignals[0].annotations,
self.annotations)
self.assertEqual(seg.irregularlysampledsignals[0].annotations,
self.annotations)
self.assertEqual(seg.spiketrains[0].annotations,
self.annotations)
self.assertEqual(seg.events[0].annotations,
self.annotations)
self.assertEqual(seg.epochs[0].annotations,
self.annotations)
self.assertEqual(len(chx.units), 1)
unit = chx.units[0]
self.assertEqual(unit.annotations, self.annotations)
self.assertEqual(len(chx.analogsignals), 1)
self.assertEqual(chx.analogsignals[0].annotations,
self.annotations)
self.assertEqual(len(unit.spiketrains), 1)
self.assertEqual(unit.spiketrains[0].annotations,
self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = Block
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 0)
self.assertEqual(len(res.channel_indexes), 0)
class TestBlock(unittest.TestCase):
def setUp(self):
self.nchildren = 2
self.seed1 = 0
self.seed2 = 10000
self.blk1 = fake_neo(Block, seed=self.seed1, n=self.nchildren)
self.blk2 = fake_neo(Block, seed=self.seed2, n=self.nchildren)
self.targobj = self.blk1
self.segs1 = self.blk1.segments
self.segs2 = self.blk2.segments
self.chxs1 = self.blk1.channel_indexes
self.chxs2 = self.blk2.channel_indexes
self.units1 = [[unit for unit in chx.units] for chx in self.chxs1]
self.units2 = [[unit for unit in chx.units] for chx in self.chxs2]
self.units1 = sum(self.units1, [])
self.units2 = sum(self.units2, [])
self.sigarrs1 = [[sigarr for sigarr in chx.analogsignals]
for chx in self.chxs1]
self.sigarrs2 = [[sigarr for sigarr in chx.analogsignals]
for chx in self.chxs2]
self.trains1 = [[train for train in unit.spiketrains]
for unit in self.units1]
self.trains2 = [[train for train in unit.spiketrains]
for unit in self.units2]
self.irsigs1 = [[irsig for irsig in chx.irregularlysampledsignals]
for chx in self.chxs1]
self.irsigs2 = [[irsig for irsig in chx.irregularlysampledsignals]
for chx in self.chxs2]
self.epcs1 = [[epc for epc in seg.epochs]
for seg in self.segs1]
self.epcs2 = [[epc for epc in seg.epochs]
for seg in self.segs2]
self.evts1 = [[evt for evt in seg.events]
for seg in self.segs1]
self.evts2 = [[evt for evt in seg.events]
for seg in self.segs2]
self.img_seqs1 = [[imgseq for imgseq in seg.imagesequences]
for seg in self.segs1]
self.img_seqs2 = [[imgseq for imgseq in seg.imagesequences]
for seg in self.segs2]
self.sigarrs1 = sum(self.sigarrs1, [])
self.sigarrs2 = sum(self.sigarrs2, [])
self.trains1 = sum(self.trains1, [])
self.trains2 = sum(self.trains2, [])
self.irsigs1 = sum(self.irsigs1, [])
self.irsigs2 = sum(self.irsigs2, [])
self.epcs1 = sum(self.epcs1, [])
self.epcs2 = sum(self.epcs2, [])
self.evts1 = sum(self.evts1, [])
self.evts2 = sum(self.evts2, [])
self.img_seqs1 = sum(self.img_seqs1, [])
self.img_seqs2 = sum(self.img_seqs2, [])
def test_block_init(self):
blk = Block(name='a block')
assert_neo_object_is_compliant(blk)
self.assertEqual(blk.name, 'a block')
self.assertEqual(blk.file_origin, None)
def check_creation(self, blk):
assert_neo_object_is_compliant(blk)
seed = blk.annotations['seed']
targ0 = get_fake_value('file_datetime', datetime, seed=seed + 0)
self.assertEqual(blk.file_datetime, targ0)
targ1 = get_fake_value('rec_datetime', datetime, seed=seed + 1)
self.assertEqual(blk.rec_datetime, targ1)
targ2 = get_fake_value('index', int, seed=seed + 2, obj=Block)
self.assertEqual(blk.index, targ2)
targ3 = get_fake_value('name', str, seed=seed + 3, obj=Block)
self.assertEqual(blk.name, targ3)
targ4 = get_fake_value('description', str, seed=seed + 4, obj=Block)
self.assertEqual(blk.description, targ4)
targ5 = get_fake_value('file_origin', str)
self.assertEqual(blk.file_origin, targ5)
targ6 = get_annotations()
targ6['seed'] = seed
self.assertEqual(blk.annotations, targ6)
self.assertTrue(hasattr(blk, 'channel_indexes'))
self.assertTrue(hasattr(blk, 'segments'))
self.assertEqual(len(blk.channel_indexes), self.nchildren)
self.assertEqual(len(blk.segments), self.nchildren)
def test__creation(self):
self.check_creation(self.blk1)
self.check_creation(self.blk2)
def test__merge(self):
blk1a = fake_neo(Block,
seed=self.seed1, n=self.nchildren)
assert_same_sub_schema(self.blk1, blk1a)
blk1a.annotate(seed=self.seed2)
blk1a.segments.append(self.segs2[0])
blk1a.merge(self.blk2)
segs1a = clone_object(self.blk1).segments
chxs1a = clone_object(self.chxs1)
assert_same_sub_schema(chxs1a + self.chxs2,
blk1a.channel_indexes)
assert_same_sub_schema(segs1a + self.segs2,
blk1a.segments)
def test__children(self):
segs1a = clone_object(self.blk1).segments
chxs1a = clone_object(self.chxs1)
self.assertEqual(self.blk1._container_child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._data_child_objects, ())
self.assertEqual(self.blk1._single_parent_objects, ())
self.assertEqual(self.blk1._multi_child_objects, ())
self.assertEqual(self.blk1._multi_parent_objects, ())
self.assertEqual(self.blk1._child_properties,
('Unit',))
self.assertEqual(self.blk1._single_child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._container_child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._data_child_containers, ())
self.assertEqual(self.blk1._single_child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._single_parent_containers, ())
self.assertEqual(self.blk1._multi_child_containers, ())
self.assertEqual(self.blk1._multi_parent_containers, ())
self.assertEqual(self.blk1._child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._parent_objects, ())
self.assertEqual(self.blk1._parent_containers, ())
self.assertEqual(len(self.blk1._single_children), 2 * self.nchildren)
self.assertEqual(len(self.blk1._multi_children), 0)
self.assertEqual(len(self.blk1.data_children), 0)
self.assertEqual(len(self.blk1.data_children_recur),
1 * self.nchildren ** 3 + 5 * self.nchildren ** 2)
self.assertEqual(len(self.blk1.container_children), 2 * self.nchildren)
self.assertEqual(len(self.blk1.container_children_recur),
2 * self.nchildren + 1 * self.nchildren ** 2)
self.assertEqual(len(self.blk1.children), 2 * self.nchildren)
self.assertEqual(len(self.blk1.children_recur),
2 * self.nchildren +
6 * self.nchildren ** 2 +
1 * self.nchildren ** 3)
self.assertEqual(self.blk1._multi_children, ())
assert_same_sub_schema(list(self.blk1._single_children),
self.segs1 + self.chxs1)
assert_same_sub_schema(list(self.blk1.container_children),
self.segs1 + self.chxs1)
assert_same_sub_schema(list(self.blk1.container_children_recur),
self.segs1 + self.chxs1 +
self.units1[:2] +
self.units1[2:])
assert_same_sub_schema(list(self.blk1.data_children_recur),
self.sigarrs1[::2] +
self.epcs1[:2] + self.evts1[:2] +
self.irsigs1[::2] +
self.trains1[::2] +
self.img_seqs1[:2] +
self.sigarrs1[1::2] +
self.epcs1[2:] + self.evts1[2:] +
self.irsigs1[1::2] +
self.trains1[1::2] +
self.img_seqs1[2:],
exclude=['channel_index'])
assert_same_sub_schema(list(self.blk1.children),
segs1a + chxs1a)
assert_same_sub_schema(list(self.blk1.children_recur),
self.sigarrs1[::2] +
self.epcs1[:2] + self.evts1[:2] +
self.irsigs1[::2] +
self.trains1[::2] +
self.img_seqs1[:2] +
self.sigarrs1[1::2] +
self.epcs1[2:] + self.evts1[2:] +
self.irsigs1[1::2] +
self.trains1[1::2] +
self.img_seqs1[2:] +
self.segs1 + self.chxs1 +
self.units1[:2] +
self.units1[2:],
exclude=['channel_index'])
def test__size(self):
targ = {'segments': self.nchildren,
'channel_indexes': self.nchildren}
self.assertEqual(self.targobj.size, targ)
def test__filter_none(self):
targ = []
# collecting all data objects in target block
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
targ.extend(seg.epochs)
targ.extend(seg.events)
targ.extend(seg.irregularlysampledsignals)
targ.extend(seg.spiketrains)
targ.extend(seg.imagesequences)
res1 = self.targobj.filter()
res2 = self.targobj.filter({})
res3 = self.targobj.filter([])
res4 = self.targobj.filter([{}])
res5 = self.targobj.filter([{}, {}])
res6 = self.targobj.filter([{}, {}])
res7 = self.targobj.filter(targdict={})
res8 = self.targobj.filter(targdict=[])
res9 = self.targobj.filter(targdict=[{}])
res10 = self.targobj.filter(targdict=[{}, {}])
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
def test__filter_annotation_single(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]])
res0 = self.targobj.filter(j=1)
res1 = self.targobj.filter({'j': 1})
res2 = self.targobj.filter(targdict={'j': 1})
res3 = self.targobj.filter([{'j': 1}])
res4 = self.targobj.filter(targdict=[{'j': 1}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_single_annotation_nores(self):
targ = []
res0 = self.targobj.filter(j=5)
res1 = self.targobj.filter({'j': 5})
res2 = self.targobj.filter(targdict={'j': 5})
res3 = self.targobj.filter([{'j': 5}])
res4 = self.targobj.filter(targdict=[{'j': 5}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_attribute_single(self):
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_attribute_single_nores(self):
targ = []
name = self.trains2[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.trains1[0]])
name = self.trains1[0].name
res0 = self.targobj.filter(name=name, j=1)
res1 = self.targobj.filter({'name': name, 'j': 1})
res2 = self.targobj.filter(targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_nores(self):
targ = []
name0 = self.sigarrs2[0].name
res0 = self.targobj.filter([{'j': 5}, {}])
res1 = self.targobj.filter({}, j=5)
res2 = self.targobj.filter([{}], i=6)
res3 = self.targobj.filter({'name': name0}, j=1)
res4 = self.targobj.filter(targdict={'name': name0}, j=1)
res5 = self.targobj.filter(name=name0, targdict={'j': 1})
res6 = self.targobj.filter(name=name0, j=5)
res7 = self.targobj.filter({'name': name0, 'j': 5})
res8 = self.targobj.filter(targdict={'name': name0, 'j': 5})
res9 = self.targobj.filter({'name': name0}, j=5)
res10 = self.targobj.filter(targdict={'name': name0}, j=5)
res11 = self.targobj.filter(name=name0, targdict={'j': 5})
res12 = self.targobj.filter({'name': name0}, j=5)
res13 = self.targobj.filter(targdict={'name': name0}, j=5)
res14 = self.targobj.filter(name=name0, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filter_multi_partres_annotation_attribute(self):
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = self.targobj.filter(name=name, j=90)
res1 = self.targobj.filter({'name': name, 'j': 90})
res2 = self.targobj.filter(targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_partres_annotation_annotation(self):
targ = self.trains1[::2]
res0 = self.targobj.filter([{'j': 0}, {'i': 0}])
res1 = self.targobj.filter({'j': 0}, i=0)
res2 = self.targobj.filter([{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_no_annotation_but_object(self):
targ = []
for seg in self.targobj.segments:
targ.extend(seg.spiketrains)
res = self.targobj.filter(objects=SpikeTrain)
assert_same_sub_schema(res, targ)
targ = []
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
res = self.targobj.filter(objects=AnalogSignal)
assert_same_sub_schema(res, targ)
targ = []
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
targ.extend(seg.spiketrains)
res = self.targobj.filter(objects=[AnalogSignal, SpikeTrain])
assert_same_sub_schema(res, targ)
def test__filter_single_annotation_obj_single(self):
targ = self.trains1[1::2]
res0 = self.targobj.filter(j=1, objects='SpikeTrain')
res1 = self.targobj.filter(j=1, objects=SpikeTrain)
res2 = self.targobj.filter(j=1, objects=['SpikeTrain'])
res3 = self.targobj.filter(j=1, objects=[SpikeTrain])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
def test__filter_single_annotation_norecur(self):
targ = []
res0 = self.targobj.filter(j=1, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata(self):
targ = []
res0 = self.targobj.filter(j=1, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3]])
res0 = self.targobj.filter(j=1, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_data(self):
targ = [self.trains1[0]]
res0 = self.targobj.filter(name=self.trains1[0].name, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container_norecur(self):
targ = [self.segs1[1], self.chxs1[1]]
res0 = self.targobj.filter(j=1, container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container(self):
targ = [self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3]]
res0 = self.targobj.filter(j=1,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container_norecur(self):
targ = [self.segs1[1], self.chxs1[1]]
res0 = self.targobj.filter(j=1,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filterdata_multi(self):
data = self.targobj.children_recur
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3],
self.trains1[0]])
name = self.trains1[0].name
res0 = filterdata(data, name=name, j=1)
res1 = filterdata(data, {'name': name, 'j': 1})
res2 = filterdata(data, targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_nores(self):
data = self.targobj.children_recur
targ = []
name1 = self.sigarrs1[0].name
name2 = self.sigarrs2[0].name
res0 = filterdata(data, [{'j': 6}, {}])
res1 = filterdata(data, {}, i=6)
res2 = filterdata(data, [{}], i=6)
res3 = filterdata(data, name=name1, targdict={'j': 1})
res4 = filterdata(data, {'name': name1}, j=1)
res5 = filterdata(data, targdict={'name': name1}, j=1)
res6 = filterdata(data, name=name2, j=6)
res7 = filterdata(data, {'name': name2, 'j': 6})
res8 = filterdata(data, targdict={'name': name2, 'j': 6})
res9 = filterdata(data, {'name': name2}, j=6)
res10 = filterdata(data, targdict={'name': name2}, j=6)
res11 = filterdata(data, name=name2, targdict={'j': 6})
res12 = filterdata(data, {'name': name1}, j=6)
res13 = filterdata(data, targdict={'name': name1}, j=6)
res14 = filterdata(data, name=name1, targdict={'j': 6})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filterdata_multi_partres_annotation_attribute(self):
data = self.targobj.children_recur
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = filterdata(data, name=name, j=90)
res1 = filterdata(data, {'name': name, 'j': 90})
res2 = filterdata(data, targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_partres_annotation_annotation(self):
data = self.targobj.children_recur
targ = (self.trains1[::2] +
self.segs1[:1] + self.units1[::2])
res0 = filterdata(data, [{'j': 0}, {'i': 0}])
res1 = filterdata(data, {'j': 0}, i=0)
res2 = filterdata(data, [{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
# @unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
# def test__pretty(self):
# res = pretty(self.blk1)
# ann = get_annotations()
# ann['seed'] = self.seed1
# ann = pretty(ann).replace('\n ', '\n ')
#
# seg0 = pretty(self.segs1[0])
# seg1 = pretty(self.segs1[1])
# seg0 = seg0.replace('\n', '\n ')
# seg1 = seg1.replace('\n', '\n ')
#
# targ = ("Block with " +
# ("%s segments, %s channel_indexes\n" %
# (len(self.segs1), len(self.chxs1))) +
# ("name: '%s'\ndescription: '%s'\n" % (self.blk1.name,
# self.blk1.description)) +
# ("annotations: %s\n" % ann) +
# ("file_origin: '%s'\n" % self.blk1.file_origin) +
# ("file_datetime: %s\n" % repr(self.blk1.file_datetime)) +
# ("rec_datetime: %s\n" % repr(self.blk1.rec_datetime)) +
# ("index: %s\n" % self.blk1.index) +
#
#
# ("# segments (N=%s)\n" % len(self.segs1)) +
# ('%s: %s\n' % (0, seg0)) +
# ('%s: %s' % (1, seg1)))
#
# self.assertEqual(res, targ)
def test_block_list_units(self):
assert_same_sub_schema(self.units1, self.blk1.list_units)
assert_same_sub_schema(self.units2, self.blk2.list_units)
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class(Unit))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class(Unit))
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class('Unit'))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class('Unit'))
def test__deepcopy(self):
blk1_copy = deepcopy(self.blk1)
# Check links from parents to children
assert_same_sub_schema(blk1_copy, self.blk1)
# Check links from children to parents
for segment in blk1_copy.segments:
self.assertEqual(id(segment.block), id(blk1_copy))
for sig in segment.analogsignals:
self.assertEqual(id(sig.segment), id(segment))
for sptr in segment.spiketrains:
self.assertEqual(id(sptr.segment), id(segment))
for chidx in blk1_copy.channel_indexes:
self.assertEqual(id(chidx.block), id(blk1_copy))
for sig in chidx.analogsignals:
self.assertEqual(id(sig.channel_index), id(chidx))
for sig in chidx.irregularlysampledsignals:
self.assertEqual(id(sig.channel_index), id(chidx))
for unit in chidx.units:
self.assertEqual(id(unit.channel_index), id(chidx))
for sptr in unit.spiketrains:
self.assertEqual(id(sptr.unit), id(unit))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
yoyo2k/l10n-romania | account_compensation_vat_on_payment/account_compensation.py | 2 | 10256 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
# from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_compensation(osv.Model):
_inherit = "account.compensation"
def is_vat_on_payment(self, compensation):
vat_on_p = 0
for line in compensation.line_ids:
if line.amount:
if line.move_line_id and line.move_line_id.invoice and line.move_line_id.invoice.vat_on_payment:
vat_on_p += 1
return vat_on_p
def action_move_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
inv_pool = self.pool.get('account.invoice')
journal_pool = self.pool.get('account.journal')
move_line_pool = self.pool.get('account.move.line')
move_pool = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
currency_obj = self.pool.get('res.currency')
res = False
for compensation in self.browse(cr, uid, ids, context):
entry_posted = compensation.journal_id.entry_posted
# disable the 'skip draft state' option because "mixed" entry
# (shadow + real) won't pass validation. Anyway every entry will be
# posted later (if 'entry_posted' is enabled)
if entry_posted:
journal_pool.write(
cr, uid, compensation.journal_id.id, {'entry_posted': False})
res = super(account_compensation, self).action_move_line_create(
cr, uid, [compensation.id], context)
# because 'move_id' has been updated by 'action_move_line_create'
compensation.refresh()
if entry_posted:
journal_pool.write(
cr, uid, compensation.journal_id.id, {'entry_posted': True})
if self.is_vat_on_payment(compensation):
lines_to_create = []
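                # For each invoice paid by this compensation, transfer the paid
                # fraction of the VAT from the interim (shadow) account/tax code
                # to the real one by creating paired move lines.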
amounts_by_invoice = self.allocated_amounts_grouped_by_invoice(
cr, uid, compensation, context)
for inv_id in amounts_by_invoice:
invoice = inv_pool.browse(cr, uid, inv_id, context)
for acc_move_line in invoice.move_id.line_id:
if acc_move_line.real_tax_code_id:
# compute the VAT or base line proportionally to
# the paid amount
new_line_amount = currency_obj.round(cr, uid, compensation.company_id.currency_id, ((amounts_by_invoice[invoice.id][
'allocated'] + amounts_by_invoice[invoice.id]['write-off']) / amounts_by_invoice[invoice.id]['total']) * acc_move_line.tax_amount)
acc = acc_move_line.real_account_id and acc_move_line.real_account_id.id or acc_move_line.account_id.id
# prepare the real move line
vals = {
'name': invoice.number + ' - ' + acc_move_line.name,
'account_id': acc,
'credit': acc_move_line.credit and new_line_amount or 0.0,
'debit': acc_move_line.debit and new_line_amount or 0.0,
'date': compensation.date,
'partner_id': acc_move_line.partner_id and acc_move_line.partner_id.id or False,
'tax_code_id': acc_move_line.real_tax_code_id.id,
'tax_amount': new_line_amount
}
if acc_move_line.product_id:
vals['debit'] = vals['credit'] = 0.00
lines_to_create.append(vals)
# prepare the shadow move line
vals = {
'name': invoice.number + ' - ' + acc_move_line.name,
'account_id': acc_move_line.account_id.id,
'credit': acc_move_line.debit and new_line_amount or 0.0,
'debit': acc_move_line.credit and new_line_amount or 0.0,
'date': compensation.date,
'partner_id': acc_move_line.partner_id and acc_move_line.partner_id.id or False,
'tax_code_id': acc_move_line.tax_code_id.id,
'tax_amount': -new_line_amount
}
if acc_move_line.product_id:
vals['debit'] = vals['credit'] = 0.00
lines_to_create.append(vals)
for line_to_create in lines_to_create:
line_to_create['move_id'] = compensation.move_id.id
move_line_pool.create(cr, uid, line_to_create, context)
self.balance_move(cr, uid, compensation.move_id.id, context)
move_pool.post(cr, uid, [compensation.move_id.id], context=context)
return res
def balance_move(self, cr, uid, move_id, context=None):
currency_obj = self.pool.get('res.currency')
move = self.pool.get('account.move').browse(cr, uid, move_id, context)
amount = 0.0
for line in move.line_id:
amount += line.debit - line.credit
amount = currency_obj.round(
cr, uid, move.company_id.currency_id, amount)
        # check if balance differs by more than 1 decimal according to account
# decimal precision
if abs(amount * 10 ** dp.get_precision('Account')(cr)[1]) > 1:
raise osv.except_osv(_('Error'), _(
'The generated payment entry is unbalanced for more than 1 decimal'))
if not currency_obj.is_zero(cr, uid, move.company_id.currency_id, amount):
for line in move.line_id:
# adjust the first move line that's not receivable, payable or
# liquidity
if line.account_id.type != 'receivable' and line.account_id.type != 'payable' and line.account_id.type != 'liquidity':
if line.credit:
line.write({
'credit': line.credit + amount,
}, update_check=False)
elif line.debit:
line.write({
'debit': line.debit - amount,
}, update_check=False)
if line.tax_amount:
line.write({
'tax_amount': line.tax_amount + amount,
}, update_check=False)
break
return amount
def get_invoice_total(self, invoice):
res = 0.0
for inv_move_line in invoice.move_id.line_id:
if inv_move_line.account_id.id == invoice.account_id.id:
                # can both be present?
res += inv_move_line.debit or inv_move_line.credit
return res
def allocated_amounts_grouped_by_invoice(self, cr, uid, compensation, context=None):
'''
this method builds a dictionary in the following form
{
first_invoice_id: {
'allocated': 120.0,
'total': 120.0,
'write-off': 20.0,
}
second_invoice_id: {
'allocated': 50.0,
'total': 100.0,
'write-off': 0.0,
}
}
        Every amount is expressed in company currency.
        In order to compute the cashed amount correctly, the write-off is
        subtracted from the reconciled amount.
        If more than one invoice is paid with this compensation, the
        write-off is distributed equally (if allowed).
'''
res = {}
company_currency = super(account_compensation, self)._get_company_currency(
cr, uid, compensation.id, context)
current_currency = super(account_compensation, self)._get_current_currency(
cr, uid, compensation.id, context)
for line in compensation.line_ids:
if line.amount and line.move_line_id and line.move_line_id.invoice:
if line.move_line_id.invoice.id not in res:
res[line.move_line_id.invoice.id] = {
'allocated': 0.0,
'total': 0.0,
'write-off': 0.0, }
current_amount = line.amount
if company_currency != current_currency:
current_amount = super(account_compensation, self)._convert_amount(
cr, uid, line.amount, compensation.id, context)
res[line.move_line_id.invoice.id][
'allocated'] += current_amount
res[line.move_line_id.invoice.id][
'total'] = self.get_invoice_total(line.move_line_id.invoice)
return res
| agpl-3.0 |
mganeva/mantid | scripts/Interface/reduction_gui/widgets/inelastic/dgs_pd_sc_conversion.py | 1 | 5097 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui, QtCore
from functools import partial
from reduction_gui.widgets.base_widget import BaseWidget
from reduction_gui.reduction.inelastic.dgs_pd_sc_conversion_script import PdAndScConversionScript
import ui.inelastic.ui_dgs_pd_sc_conversion
import reduction_gui.widgets.util as util
class PdAndScConversionWidget(BaseWidget):
"""
Widget that presents powder and single crystal data conversion options
to the user.
"""
## Widget name
name = "Powder and SC"
def __init__(self, parent=None, state=None, settings=None, data_type=None):
super(PdAndScConversionWidget, self).__init__(parent, state, settings,
data_type=data_type)
class PdAndScConversionFrame(QtGui.QFrame, ui.inelastic.ui_dgs_pd_sc_conversion.Ui_PdScConversionFrame):
def __init__(self, parent=None):
QtGui.QFrame.__init__(self, parent)
self.setupUi(self)
self._content = PdAndScConversionFrame(self)
self._layout.addWidget(self._content)
self._instrument_name = settings.instrument_name
self.initialize_content()
if state is not None:
self.set_state(state)
else:
self.set_state(PdAndScConversionScript(self._instrument_name))
def initialize_content(self):
# Constraints
self._content.q_low_edit.setValidator(QtGui.QDoubleValidator(self._content.q_low_edit))
self._content.q_width_edit.setValidator(QtGui.QDoubleValidator(self._content.q_width_edit))
self._content.q_high_edit.setValidator(QtGui.QDoubleValidator(self._content.q_high_edit))
# Default states
self._save_powder_nxs_state(self._content.save_procnexus_cb.isChecked())
# Connections
self.connect(self._content.save_procnexus_save, QtCore.SIGNAL("clicked()"),
self._save_powder_nxs_save)
self.connect(self._content.save_procnexus_cb, QtCore.SIGNAL("toggled(bool)"),
self._save_powder_nxs_state)
# Validate widgets
self._connect_validated_lineedit(self._content.q_low_edit)
self._connect_validated_lineedit(self._content.q_width_edit)
self._connect_validated_lineedit(self._content.q_high_edit)
def _check_and_set_lineedit_content(self, lineedit, content):
lineedit.setText(content)
util.set_valid(lineedit, not lineedit.text() == '')
def _connect_validated_lineedit(self, ui_ctrl):
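        # Re-run validation whenever the user edits the field or finishes editing.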
call_back = partial(self._validate_edit, ctrl=ui_ctrl)
self.connect(ui_ctrl, QtCore.SIGNAL("editingFinished()"), call_back)
self.connect(ui_ctrl, QtCore.SIGNAL("textEdited(QString)"), call_back)
self.connect(ui_ctrl, QtCore.SIGNAL("textChanged(QString)"), call_back)
def _validate_edit(self, ctrl=None):
is_valid = True
if not ctrl.text():
is_valid = False
util.set_valid(ctrl, is_valid)
def _save_powder_nxs_state(self, state):
self._content.save_procnexus_edit.setEnabled(state)
self._content.save_procnexus_save.setEnabled(state)
def _save_powder_nxs_save(self):
fname = self.data_save_dialog("*.nxs")
if fname:
self._content.save_procnexus_edit.setText(fname)
def set_state(self, state):
"""
Populate the UI elements with the data from the given state.
@param state: PdAndScConversionScript object
"""
self._content.powder_gb.setChecked(state.do_pd_convert)
self._check_and_set_lineedit_content(self._content.q_low_edit,
state.pd_q_range_low)
self._check_and_set_lineedit_content(self._content.q_width_edit,
state.pd_q_range_width)
self._check_and_set_lineedit_content(self._content.q_high_edit,
state.pd_q_range_high)
self._content.save_procnexus_cb.setChecked(state.save_powder_nxs)
self._content.save_procnexus_edit.setText(state.save_powder_nxs_file)
def get_state(self):
"""
Returns an object with the state of the interface
"""
p = PdAndScConversionScript(self._instrument_name)
p.do_pd_convert = self._content.powder_gb.isChecked()
p.pd_q_range_low = self._content.q_low_edit.text()
p.pd_q_range_width = self._content.q_width_edit.text()
p.pd_q_range_high = self._content.q_high_edit.text()
p.save_powder_nxs = self._content.save_procnexus_cb.isChecked()
p.save_powder_nxs_file = self._content.save_procnexus_edit.text()
return p
| gpl-3.0 |
jualvarez/charlex | 03-Bonus/charlexapi/charlas/models.py | 2 | 2443 | from django.db import models
from django.conf import settings
class Orador(models.Model):
class Meta:
verbose_name = "orador"
verbose_name_plural = "oradores"
nombre = models.CharField(verbose_name='nombre', max_length=100)
bio = models.TextField(verbose_name='curriculum vitae')
foto = models.ImageField(verbose_name='foto', upload_to='fotosorador')
def __str__(self):
return self.nombre
class Lugar(models.Model):
class Meta:
verbose_name = "lugar"
verbose_name_plural = "lugares"
nombre = models.CharField(verbose_name='nombre del lugar', max_length=100)
def __str__(self):
return self.nombre
class Charla(models.Model):
class Meta:
verbose_name = "charla"
verbose_name_plural = "charlas"
titulo = models.CharField(verbose_name='título', max_length=100)
orador = models.ForeignKey(Orador, verbose_name='orador')
lugar = models.ForeignKey(Lugar, verbose_name='lugar')
hora = models.DateTimeField(verbose_name='hora')
duracion = models.DurationField(verbose_name='duración')
descripcion = models.TextField(verbose_name='descripción de la charla', null=True)
asistentes = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='UsuarioCharla',
through_fields=('charla', 'usuario'),
related_name='charlas'
)
fotos = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='FotoCharla',
through_fields=('charla', 'usuario'),
related_name='fotos_charlas'
)
def __str__(self):
return "%s (%s)" % (self.titulo, self.orador.nombre)
class UsuarioCharla(models.Model):
usuario = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
charla = models.ForeignKey(Charla, on_delete=models.CASCADE)
rating = models.IntegerField(verbose_name='rating', null=True)
class Meta:
unique_together = ('usuario', 'charla')
def __str__(self):
return "%s va a '%s'" % (self.usuario.username, self.charla.titulo)
class FotoCharla(models.Model):
foto = models.ImageField(upload_to='fotoscharla')
usuario = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
charla = models.ForeignKey(Charla, on_delete=models.CASCADE)
def __str__(self):
return "Sacada por %s en '%s'" % (self.usuario.username, self.charla.titulo)
| gpl-3.0 |
vicente-gonzalez-ruiz/QSVC | trunk/src/old_py/motion_expand_COPIA_SIN_LIST.py | 1 | 4326 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# motion_expand.py
# Decompresses the motion data.
import os
import sys
from GOP import GOP
from subprocess import check_call
from subprocess import CalledProcessError
from MCTF_parser import MCTF_parser
#MOTION_DECODER_NAME = "gzip"
#MOTION_DECODER_NAME = "kdu_v_expand"
MCTF_MOTION_CODEC = os.environ["MCTF_MOTION_CODEC"]
block_size = 16
block_size_min = 16
GOPs = 1
pixels_in_x = 352
pixels_in_y = 288
TRLs = 5
parser = MCTF_parser(description="Expands the motion data.")
parser.block_size(block_size)
parser.block_size_min(block_size_min)
parser.GOPs(GOPs)
parser.pixels_in_x(pixels_in_x)
parser.pixels_in_y(pixels_in_y)
parser.TRLs(TRLs)
args = parser.parse_known_args()[0]
if args.block_size:
block_size = int(args.block_size)
if args.block_size_min:
block_size_min = int(args.block_size_min)
if args.GOPs:
GOPs = int(args.GOPs)
if args.pixels_in_x:
pixels_in_x = int(args.pixels_in_x)
if args.pixels_in_y:
pixels_in_y = int(args.pixels_in_y)
if args.TRLs:
TRLs = int(args.TRLs)
gop=GOP()
GOP_size = gop.get_size(TRLs)
pictures = GOPs * GOP_size + 1
if block_size < block_size_min:
block_size_min = block_size
# Compute the block size used at the lowest temporal resolution
# level.
iterations = TRLs - 1
max_block_size = block_size
iters = TRLs - 1
fields = pictures / 2
iteration = 0
while iteration < iterations:
block_size = block_size / 2
if (block_size < block_size_min):
block_size = block_size_min
fields /= 2
iteration += 1
blocks_in_y = pixels_in_y / block_size
blocks_in_x = pixels_in_x / block_size
# Decompress the motion fields.
iteration = 1
fields = pictures / 2
while iteration <= iterations:
try:
check_call("mctf motion_expand_" + MCTF_MOTION_CODEC
+ " --file=" + "\"" + "motion_residue_" + str(iteration) + "\""
+ " --blocks_in_y=" + str(blocks_in_y)
+ " --blocks_in_x=" + str(blocks_in_x)
+ " --fields=" + str(fields)
+ " --pictures=" + str(pictures),
shell=True)
except CalledProcessError:
sys.exit(-1)
fields /= 2
# os.system("svc motion_expand_" + "gzip"
# + " --blocks_in_x=" + str(blocks_in_x)
# + " --blocks_in_y=" + str(blocks_in_y)
# + " --iteration=" + str(iteration)
# + " --file=" + "\"" + prefix + "_motion_residue_" + str(iteration) + "\""
# + " --pictures=" + str(pictures)
# + " --temporal_levels=" + str(temporal_levels)
# )
iteration += 1
fields = GOPs
try:
    # Undo the bidirectional decorrelation at the lowest temporal
    # resolution level.
check_call("mctf bidirectional_motion_correlate"
+ " --blocks_in_y=" + str(blocks_in_y)
+ " --blocks_in_x=" + str(blocks_in_x)
+ " --fields=" + str(fields)
+ " --input=" + "\"" + "motion_residue_" + str(TRLs - 1) + "\""
+ " --output=" + "\"" + "motion_" + str(TRLs - 1) + "\"",
shell=True)
except CalledProcessError:
sys.exit(-1)
# Undo the interlevel decorrelation.
iterations = TRLs - 1
iteration = iterations
while iteration > 1:
iteration -= 1
fields = pictures / (2**iteration)
blocks_in_y = pixels_in_y / block_size
blocks_in_x = pixels_in_x / block_size
try:
        # Decorrelate the motion fields between resolution levels.
check_call("mctf interlevel_motion_correlate"
+ " --blocks_in_x=" + str(blocks_in_x)
+ " --blocks_in_y=" + str(blocks_in_y)
+ " --fields_in_predicted=" + str(fields)
+ " --predicted=" + "\"" + "motion_" + str(iteration) + "\""
+ " --reference=" + "\"" + "motion_" + str(iteration+1) + "\""
+ " --residue=" + "\"" + "motion_residue_" + str(iteration) + "\"",
shell=True)
except CalledProcessError:
sys.exit(-1)
    # Compute the block size used in this temporal iteration.
block_size = block_size/2
if (block_size<block_size_min):
block_size = block_size_min
| gpl-2.0 |
anhaidgroup/py_entitymatching | benchmarks/benchmark_sn_blocker.py | 1 | 11105 | # Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import os
import sys
import py_entitymatching as mg
p = mg.get_install_path()
datasets_path = os.sep.join([p, 'datasets', 'example_datasets'])
snb = mg.SortedNeighborhoodBlocker()
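# A single SortedNeighborhoodBlocker instance is shared by all benchmark classes below.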
class TimeBlockTablesAnime:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'anime', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'anime', 'B.csv'])
self.l_block_attr = 'Year'
self.r_block_attr = 'Year'
self.l_output_attrs = ['Title', 'Year', 'Episodes']
self.r_output_attrs = ['Title', 'Year', 'Episodes']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'anime\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesBikes:
def setup(self):
p = mg.get_install_path()
path_for_A = os.sep.join([datasets_path, 'bikes', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'bikes', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'id')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'id')
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'city_posted'
self.r_block_attr = 'city_posted'
self.l_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
self.r_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesBooks:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'books', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'books', 'B.csv'])
self.l_block_attr = 'Author'
self.r_block_attr = 'Author'
self.l_output_attrs = ['Title', 'Author', 'ISBN13', 'Publisher',
'Publication_Date']
self.r_output_attrs = ['Title', 'Author', 'ISBN13', 'Publisher',
'Publication_Date']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'books\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesCitations:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'citations', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'citations', 'B.csv'])
self.l_block_attr = 'year'
self.r_block_attr = 'year'
self.l_output_attrs = ['title', 'author', 'year', 'ENTRYTYPE']
self.r_output_attrs = ['title', 'author', 'year', 'ENTRYTYPE']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'citations\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesElectronics:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'electronics', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'electronics', 'B.csv'])
self.l_block_attr = 'Brand'
self.r_block_attr = 'Brand'
self.l_output_attrs = ['Brand', 'Amazon_Price']
self.r_output_attrs = ['Brand', 'Price']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'electronics\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesRestaurants:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'restaurants', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'restaurants', 'B.csv'])
self.l_block_attr = 'PHONENUMBER'
self.r_block_attr = 'PHONENUMBER'
self.l_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
self.r_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'restaurants\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockCandsetAnime:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'anime', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'anime', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'ID')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'ID')
self.C = snb.block_tables(A, B, 'Year', 'Year',
['Title', 'Year', 'Episodes'],
['Title', 'Year', 'Episodes'])
except AssertionError:
print("Dataset \'anime\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'Episodes'
self.r_block_attr = 'Episodes'
def time_block_candset(self):
snb.block_candset(self.C, self.l_block_attr, self.r_block_attr)
def teardown(self):
del self.C
del self.l_block_attr
del self.r_block_attr
class TimeBlockCandsetBikes:
timeout = 300.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'bikes', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'bikes', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'id')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'id')
l_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
r_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
self.C = snb.block_tables(A, B, 'city_posted', 'city_posted',
l_output_attrs, r_output_attrs)
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'model_year'
self.r_block_attr = 'model_year'
def time_block_candset(self):
snb.block_candset(self.C, self.l_block_attr, self.r_block_attr)
def teardown(self):
del self.C
del self.l_block_attr
del self.r_block_attr
class TimeBlockCandsetBooks:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'books', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'books', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'ID')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'ID')
self.C = snb.block_tables(A, B, 'Author', 'Author',
['Title', 'Author', 'ISBN13', 'Publisher'],
['Title', 'Author', 'ISBN13', 'Publisher'])
except AssertionError:
print("Dataset \'books\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'ISBN13'
self.r_block_attr = 'ISBN13'
def time_block_candset(self):
snb.block_candset(self.C, self.l_block_attr, self.r_block_attr)
def teardown(self):
del self.C
del self.l_block_attr
del self.r_block_attr
| bsd-3-clause |
sencha/chromium-spacewalk | third_party/closure_compiler/processor_test.py | 33 | 3425 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test resources processing, i.e. <if> and <include> tag handling."""
import unittest
from processor import FileCache, Processor, LineNumber
class ProcessorTest(unittest.TestCase):
"""Test <include> tag processing logic."""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.maxDiff = None
def setUp(self):
FileCache._cache["/debug.js"] = """
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
""".strip()
FileCache._cache["/global.js"] = """
// Copyright 2014 Old Chromium Author dudes.
<include src="/debug.js">
var global = 'type checking!';
""".strip()
FileCache._cache["/checked.js"] = """
// Copyright 2028 Future Chromium Author dudes.
/**
* @fileoverview Coolest app ever.
* @author Douglas Crockford ([email protected])
*/
<include src="/global.js">
debug(global);
// Here continues checked.js, a swell file.
""".strip()
self._processor = Processor("/checked.js")
def testInline(self):
self.assertMultiLineEqual("""
// Copyright 2028 Future Chromium Author dudes.
/**
* @fileoverview Coolest app ever.
* @author Douglas Crockford ([email protected])
*/
// Copyright 2014 Old Chromium Author dudes.
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
var global = 'type checking!';
debug(global);
// Here continues checked.js, a swell file.
""".strip(), self._processor.contents)
def assertLineNumber(self, abs_line, expected_line):
actual_line = self._processor.get_file_from_line(abs_line)
self.assertEqual(expected_line.file, actual_line.file)
self.assertEqual(expected_line.line_number, actual_line.line_number)
def testGetFileFromLine(self):
"""Verify that inlined files retain their original line info."""
self.assertLineNumber(1, LineNumber("/checked.js", 1))
self.assertLineNumber(5, LineNumber("/checked.js", 5))
self.assertLineNumber(6, LineNumber("/global.js", 1))
self.assertLineNumber(7, LineNumber("/debug.js", 1))
self.assertLineNumber(8, LineNumber("/debug.js", 2))
self.assertLineNumber(9, LineNumber("/global.js", 3))
self.assertLineNumber(10, LineNumber("/checked.js", 7))
self.assertLineNumber(11, LineNumber("/checked.js", 8))
def testIncludedFiles(self):
"""Verify that files are tracked correctly as they're inlined."""
self.assertEquals(set(["/global.js", "/debug.js"]),
self._processor.included_files)
class IfStrippingTest(unittest.TestCase):
"""Test that the contents of XML <if> blocks are stripped."""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.maxDiff = None
def setUp(self):
FileCache._cache["/century.js"] = """
function getCurrentCentury() {
<if expr="netscape_os">
alert("Oh wow!");
return "XX";
</if>
return "XXI";
}
""".strip()
self.processor_ = Processor("/century.js")
def testIfStripping(self):
self.assertMultiLineEqual("""
function getCurrentCentury() {
alert("Oh wow!");
return "XX";
return "XXI";
}
""".strip(), self.processor_.contents)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
openstack-dev/devstack | roles/write-devstack-local-conf/library/devstack_local_conf.py | 3 | 12720 | # Copyright (C) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
class DependencyGraph(object):
# This is based on the JobGraph from Zuul.
def __init__(self):
self._names = set()
self._dependencies = {} # dependent_name -> set(parent_names)
def add(self, name, dependencies):
# Append the dependency information
self._dependencies.setdefault(name, set())
try:
for dependency in dependencies:
# Make sure a circular dependency is never created
ancestors = self._getParentNamesRecursively(
dependency, soft=True)
ancestors.add(dependency)
if name in ancestors:
raise Exception("Dependency cycle detected in {}".
format(name))
self._dependencies[name].add(dependency)
except Exception:
del self._dependencies[name]
raise
def getDependenciesRecursively(self, parent):
dependencies = []
current_dependencies = self._dependencies[parent]
for current in current_dependencies:
if current not in dependencies:
dependencies.append(current)
for dep in self.getDependenciesRecursively(current):
if dep not in dependencies:
dependencies.append(dep)
return dependencies
def _getParentNamesRecursively(self, dependent, soft=False):
all_parent_items = set()
items_to_iterate = set([dependent])
while len(items_to_iterate) > 0:
current_item = items_to_iterate.pop()
current_parent_items = self._dependencies.get(current_item)
if current_parent_items is None:
if soft:
current_parent_items = set()
else:
raise Exception("Dependent item {} not found: ".format(
dependent))
new_parent_items = current_parent_items - all_parent_items
items_to_iterate |= new_parent_items
all_parent_items |= new_parent_items
return all_parent_items
class VarGraph(DependencyGraph):
def __init__(self, vars):
super(VarGraph, self).__init__()
self.vars = {}
self._varnames = set()
for k, v in vars.items():
self._varnames.add(k)
for k, v in vars.items():
self._addVar(k, str(v))
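    # Matches bash-style variable references such as $FOO or ${FOO}.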
bash_var_re = re.compile(r'\$\{?(\w+)')
def getDependencies(self, value):
return self.bash_var_re.findall(value)
def _addVar(self, key, value):
if key in self.vars:
raise Exception("Variable {} already added".format(key))
self.vars[key] = value
# Append the dependency information
dependencies = set()
for dependency in self.getDependencies(value):
if dependency == key:
# A variable is allowed to reference itself; no
# dependency link needed in that case.
continue
if dependency not in self._varnames:
# It's not necessary to create a link for an
# external variable.
continue
dependencies.add(dependency)
try:
self.add(key, dependencies)
except Exception:
del self.vars[key]
raise
def getVars(self):
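        # Emit each variable after the variables it depends on, so that the
        # resulting localrc lines can be evaluated top to bottom.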
ret = []
keys = sorted(self.vars.keys())
seen = set()
for key in keys:
dependencies = self.getDependenciesRecursively(key)
for var in dependencies + [key]:
if var not in seen:
ret.append((var, self.vars[var]))
seen.add(var)
return ret
class PluginGraph(DependencyGraph):
def __init__(self, base_dir, plugins):
super(PluginGraph, self).__init__()
# The dependency trees expressed by all the plugins we found
# (which may be more than those the job is using).
self._plugin_dependencies = {}
self.loadPluginNames(base_dir)
self.plugins = {}
self._pluginnames = set()
for k, v in plugins.items():
self._pluginnames.add(k)
for k, v in plugins.items():
self._addPlugin(k, str(v))
def loadPluginNames(self, base_dir):
if base_dir is None:
return
git_roots = []
for root, dirs, files in os.walk(base_dir):
if '.git' not in dirs:
continue
# Don't go deeper than git roots
dirs[:] = []
git_roots.append(root)
for root in git_roots:
devstack = os.path.join(root, 'devstack')
if not (os.path.exists(devstack) and os.path.isdir(devstack)):
continue
settings = os.path.join(devstack, 'settings')
if not (os.path.exists(settings) and os.path.isfile(settings)):
continue
self.loadDevstackPluginInfo(settings)
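    # Parse "define_plugin <name>" and "plugin_requires <name> <dependency>"
    # lines from a devstack plugin's settings file.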
define_re = re.compile(r'^define_plugin\s+(\S+).*')
require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*')
def loadDevstackPluginInfo(self, fn):
name = None
reqs = set()
with open(fn) as f:
for line in f:
m = self.define_re.match(line)
if m:
name = m.group(1)
m = self.require_re.match(line)
if m:
if name == m.group(1):
reqs.add(m.group(2))
if name and reqs:
self._plugin_dependencies[name] = reqs
def getDependencies(self, value):
return self._plugin_dependencies.get(value, [])
def _addPlugin(self, key, value):
if key in self.plugins:
raise Exception("Plugin {} already added".format(key))
self.plugins[key] = value
# Append the dependency information
dependencies = set()
for dependency in self.getDependencies(key):
if dependency == key:
continue
dependencies.add(dependency)
try:
self.add(key, dependencies)
except Exception:
del self.plugins[key]
raise
def getPlugins(self):
ret = []
keys = sorted(self.plugins.keys())
seen = set()
for key in keys:
dependencies = self.getDependenciesRecursively(key)
for plugin in dependencies + [key]:
if plugin not in seen:
ret.append((plugin, self.plugins[plugin]))
seen.add(plugin)
return ret
class LocalConf(object):
def __init__(self, localrc, localconf, base_services, services, plugins,
base_dir, projects, project, tempest_plugins):
self.localrc = []
self.warnings = []
self.meta_sections = {}
self.plugin_deps = {}
self.base_dir = base_dir
self.projects = projects
self.project = project
self.tempest_plugins = tempest_plugins
if services or base_services:
self.handle_services(base_services, services or {})
self.handle_localrc(localrc)
        # Plugins must be the last items in localrc; otherwise the
        # configuration lines that follow them in the file are not
        # applied to the plugins (for example, the value of DEST).
if plugins:
self.handle_plugins(plugins)
if localconf:
self.handle_localconf(localconf)
def handle_plugins(self, plugins):
pg = PluginGraph(self.base_dir, plugins)
for k, v in pg.getPlugins():
if v:
self.localrc.append('enable_plugin {} {}'.format(k, v))
def handle_services(self, base_services, services):
enable_base_services = services.pop('base', True)
if enable_base_services and base_services:
self.localrc.append('ENABLED_SERVICES={}'.format(
",".join(base_services)))
else:
self.localrc.append('disable_all_services')
for k, v in services.items():
if v is False:
self.localrc.append('disable_service {}'.format(k))
elif v is True:
self.localrc.append('enable_service {}'.format(k))
def handle_localrc(self, localrc):
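        # Track whether LIBS_FROM_GIT and TEMPEST_PLUGINS were set explicitly
        # in the supplied localrc, so defaults are only added when missing.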
lfg = False
tp = False
if localrc:
vg = VarGraph(localrc)
for k, v in vg.getVars():
# Avoid double quoting
if len(v) and v[0]=='"':
self.localrc.append('{}={}'.format(k, v))
else:
self.localrc.append('{}="{}"'.format(k, v))
if k == 'LIBS_FROM_GIT':
lfg = True
elif k == 'TEMPEST_PLUGINS':
tp = True
if not lfg and (self.projects or self.project):
required_projects = []
if self.projects:
for project_name, project_info in self.projects.items():
if project_info.get('required'):
required_projects.append(project_info['short_name'])
if self.project:
if self.project['short_name'] not in required_projects:
required_projects.append(self.project['short_name'])
if required_projects:
self.localrc.append('LIBS_FROM_GIT={}'.format(
','.join(required_projects)))
if self.tempest_plugins:
if not tp:
tp_dirs = []
for tempest_plugin in self.tempest_plugins:
tp_dirs.append(os.path.join(self.base_dir, tempest_plugin))
self.localrc.append('TEMPEST_PLUGINS="{}"'.format(
' '.join(tp_dirs)))
else:
self.warnings.append('TEMPEST_PLUGINS already defined ({}),'
'requested value {} ignored'.format(
tp, self.tempest_plugins))
def handle_localconf(self, localconf):
for phase, phase_data in localconf.items():
for fn, fn_data in phase_data.items():
ms_name = '[[{}|{}]]'.format(phase, fn)
ms_data = []
for section, section_data in fn_data.items():
ms_data.append('[{}]'.format(section))
for k, v in section_data.items():
ms_data.append('{} = {}'.format(k, v))
ms_data.append('')
self.meta_sections[ms_name] = ms_data
def write(self, path):
with open(path, 'w') as f:
f.write('[[local|localrc]]\n')
f.write('\n'.join(self.localrc))
f.write('\n\n')
for section, lines in self.meta_sections.items():
f.write('{}\n'.format(section))
f.write('\n'.join(lines))
def main():
module = AnsibleModule(
argument_spec=dict(
plugins=dict(type='dict'),
base_services=dict(type='list'),
services=dict(type='dict'),
localrc=dict(type='dict'),
local_conf=dict(type='dict'),
base_dir=dict(type='path'),
path=dict(type='str'),
projects=dict(type='dict'),
project=dict(type='dict'),
tempest_plugins=dict(type='list'),
)
)
p = module.params
lc = LocalConf(p.get('localrc'),
p.get('local_conf'),
p.get('base_services'),
p.get('services'),
p.get('plugins'),
p.get('base_dir'),
p.get('projects'),
p.get('project'),
p.get('tempest_plugins'))
lc.write(p['path'])
module.exit_json(warnings=lc.warnings)
try:
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.basic import AnsibleModule
except ImportError:
pass
if __name__ == '__main__':
main()
| apache-2.0 |
pgielda/linux-renesas | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but hierarchical
tree of calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
		on a parent, then create it as a new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
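	# A trace line looks like:
	#            bash-4251  [01] 10152.583854: path_put <-path_walk
	# Capture the timestamp, the callee and the caller.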
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
f3rdy/pybox | walkdict/walkdict.py | 1 | 1212 | __author__ = 'fthiele'
data = {
'level 1-1':
{
'level 1-1-1':
{
'a': 1,
'b': 2,
'c': 3,
},
'level 1-1-2':
{
'd': 4,
'e': 5,
'f': 6,
},
},
'level 1-2':
{
'level 1-2-1':
{
'g': 7,
'h': 8,
'i': 9,
},
'level 1-2-2':
{
'j': 10,
'k': 11,
'l': 12,
'm': [
13, 14, 15, 16
]
}
}
}
def walkdict(data):
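    # Recursively walk the nested dict, printing each leaf key/value pair and
    # expanding list values inline.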
for k, v in data.items():
if isinstance(v, dict):
walkdict(v)
elif isinstance(v, list):
print("{0}: ".format(k), end='')
for item in v:
print(" {0}".format(item), end='')
print(" ")
else:
print("{0} : {1}".format(k, v))
def main():
walkdict(data)
if __name__ == "__main__":
main()
| apache-2.0 |