text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
"""
contentful.locale
~~~~~~~~~~~~~~~~~
This module implements the Locale class.
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/localization
:copyright: (c) 2016 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
from .resource import Resource
class Locale(Resource):
"""
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/localization
"""
def __init__(self, item, **kwargs):
super(Locale, self).__init__(item, **kwargs)
self.code = item.get('code', '')
self.name = item.get('name', '')
self.fallback_code = item.get('fallbackCode', '')
self.default = item.get('default', False)
self.optional = item.get('optional', False)
def __repr__(self):
return "<Locale[{0}] code='{1}' default={2} fallback_code={3} optional={4}>".format(
self.name,
self.code,
self.default,
"'{0}'".format(
self.fallback_code
) if self.fallback_code is not None else 'None',
self.optional
)
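# A brief, hedged usage sketch (not part of the original module). The 'item'
# dict is a hypothetical example of a locale payload from the Content Delivery
# API; in practice Locale instances are built by the contentful Client, and the
# Resource base class may expect additional keys such as 'sys'.
#
#     item = {
#         'code': 'en-US',
#         'name': 'English (United States)',
#         'fallbackCode': None,
#         'default': True,
#         'optional': False,
#     }
#     locale = Locale(item)
#     print(locale)  # <Locale[English (United States)] code='en-US' ...>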
| contentful/contentful.py | contentful/locale.py | Python | mit | 1,165 | 0.002575 |
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Revisions copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests the basic functionality of the KEGG parsers."""
from __future__ import print_function
import os
from Bio.KEGG import Enzyme
from Bio.KEGG import Compound
from Bio.KEGG import Map
from Bio.Pathway import System
# TODO - use unittest instead of print-and-compare testing
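# A possible (untested) unittest-style sketch for one of the parsers, left here
# as a comment next to the TODO above; the file name and module usage mirror
# the print-and-compare tests below:
#
#     import unittest
#
#     class KEGGEnzymeTest(unittest.TestCase):
#         def test_enzyme_sample_parses(self):
#             with open(os.path.join("KEGG", "enzyme.sample")) as handle:
#                 records = list(Enzyme.parse(handle))
#             self.assertTrue(len(records) > 0)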
test_KEGG_Enzyme_files = ["enzyme.sample", "enzyme.irregular", "enzyme.new"]
test_KEGG_Compound_files = ["compound.sample", "compound.irregular"]
test_KEGG_Map_files = ["map00950.rea"]
def t_KEGG_Enzyme(testfiles):
"""Tests Bio.KEGG.Enzyme functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Enzyme on " + file + "\n\n")
records = Enzyme.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
def t_KEGG_Compound(testfiles):
"""Tests Bio.KEGG.Compound functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Compound on " + file + "\n\n")
records = Compound.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
def t_KEGG_Map(testfiles):
"""Tests Bio.KEGG.Map functionality."""
for file in testfiles:
fh = open(os.path.join("KEGG", file))
print("Testing Bio.KEGG.Map on " + file + "\n\n")
reactions = Map.parse(fh)
system = System()
for reaction in reactions:
system.add_reaction(reaction)
# sort the reaction output by the string names, so that the
# output will be consistent between python versions
# def str_cmp(first, second):
# return cmp(str(first), str(second))
rxs = system.reactions()
# sort: key instead of compare function (for py3 support)
# The function str_cmp above can be removed if the
# solution below proves resilient
rxs.sort(key=lambda x: str(x))
for x in rxs:
print(str(x))
fh.close()
t_KEGG_Enzyme(test_KEGG_Enzyme_files)
t_KEGG_Compound(test_KEGG_Compound_files)
t_KEGG_Map(test_KEGG_Map_files)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_KEGG.py | Python | gpl-2.0 | 2,438 | 0 |
#!/usr/bin/env python
"""
Created Wed Oct 7 15:04:36 CEST 2015
@author: sapfo
"""
import matplotlib
#matplotlib.use('Agg')
import simul_ms
import python_cmdscale
#import python_pca
import exp
import sys
import numpy as np
import pylab as py
from scipy.stats import norm
'''
We want to pick n1, n2, D, T?
Simulate data
Compute the distance matrix
MDS the distance matrix
Get coordinates
Get eigenvalues, eigenvectors
Plot comparing with the other eigenvalues
'''
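# The "MDS the distance matrix" step above is handled by the external
# python_cmdscale module. As a point of reference only, a minimal sketch of
# classical (Torgerson) scaling is given below; it is an assumption that
# python_cmdscale.cmdscale behaves equivalently, and this helper is not called
# anywhere in the script.
def _cmdscale_sketch(D):
    """Classical MDS: eigendecompose the double-centred squared distances."""
    D = np.asarray(D, dtype=float)
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n        # centering matrix
    B = -0.5 * J.dot(D ** 2).dot(J)            # double-centred Gram matrix
    evals, evecs = np.linalg.eigh(B)           # B is symmetric
    order = np.argsort(evals)[::-1]            # largest eigenvalues first
    evals, evecs = evals[order], evecs[:, order]
    keep = evals > 0
    Y = evecs[:, keep] * np.sqrt(evals[keep])  # embedding coordinates
    return evals, evecs, Y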
#################### FIXED #############
n = 30
n1 = 5
n2 = 5
n3 = 5
D = 0.4
D1 = 0.1 #(D1<D)
nreps = 1000
## simulate data
rescaling = 2.0
verbose = False
########### 1 population ##############
print "########### 1 population ##############"
## expected tree length for one population
exp_tree_length = 0
for i in range(2,n+1):
exp_tree_length += 2./(i-1)
nsnps = [100]
T_mds = {}
T_pca = {}
Eigenvalues_mds = []
Distances_noise = []
Expected_Delta = np.zeros((n,n))
for kk in range(1,n):
Expected_Delta += np.eye(n,k=kk)
Expected_Delta += np.eye(n,k=-kk)
Expected_Delta *= 2./exp_tree_length
print Expected_Delta
for nsnp in nsnps:
T_mds[nsnp] = []
T_pca[nsnp] = []
for iteration in range(nreps):
params,data,tree_lengths = simul_ms.ms_one_pops(n=n,nreps=nsnp,verbose=0)
Delta = simul_ms.distance_matrix(data=data,verbose=0)
if verbose: print "Delta: ",Delta
Diff = Delta - Expected_Delta
if verbose: print "Diff: ",Diff
Distances_noise += list(Diff.flatten())
#Expected_Delta = zeros
evals_mds, evecs_mds, Y_mds = python_cmdscale.cmdscale(Delta)
Eigenvalues_mds += list(evals_mds[:-1])
#evals_pca, evecs_pca, Y_pca = python_pca.PCA(data.T)
#print "params: ",params
if verbose: print "average tree length (computed with ms): ",rescaling*np.average(tree_lengths)
if verbose: print "expected tree length (analytical coal): ",exp_tree_length
# mds expected total tree length, bias, rmse
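# invert the analytical relation lambda ~ 2/T**2 (printed below as the
# "expected lambda1 (mds)" value) to estimate the total tree length T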
t_mds = (2./(np.average(evals_mds[:-1])))**(1/2.)
T_mds[nsnp].append(t_mds)
if verbose: print "expected T (mds) from eigenvalues: ",T_mds
# pca expected tree length, bias, rmse
#t_pca = 1./np.average(evals_pca[:-1])
#T_pca[nsnp].append(t_pca)
#if verbose: print "expected T (pca) from eigenvalues: ",T_pca
print "expected lambda1 (mds) for (Ivan analytical): ",2./((exp_tree_length)**2)
#print "expected lambda1 (pca) for (Ivan analytical): ",1./((exp_tree_length))
#print "observed lambda1 (mds procedure): ",evals_mds[0]
#print "observed lambda1 (pca procedure): ",evals_pca[0]
#print "observed average lambdas (mds): ",np.average(evals_mds[:-1])
#print "observed average lambdas (pca): ",np.average(evals_pca[:-1])
#print "evals (first 10): ",evals_mds[:10]
mu1,std1 = norm.fit(Eigenvalues_mds)
mu2,std2 = norm.fit(Distances_noise)
fig = py.figure()
py.suptitle("1 population, %s snps, %s rep"%(nsnp,nreps))
ax1 = fig.add_subplot(2,1,1)
py.title("Eigenvalues")
py.hist(Eigenvalues_mds,normed=True,alpha=0.5)
py.vlines(2./((exp_tree_length)**2),0,10,color='red')
xmin,xmax=py.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu1,std1)
py.plot(x,p,'k',linewidth=2)
ax1 = fig.add_subplot(2,1,2)
py.title("Distances")
py.hist(Distances_noise,normed=True,alpha=0.5)
xmin,xmax=py.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu2,std2)
py.plot(x,p,'k',linewidth=2)
#py.savefig("Eigenvalues_mds_1pop.pdf")
py.show()
sys.exit()
### plotting one population ###
py.plot(Y[:,0],(Y[:,1]),'o',color='blue')
py.title("simulations 1 population n = %s, nreps = %s "%(n,nreps))
py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1])))
py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1])))
########### 2 populations ##############
print "########### 2 populations ##############"
#ms simul
params_2pops,data_2pops,tree_lengths_2pops = simul_ms.ms_two_pops(n1=n1, n2=n2, D=1./rescaling*D,nreps=nreps,verbose=0)
avg_tree_length_2pops = rescaling*np.average(tree_lengths_2pops)
Delta_2pops = simul_ms.distance_matrix(data=data_2pops,verbose=0)
#cmdscale
evals_2pops, evecs_2pops, Y_2pops = python_cmdscale.cmdscale(Delta_2pops)
exp.T_D_two_pops(eigenvalues = evals_2pops,n1=n1,n2=n2,diploid=2)
# analytical
params_exp_2pops,evals_exp_2pops, evec_exp_2pops = exp.two_pops(n1=n1, n2=n2, D=D, T=avg_tree_length_2pops)
print "params_2pops (ms): ",params_2pops
print "params_exp_2pops: ",params_exp_2pops
print "average tree length (ms): ",rescaling*np.average(tree_lengths_2pops)
#print "expected tree length (coal): ",exp_tree_length
print "expected lambda1 (analytical): ",evals_exp_2pops[0]
print "observed lambda1 (cmdscale): ",evals_2pops[0]
print "expected lambda2 (analytical): ",evals_exp_2pops[1]
print "observed lambda2 (cmdscale): ",evals_2pops[1]
print "average observed lambda2...n-1 (cmdscale): ",np.average(evals_2pops[1:-1])
print evals_exp_2pops[:10]
print evals_2pops[:10]
#print "observed lambda1 (mds): ",evals[0]
#print "observed average lambdas (mds): ",np.average(evals[:-1])
### plotting two population ###
py.figure()
py.plot(Y_2pops[:,0][:n1],Y_2pops[:,1][:n1],'x',color='orange')
py.plot(Y_2pops[:,0][n1:],Y_2pops[:,1][n1:],'o',color='blue')
py.title("simulations 2 pops n1 = %s, n2 = %s, D = %s, nreps = %s "%(n1,n2,D,nreps))
py.xlabel("dim 1")
py.ylabel("dim 2")
#py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1])))
#py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1])))
#py.show()
########### 3 populations ##############
print "########### 3 populations ##############"
nreps = 100
#ms simul
params_3pops,data_3pops,tree_lengths_3pops = simul_ms.ms_three_pops(n1=n1, n2=n2, n3=n3, D=1./rescaling*D, D1 = 1./rescaling*D1,nreps=nreps,verbose=0)
avg_tree_length_3pops = rescaling*np.average(tree_lengths_3pops)
Delta_3pops = simul_ms.distance_matrix(data=data_3pops,verbose=0)
#cmdscale
evals_3pops, evecs_3pops, Y_3pops = python_cmdscale.cmdscale(Delta_3pops)
try:
Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp = exp.T_D_D1_three_pops(eigenvalues = evals_3pops,n1=n1,n2=n2,n3=n3,diploid=2)
except:
Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp= 1,1,1,1,1
print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops)
print "params_3pops (ms): ",params_3pops
# analytical
params_exp_3pops,evals_exp_3pops, evec_exp_3pops = exp.three_pops(n1=n1, n2=n2, n3=n3, D=D, D1=D1, T=avg_tree_length_3pops)
print "params_3pops (ms): ",params_3pops
print "params_exp_3pops: ",params_exp_3pops
print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops)
#print "expected tree length (coal): ",exp_tree_length
print "expected lambda1 (analytical): ",evals_exp_3pops[0]
print "observed lambda1 (cmdscale): ",evals_3pops[0]
print ""
print "expected lambda2 (analytical): ",evals_exp_3pops[1]
print "observed lambda2 (cmdscale): ",evals_3pops[1]
print ""
print "expected lambda3 (analytical): ",evals_exp_3pops[2]
print "observed lambda3 (cmdscale): ",evals_3pops[2]
print "average observed lambda3...n-1 (cmdscale): ",np.average(evals_3pops[2:-1])
print evals_exp_3pops[:10]
print evals_3pops[:10]
sys.exit()
### plotting three population ###
py.figure()
py.plot(Y_3pops[:,0][:n1],Y_3pops[:,1][:n1],'D',color='orange')
py.plot(Y_3pops[:,0][n1:n1+n2],Y_3pops[:,1][n1:n1+n2],'o',color='blue')
py.plot(Y_3pops[:,0][n1+n2:],Y_3pops[:,1][n1+n2:],'v',color='green')
py.title("simulations 3 pops n1 = %(n1)s, n2 = %(n2)s, n3 = %(n3)s, D = %(D)s, D1 = %(D1)s, nreps = %(nreps)s "%params_3pops)
py.xlabel("dim 1")
py.ylabel("dim 2")
py.show()
########### 4 populations and above ##############
| sapfo/medeas | src/old_scripts/main_simul_eigenvalues_distribution.py | Python | gpl-3.0 | 7,741 | 0.027774 |
# -*- coding: utf-8 -*-
# pylint: disable=C0302,fixme, protected-access
""" The core module contains the SoCo class that implements
the main entry to the SoCo functionality
"""
from __future__ import unicode_literals
import socket
import logging
import re
import requests
from .services import DeviceProperties, ContentDirectory
from .services import RenderingControl, AVTransport, ZoneGroupTopology
from .services import AlarmClock
from .groups import ZoneGroup
from .exceptions import DIDLMetadataError, SoCoUPnPException
from .data_structures import DidlPlaylistContainer,\
SearchResult, Queue, DidlObject, DidlMusicAlbum,\
from_didl_string, to_didl_string, DidlResource
from .utils import really_utf8, camel_to_underscore, really_unicode,\
url_escape_path
from .xml import XML
from soco import config
_LOG = logging.getLogger(__name__)
class _ArgsSingleton(type):
""" A metaclass which permits only a single instance of each derived class
sharing the same `_class_group` class attribute to exist for any given set
of positional arguments.
Attempts to instantiate a second instance of a derived class, or another
class with the same `_class_group`, with the same args will return the
existing instance.
For example:
>>> class ArgsSingletonBase(object):
... __metaclass__ = _ArgsSingleton
...
>>> class First(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
...
>>> class Second(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
>>> assert First('hi') is First('hi')
>>> assert First('hi') is First('bye')
AssertionError
>>> assert First('hi') is Second('hi')
"""
_instances = {}
def __call__(cls, *args, **kwargs):
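# Two-level cache: instances are keyed first by the class group (or the
# class itself), then by the positional arguments used to create them.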
key = cls._class_group if hasattr(cls, '_class_group') else cls
if key not in cls._instances:
cls._instances[key] = {}
if args not in cls._instances[key]:
cls._instances[key][args] = super(_ArgsSingleton, cls).__call__(
*args, **kwargs)
return cls._instances[key][args]
class _SocoSingletonBase( # pylint: disable=too-few-public-methods,no-init
_ArgsSingleton(str('ArgsSingletonMeta'), (object,), {})):
""" The base class for the SoCo class.
Uses a Python 2 and 3 compatible method of declaring a metaclass. See, eg,
here: http://www.artima.com/weblogs/viewpost.jsp?thread=236234 and
here: http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
"""
pass
# pylint: disable=R0904,too-many-instance-attributes
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
Public functions::
play -- Plays the current item.
play_uri -- Plays a track or a music stream by URI.
play_from_queue -- Plays an item in the queue.
pause -- Pause the currently playing track.
stop -- Stop the currently playing track.
seek -- Move the currently playing track a given elapsed time.
next -- Go to the next track.
previous -- Go back to the previous track.
switch_to_line_in -- Switch the speaker's input to line-in.
switch_to_tv -- Switch the speaker's input to TV.
get_current_track_info -- Get information about the currently playing
track.
get_speaker_info -- Get information about the Sonos speaker.
partymode -- Put all the speakers in the network in the same group.
join -- Join this speaker to another "master" speaker.
unjoin -- Remove this speaker from a group.
get_queue -- Get information about the queue.
get_artists -- Get artists from the music library
get_album_artists -- Get album artists from the music library
get_albums -- Get albums from the music library
get_genres -- Get genres from the music library
get_composers -- Get composers from the music library
get_tracks -- Get tracks from the music library
get_playlists -- Get playlists from the music library
get_music_library_information -- Get information from the music library
get_current_transport_info -- get speakers playing state
browse_by_idstring -- Browse (get sub-elements) a given type
add_uri_to_queue -- Adds an URI to the queue
add_to_queue -- Add a track to the end of the queue
remove_from_queue -- Remove a track from the queue
clear_queue -- Remove all tracks from queue
get_favorite_radio_shows -- Get favorite radio shows from Sonos'
Radio app.
get_favorite_radio_stations -- Get favorite radio stations.
create_sonos_playlist -- Create a new empty Sonos playlist
create_sonos_playlist_from_queue -- Create a new Sonos playlist
from the current queue.
add_item_to_sonos_playlist -- Adds a queueable item to a Sonos'
playlist
get_item_album_art_uri -- Get an item's Album Art absolute URI.
search_track -- Search for an artist, artist's albums, or track.
get_albums_for_artist -- Get albums for an artist.
get_tracks_for_album -- Get tracks for an artist's album.
start_library_update -- Trigger an update of the music library.
Properties::
uid -- The speaker's unique identifier
mute -- The speaker's mute status.
volume -- The speaker's volume.
bass -- The speaker's bass EQ.
treble -- The speaker's treble EQ.
loudness -- The status of the speaker's loudness compensation.
cross_fade -- The status of the speaker's crossfade.
status_light -- The state of the Sonos status light.
player_name -- The speaker's name.
play_mode -- The queue's repeat/shuffle settings.
queue_size -- Get size of queue.
library_updating -- Whether music library update is in progress.
album_artist_display_option -- album artist display option
.. warning::
These properties are not cached and will obtain information over the
network, so may take longer than expected to set or return a value. It
may be a good idea for you to cache the value in your own code.
"""
_class_group = 'SoCo'
# Key words used when performing searches
SEARCH_TRANSLATION = {'artists': 'A:ARTIST',
'album_artists': 'A:ALBUMARTIST',
'albums': 'A:ALBUM',
'genres': 'A:GENRE',
'composers': 'A:COMPOSER',
'tracks': 'A:TRACKS',
'playlists': 'A:PLAYLISTS',
'share': 'S:',
'sonos_playlists': 'SQ:',
'categories': 'A:'}
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
# Note: Creation of a SoCo instance should be as cheap and quick as
# possible. Do not make any network calls here
super(SoCo, self).__init__()
# Check if ip_address is a valid IPv4 representation.
# Sonos does not (yet) support IPv6
try:
socket.inet_aton(ip_address)
except socket.error:
raise ValueError("Not a valid IP address string")
#: The speaker's ip address
self.ip_address = ip_address
self.speaker_info = {} # Stores information about the current speaker
# The services which we use
# pylint: disable=invalid-name
self.avTransport = AVTransport(self)
self.contentDirectory = ContentDirectory(self)
self.deviceProperties = DeviceProperties(self)
self.renderingControl = RenderingControl(self)
self.zoneGroupTopology = ZoneGroupTopology(self)
self.alarmClock = AlarmClock(self)
# Some private attributes
self._all_zones = set()
self._groups = set()
self._is_bridge = None
self._is_coordinator = False
self._player_name = None
self._uid = None
self._visible_zones = set()
self._zgs_cache = None
_LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
return "<{0} object at ip {1}>".format(
self.__class__.__name__, self.ip_address)
def __repr__(self):
return '{0}("{1}")'.format(self.__class__.__name__, self.ip_address)
@property
def player_name(self):
""" The speaker's name. A string. """
# We could get the name like this:
# result = self.deviceProperties.GetZoneAttributes()
# return result["CurrentZoneName"]
# but it is probably quicker to get it from the group topology
# and take advantage of any caching
self._parse_zone_group_state()
return self._player_name
@player_name.setter
def player_name(self, playername):
""" Set the speaker's name """
self.deviceProperties.SetZoneAttributes([
('DesiredZoneName', playername),
('DesiredIcon', ''),
('DesiredConfiguration', '')
])
@property
def uid(self):
""" A unique identifier. Looks like: RINCON_000XXXXXXXXXX1400 """
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._uid is not None:
return self._uid
# if not, we have to get it from the zone topology, which
# is probably quicker than any alternative, since the zgt is probably
# cached. This will set self._uid for us for next time, so we won't
# have to do this again
self._parse_zone_group_state()
return self._uid
# An alternative way of getting the uid is as follows:
# self.device_description_url = \
# 'http://{0}:1400/xml/device_description.xml'.format(
# self.ip_address)
# response = requests.get(self.device_description_url).text
# tree = XML.fromstring(response.encode('utf-8'))
# udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
# # the udn has a "uuid:" prefix before the uid, so we need to strip it
# self._uid = uid = udn[5:]
# return uid
@property
def is_visible(self):
""" Is this zone visible? A zone might be invisible if, for example it
is a bridge, or the slave part of stereo pair.
return True or False
"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
return self in self.visible_zones
@property
def is_bridge(self):
""" Is this zone a bridge? """
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._is_bridge is not None:
return self._is_bridge
# if not, we have to get it from the zone topology. This will set
# self._is_bridge for us for next time, so we won't have to do this
# again
self._parse_zone_group_state()
return self._is_bridge
@property
def is_coordinator(self):
""" Return True if this zone is a group coordinator, otherwise False.
return True or False
"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
self._parse_zone_group_state()
return self._is_coordinator
@property
def play_mode(self):
""" The queue's play mode. Case-insensitive options are:
NORMAL -- Turns off shuffle and repeat.
REPEAT_ALL -- Turns on repeat and turns off shuffle.
SHUFFLE -- Turns on shuffle *and* repeat. (It's strange, I know.)
SHUFFLE_NOREPEAT -- Turns on shuffle and turns off repeat.
"""
result = self.avTransport.GetTransportSettings([
('InstanceID', 0),
])
return result['PlayMode']
@play_mode.setter
def play_mode(self, playmode):
""" Set the speaker's mode """
playmode = playmode.upper()
if playmode not in PLAY_MODES:
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([
('InstanceID', 0),
('NewPlayMode', playmode)
])
@property
def cross_fade(self):
""" The speaker's cross fade state.
True if enabled, False otherwise """
response = self.avTransport.GetCrossfadeMode([
('InstanceID', 0),
])
cross_fade_state = response['CrossfadeMode']
return True if int(cross_fade_state) else False
@cross_fade.setter
def cross_fade(self, crossfade):
""" Set the speaker's cross fade state. """
crossfade_value = '1' if crossfade else '0'
self.avTransport.SetCrossfadeMode([
('InstanceID', 0),
('CrossfadeMode', crossfade_value)
])
def play_from_queue(self, index, start=True):
""" Play a track from the queue by index. The index number is
required as an argument, where the first index is 0.
index: the index of the track to play; first item in the queue is 0
start: If the item that has been set should start playing
Returns:
True if the Sonos speaker successfully started playing the track.
False if the track did not start (this may be because it was not
requested to start because "start=False")
Raises SoCoException (or a subclass) upon errors.
"""
# Grab the speaker's information if we haven't already since we'll need
# it in the next step.
if not self.speaker_info:
self.get_speaker_info()
# first, set the queue itself as the source URI
uri = 'x-rincon-queue:{0}#0'.format(self.uid)
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', '')
])
# second, set the track number with a seek command
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'TRACK_NR'),
('Target', index + 1)
])
# finally, just play what's set if needed
if start:
return self.play()
return False
def play(self):
"""Play the currently selected track.
Returns:
True if the Sonos speaker successfully started playing the track.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.Play([
('InstanceID', 0),
('Speed', 1)
])
def play_uri(self, uri='', meta='', title='', start=True):
""" Play a given stream. Pauses the queue.
If there is no metadata passed in and there is a title set then a
metadata object will be created. This is often the case if you have
a custom stream, it will need at least the title in the metadata in
order to play.
Arguments:
uri -- URI of a stream to be played.
meta -- The track metadata to show in the player, DIDL format.
title -- The track title to show in the player
start -- If the URI that has been set should start playing
Returns:
True if the Sonos speaker successfully started playing the track.
False if the track did not start (this may be because it was not
requested to start because "start=False")
Raises SoCoException (or a subclass) upon errors.
"""
if meta == '' and title != '':
meta_template = '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'\
'/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '\
'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '\
'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'\
'<item id="R:0/0/0" parentID="R:0/0" restricted="true">'\
'<dc:title>{title}</dc:title><upnp:class>'\
'object.item.audioItem.audioBroadcast</upnp:class><desc '\
'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'\
'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
tunein_service = 'SA_RINCON65031_'
# Radio stations need to have at least a title to play
meta = meta_template.format(title=title, service=tunein_service)
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', uri),
('CurrentURIMetaData', meta)
])
# The track is enqueued, now play it if needed
if start:
return self.play()
return False
def pause(self):
""" Pause the currently playing track.
Returns:
True if the Sonos speaker successfully paused the track.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.Pause([
('InstanceID', 0),
('Speed', 1)
])
def stop(self):
""" Stop the currently playing track.
Returns:
True if the Sonos speaker successfully stopped the playing track.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.Stop([
('InstanceID', 0),
('Speed', 1)
])
def seek(self, timestamp):
""" Seeks to a given timestamp in the current track, specified in the
format of HH:MM:SS or H:MM:SS.
Returns:
True if the Sonos speaker successfully seeked to the timecode.
Raises SoCoException (or a subclass) upon errors.
"""
if not re.match(r'^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$', timestamp):
raise ValueError('invalid timestamp, use HH:MM:SS format')
self.avTransport.Seek([
('InstanceID', 0),
('Unit', 'REL_TIME'),
('Target', timestamp)
])
def next(self):
""" Go to the next track.
Returns:
True if the Sonos speaker successfully skipped to the next track.
Raises SoCoException (or a subclass) upon errors.
Keep in mind that next() can return errors
for a variety of reasons. For example, if the Sonos is streaming
Pandora and you call next() several times in quick succession an error
code will likely be returned (since Pandora has limits on how many
songs can be skipped).
"""
self.avTransport.Next([
('InstanceID', 0),
('Speed', 1)
])
def previous(self):
""" Go back to the previously played track.
Returns:
True if the Sonos speaker successfully went to the previous track.
Raises SoCoException (or a subclass) upon errors.
Keep in mind that previous() can return errors
for a variety of reasons. For example, previous() will return an error
(error code 701) if the Sonos is streaming Pandora, since you can't
go back on tracks.
"""
self.avTransport.Previous([
('InstanceID', 0),
('Speed', 1)
])
@property
def mute(self):
""" The speaker's mute state. True if muted, False otherwise """
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return True if int(mute_state) else False
@mute.setter
def mute(self, mute):
""" Mute (or unmute) the speaker """
mute_value = '1' if mute else '0'
self.renderingControl.SetMute([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredMute', mute_value)
])
@property
def volume(self):
""" The speaker's volume. An integer between 0 and 100. """
response = self.renderingControl.GetVolume([
('InstanceID', 0),
('Channel', 'Master'),
])
volume = response['CurrentVolume']
return int(volume)
@volume.setter
def volume(self, volume):
""" Set the speaker's volume """
volume = int(volume)
volume = max(0, min(volume, 100)) # Coerce in range
self.renderingControl.SetVolume([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredVolume', volume)
])
@property
def bass(self):
""" The speaker's bass EQ. An integer between -10 and 10. """
response = self.renderingControl.GetBass([
('InstanceID', 0),
('Channel', 'Master'),
])
bass = response['CurrentBass']
return int(bass)
@bass.setter
def bass(self, bass):
""" Set the speaker's bass """
bass = int(bass)
bass = max(-10, min(bass, 10)) # Coerce in range
self.renderingControl.SetBass([
('InstanceID', 0),
('DesiredBass', bass)
])
@property
def treble(self):
""" The speaker's treble EQ. An integer between -10 and 10. """
response = self.renderingControl.GetTreble([
('InstanceID', 0),
('Channel', 'Master'),
])
treble = response['CurrentTreble']
return int(treble)
@treble.setter
def treble(self, treble):
""" Set the speaker's treble """
treble = int(treble)
treble = max(-10, min(treble, 10)) # Coerce in range
self.renderingControl.SetTreble([
('InstanceID', 0),
('DesiredTreble', treble)
])
@property
def loudness(self):
""" The Sonos speaker's loudness compensation. True if on, otherwise
False.
Loudness is a complicated topic. You can find a nice summary about this
feature here: http://forums.sonos.com/showthread.php?p=4698#post4698
"""
response = self.renderingControl.GetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
])
loudness = response["CurrentLoudness"]
return True if int(loudness) else False
@loudness.setter
def loudness(self, loudness):
""" Switch on/off the speaker's loudness compensation """
loudness_value = '1' if loudness else '0'
self.renderingControl.SetLoudness([
('InstanceID', 0),
('Channel', 'Master'),
('DesiredLoudness', loudness_value)
])
def _parse_zone_group_state(self):
""" The Zone Group State contains a lot of useful information. Retrieve
and parse it, and populate the relevant properties. """
# zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
# this:
#
# <ZoneGroups>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
# <ZoneGroupMember
# BootSeq="33"
# Configuration="1"
# Icon="x-rincon-roomicon:zoneextender"
# Invisible="1"
# IsZoneBridge="1"
# Location="http://192.168.1.100:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000ZZZ1400"
# ZoneName="BRIDGE"/>
# </ZoneGroup>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
# <ZoneGroupMember
# BootSeq="44"
# Configuration="1"
# Icon="x-rincon-roomicon:living"
# Location="http://192.168.1.101:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000XXX1400"
# ZoneName="Living Room"/>
# <ZoneGroupMember
# BootSeq="52"
# Configuration="1"
# Icon="x-rincon-roomicon:kitchen"
# Location="http://192.168.1.102:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000YYY1400"
# ZoneName="Kitchen"/>
# </ZoneGroup>
# </ZoneGroups>
#
def parse_zone_group_member(member_element):
""" Parse a ZoneGroupMember or Satellite element from Zone Group
State, create a SoCo instance for the member, set basic attributes
and return it. """
# Create a SoCo instance for each member. Because SoCo
# instances are singletons, this is cheap if they have already
# been created, and useful if they haven't. We can then
# update various properties for that instance.
member_attribs = member_element.attrib
ip_addr = member_attribs['Location'].\
split('//')[1].split(':')[0]
zone = config.SOCO_CLASS(ip_addr)
# uid doesn't change, but it's not harmful to (re)set it, in case
# the zone is as yet unseen.
zone._uid = member_attribs['UUID']
zone._player_name = member_attribs['ZoneName']
# add the zone to the set of all members, and to the set
# of visible members if appropriate
is_visible = False if member_attribs.get(
'Invisible') == '1' else True
if is_visible:
self._visible_zones.add(zone)
self._all_zones.add(zone)
return zone
# This is called quite frequently, so it is worth optimising it.
# Maintain a private cache. If the zgt has not changed, there is no
# need to repeat all the XML parsing. In addition, switch on network
# caching for a short interval (5 secs).
zgs = self.zoneGroupTopology.GetZoneGroupState(
cache_timeout=5)['ZoneGroupState']
if zgs == self._zgs_cache:
return
self._zgs_cache = zgs
tree = XML.fromstring(zgs.encode('utf-8'))
# Empty the set of all zone_groups
self._groups.clear()
# and the set of all members
self._all_zones.clear()
self._visible_zones.clear()
# Loop over each ZoneGroup Element
for group_element in tree.findall('ZoneGroup'):
coordinator_uid = group_element.attrib['Coordinator']
group_uid = group_element.attrib['ID']
group_coordinator = None
members = set()
for member_element in group_element.findall('ZoneGroupMember'):
zone = parse_zone_group_member(member_element)
# Perform extra processing relevant to direct zone group
# members
#
# If this element has the same UUID as the coordinator, it is
# the coordinator
if zone._uid == coordinator_uid:
group_coordinator = zone
zone._is_coordinator = True
else:
zone._is_coordinator = False
# is_bridge doesn't change, but it does no real harm to
# set/reset it here, just in case the zone has not been seen
# before
zone._is_bridge = True if member_element.attrib.get(
'IsZoneBridge') == '1' else False
# add the zone to the members for this group
members.add(zone)
# Loop over Satellite elements if present, and process as for
# ZoneGroup elements
for satellite_element in member_element.findall('Satellite'):
zone = parse_zone_group_member(satellite_element)
# Assume a satellite can't be a bridge or coordinator, so
# no need to check.
#
# Add the zone to the members for this group.
members.add(zone)
# Now create a ZoneGroup with this info and add it to the list
# of groups
self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
""" Return a set of all the available groups"""
self._parse_zone_group_state()
return self._groups
@property
def group(self):
"""The Zone Group of which this device is a member.
group will be None if this zone is a slave in a stereo pair."""
for group in self.all_groups:
if self in group:
return group
return None
# To get the group directly from the network, try the code below
# though it is probably slower than that above
# current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[
# 'CurrentZoneGroupID']
# if current_group_id:
# for group in self.all_groups:
# if group.uid == current_group_id:
# return group
# else:
# return None
@property
def all_zones(self):
""" Return a set of all the available zones"""
self._parse_zone_group_state()
return self._all_zones
@property
def visible_zones(self):
""" Return an set of all visible zones"""
self._parse_zone_group_state()
return self._visible_zones
def partymode(self):
""" Put all the speakers in the network in the same group, a.k.a Party
Mode.
This blog shows the initial research responsible for this:
http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html
The trick seems to be (only tested on a two-speaker setup) to tell each
speaker which to join. There's probably a bit more to it if multiple
groups have been defined.
"""
# Tell every other visible zone to join this one
# pylint: disable = expression-not-assigned
[zone.join(self) for zone in self.visible_zones if zone is not self]
def join(self, master):
""" Join this speaker to another "master" speaker.
.. note:: The signature of this method has changed in 0.8. It now
requires a SoCo instance to be passed as `master`, not an IP
address
"""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
('CurrentURIMetaData', '')
])
def unjoin(self):
""" Remove this speaker from a group.
Seems to work OK even if you remove what was previously the group
master from its own group. If the speaker was not in a group, this also
returns OK.
Returns:
True if this speaker has left the group.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.BecomeCoordinatorOfStandaloneGroup([
('InstanceID', 0)
])
def switch_to_line_in(self):
""" Switch the speaker's input to line-in.
Returns:
True if the Sonos speaker successfully switched to line-in.
If an error occurs, we'll attempt to parse the error and return a UPnP
error code. If that fails, the raw response sent back from the Sonos
speaker will be returned.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-rincon-stream:{0}'.format(self.uid)),
('CurrentURIMetaData', '')
])
def switch_to_tv(self):
""" Switch the speaker's input to TV.
Returns:
True if the Sonos speaker successfully switched to TV.
If an error occurs, we'll attempt to parse the error and return a UPnP
error code. If that fails, the raw response sent back from the Sonos
speaker will be returned.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.SetAVTransportURI([
('InstanceID', 0),
('CurrentURI', 'x-sonos-htastream:{0}:spdif'.format(self.uid)),
('CurrentURIMetaData', '')
])
@property
def status_light(self):
""" The white Sonos status light between the mute button and the volume
up button on the speaker. True if on, otherwise False.
"""
result = self.deviceProperties.GetLEDState()
LEDState = result["CurrentLEDState"] # pylint: disable=invalid-name
return True if LEDState == "On" else False
@status_light.setter
def status_light(self, led_on):
""" Switch on/off the speaker's status light """
led_state = 'On' if led_on else 'Off'
self.deviceProperties.SetLEDState([
('DesiredLEDState', led_state),
])
def _build_album_art_full_uri(self, url):
""" Ensure an Album Art URI is an absolute URI
:param url: The album art URI
"""
# Add on the full album art link, as the URI version
# does not include the ipaddress
if not url.startswith(('http:', 'https:')):
url = 'http://' + self.ip_address + ':1400' + url
return url
def get_current_track_info(self):
""" Get information about the currently playing track.
Returns:
A dictionary containing the following information about the currently
playing track: playlist_position, duration, title, artist, album,
position and a link to the album art.
If we're unable to return data for a field, we'll return an empty
string. This can happen for all kinds of reasons so be sure to check
values. For example, a track may not have complete metadata and be
missing an album name. In this case track['album'] will be an empty
string.
"""
response = self.avTransport.GetPositionInfo([
('InstanceID', 0),
('Channel', 'Master')
])
track = {'title': '', 'artist': '', 'album': '', 'album_art': '',
'position': ''}
track['playlist_position'] = response['Track']
track['duration'] = response['TrackDuration']
track['uri'] = response['TrackURI']
track['position'] = response['RelTime']
metadata = response['TrackMetaData']
# Store the entire Metadata entry in the track, this can then be
# used if needed by the client to restart a given URI
track['metadata'] = metadata
# Duration seems to be '0:00:00' when listening to radio
if metadata != '' and track['duration'] == '0:00:00':
metadata = XML.fromstring(really_utf8(metadata))
# Try parse trackinfo
trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:'
'metadata-1-0/}streamContent')
index = trackinfo.find(' - ')
if index > -1:
track['artist'] = trackinfo[:index]
track['title'] = trackinfo[index + 3:]
else:
# Might find some kind of title anyway in metadata
track['title'] = metadata.findtext('.//{http://purl.org/dc/'
'elements/1.1/}title')
if not track['title']:
_LOG.warning('Could not handle track info: "%s"',
trackinfo)
track['title'] = trackinfo
# If the speaker is playing from the line-in source, querying for track
# metadata will return "NOT_IMPLEMENTED".
elif metadata not in ('', 'NOT_IMPLEMENTED', None):
# Track metadata is returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(metadata))
md_title = metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
md_artist = metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}creator')
md_album = metadata.findtext(
'.//{urn:schemas-upnp-org:metadata-1-0/upnp/}album')
track['title'] = ""
if md_title:
track['title'] = md_title
track['artist'] = ""
if md_artist:
track['artist'] = md_artist
track['album'] = ""
if md_album:
track['album'] = md_album
album_art_url = metadata.findtext(
'.//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')
if album_art_url is not None:
track['album_art'] = self._build_album_art_full_uri(
album_art_url)
return track
def get_speaker_info(self, refresh=False, timeout=(3, 5)):
""" Get information about the Sonos speaker.
Arguments:
refresh -- Refresh the speaker info cache.
timeout -- How long to wait for the server to send
data before giving up, as a float, or a
(`connect timeout, read timeout`_) tuple.
If not specified, a default (3, 5) timeout
will be used; to disable the timeout entirely, set it
explicitly to None.
Returns:
Information about the Sonos speaker, such as the UID, MAC Address, and
Zone Name.
"""
if self.speaker_info and refresh is False:
return self.speaker_info
else:
response = requests.get('http://' + self.ip_address +
':1400/status/zp', timeout=timeout)
dom = XML.fromstring(response.content)
if dom.findtext('.//ZoneName') is not None:
self.speaker_info['zone_name'] = \
dom.findtext('.//ZoneName')
self.speaker_info['zone_icon'] = dom.findtext('.//ZoneIcon')
self.speaker_info['uid'] = self.uid
self.speaker_info['serial_number'] = \
dom.findtext('.//SerialNumber')
self.speaker_info['software_version'] = \
dom.findtext('.//SoftwareVersion')
self.speaker_info['hardware_version'] = \
dom.findtext('.//HardwareVersion')
self.speaker_info['mac_address'] = dom.findtext('.//MACAddress')
return self.speaker_info
def get_current_transport_info(self):
""" Get the current playback state
Returns:
A dictionary containing the following information about the speaker's
playing state:
current_transport_state (PLAYING, PAUSED_PLAYBACK, STOPPED),
current_transport_status (OK, ?), current_transport_speed (1, ?)
This allows us to know whether the speaker is playing or not. The other
possible states of CurrentTransportStatus and CurrentSpeed are not known.
"""
response = self.avTransport.GetTransportInfo([
('InstanceID', 0),
])
playstate = {
'current_transport_status': '',
'current_transport_state': '',
'current_transport_speed': ''
}
playstate['current_transport_state'] = \
response['CurrentTransportState']
playstate['current_transport_status'] = \
response['CurrentTransportStatus']
playstate['current_transport_speed'] = response['CurrentSpeed']
return playstate
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
""" Get information about the queue
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A :py:class:`~.soco.data_structures.Queue` object
This method is heavily based on Sam Soffes' (aka soffes) Ruby
implementation
"""
queue = []
response = self.contentDirectory.Browse([
('ObjectID', 'Q:0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
result = response['Result']
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag])
# I'm not sure this is necessary (any more). Even with an empty queue,
# there is still a result object. This should be investigated.
if not result:
# pylint: disable=star-args
return Queue(queue, **metadata)
items = from_didl_string(result)
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(item)
queue.append(item)
# pylint: disable=star-args
return Queue(queue, **metadata)
@property
def queue_size(self):
""" Get size of queue """
response = self.contentDirectory.Browse([
('ObjectID', 'Q:0'),
('BrowseFlag', 'BrowseMetadata'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 1),
('SortCriteria', '')
])
dom = XML.fromstring(really_utf8(response['Result']))
queue_size = None
container = dom.find(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container')
if container is not None:
child_count = container.get('childCount')
if child_count is not None:
queue_size = int(child_count)
return queue_size
def get_sonos_playlists(self, *args, **kwargs):
""" Convenience method for:
get_music_library_information('sonos_playlists')
Refer to the docstring for that method
"""
args = tuple(['sonos_playlists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_artists(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='artists'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['artists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_album_artists(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='album_artists'`. For details on remaining arguments
refer to the docstring for that method.
"""
args = tuple(['album_artists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_albums(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='albums'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['albums'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_genres(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='genres'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['genres'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_composers(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='composers'`. For details on remaining arguments
refer to the docstring for that method.
"""
args = tuple(['composers'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_tracks(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='tracks'`. For details on remaining arguments refer
to the docstring for that method.
"""
args = tuple(['tracks'] + list(args))
return self.get_music_library_information(*args, **kwargs)
def get_playlists(self, *args, **kwargs):
""" Convenience method for :py:meth:`get_music_library_information`
with `search_type='playlists'`. For details on remaining arguments
refer to the docstring for that method.
NOTE: The playlists that are referred to here are the playlist files
imported from the music library; they are not the Sonos playlists.
"""
args = tuple(['playlists'] + list(args))
return self.get_music_library_information(*args, **kwargs)
# pylint: disable=too-many-locals, too-many-arguments, too-many-branches
def get_music_library_information(self, search_type, start=0,
max_items=100, full_album_art_uri=False,
search_term=None, subcategories=None,
complete_result=False):
""" Retrieve music information objects from the music library
This method is the main method to get music information items, like
e.g. tracks, albums etc., from the music library with. It can be used
in a few different ways:
The **search_term** argument performs a fuzzy search on that string in
the results, so e.g. calling::
get_music_library_information('artists', search_term='Metallica')
will perform a fuzzy search for the term 'Metallica' among all the
artists.
Using the **subcategories** argument will jump directly into that
subcategory of the search and return results from there. So, e.g.,
knowing that among the artists there is one called 'Metallica', calling::
get_music_library_information('artists', subcategories=['Metallica'])
will jump directly into the 'Metallica' sub category and return the
albums associated with Metallica, and::
get_music_library_information('artists', subcategories=['Metallica',
'Black'])
will return the tracks of the album 'Black' by the artist 'Metallica'.
The order of sub category types is: Genres->Artists->Albums->Tracks.
It is also possible to combine the two, to perform a fuzzy search in a
sub category.
The **start**, **max_items** and **complete_result** arguments all
have to do with paging of the results. By default, the searches are
always paged, because there is a limit to how many items we can get at
a time. This paging is exposed to the user with the start and max_items
arguments. So calling::
get_music_library_information('artists', start=0, max_items=100)
get_music_library_information('artists', start=100, max_items=100)
will get the first and the next 100 items, respectively. It is also
possible to ask for all the elements at once::
get_music_library_information('artists', complete_result=True)
This will perform the paging internally and simply return all the
items.
:param search_type: The kind of information to retrieve. Can be one of:
'artists', 'album_artists', 'albums', 'genres', 'composers',
'tracks', 'share', 'sonos_playlists', and 'playlists', where
playlists are the imported file based playlists from the
music library
:param start: Starting number of returned matches (zero based).
:param max_items: Maximum number of returned matches. NOTE: The maximum
may be restricted by the unit, presumably due to transfer
size considerations, so check the number returned against the
number requested.
:param full_album_art_uri: If the album art URI should include the
IP address
:param search_term: A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed in the
subcategory
:param subcategories: A list of strings that indicate one or more
subcategories to dive into
:param complete_result: Will disable paging (ignore start and
max_items) and return all results for the search. WARNING! Getting
e.g. all the tracks in a large collection might take some time.
:returns: A :py:class:`~.soco.data_structures.SearchResult` object
:raises: :py:class:`SoCoException` upon errors
NOTE: The playlists that are returned with the 'playlists' search are
the playlists imported from (files in) the music library; they are not
the Sonos playlists.
The information about which searches can be performed and the form
of the query has been gathered from the Janos project:
http://sourceforge.net/projects/janos/ Props to the authors of that
project.
"""
search = self.SEARCH_TRANSLATION[search_type]
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
item_list = []
metadata = {'total_matches': 100000}
while len(item_list) < metadata['total_matches']:
# Change start and max for complete searches
if complete_result:
start, max_items = len(item_list), 100000
# Try and get this batch of results
try:
response, metadata =\
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], search_type, 0, 0, None)
else:
raise exception
# Parse the results
items = from_didl_string(response['Result'])
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(item)
# Append the item to the list
item_list.append(item)
# If we are not after the complete results, then stop after one
# iteration
if not complete_result:
break
metadata['search_type'] = search_type
if complete_result:
metadata['number_returned'] = len(item_list)
# pylint: disable=star-args
return SearchResult(item_list, **metadata)
def browse(self, ml_item=None, start=0, max_items=100,
full_album_art_uri=False, search_term=None, subcategories=None):
"""Browse (get sub-elements) a music library item
:param ml_item: The MusicLibraryItem to browse; if left out or passed
None, the items at the base level will be returned
:type ml_item: MusicLibraryItem
:param start: The starting index of the results
:type start: int
:param max_items: The maximum number of items to return
:type max_items: int
:param full_album_art_uri: If the album art URI should include the IP
address
:type full_album_art_uri: bool
:param search_term: A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed on the
subcategory. NOTE: Searching will not work if ml_item is None.
:type search_term: str
:param subcategories: A list of strings that indicate one or more
subcategories to dive into. NOTE: Providing sub categories will
not work if ml_item is None.
:type subcategories: list
:returns: A :py:class:`~.soco.data_structures.SearchResult` object
:rtype: :py:class:`~.soco.data_structures.SearchResult`
:raises: AttributeError: If ``ml_item`` has no ``item_id`` attribute
SoCoUPnPException: With ``error_code='701'`` if the item cannot be
browsed
"""
if ml_item is None:
search = 'A:'
else:
search = ml_item.item_id
# Add sub categories
if subcategories is not None:
for category in subcategories:
search += '/' + url_escape_path(really_unicode(category))
# Add fuzzy search
if search_term is not None:
search += ':' + url_escape_path(really_unicode(search_term))
try:
response, metadata =\
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
# 'No such object' UPnP errors
if exception.error_code == '701':
return SearchResult([], 'browse', 0, 0, None)
else:
raise exception
metadata['search_type'] = 'browse'
# Parse the results
containers = from_didl_string(response['Result'])
item_list = []
for container in containers:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self._update_album_art_to_full_uri(container)
item_list.append(container)
# pylint: disable=star-args
return SearchResult(item_list, **metadata)
# pylint: disable=too-many-arguments
def browse_by_idstring(self, search_type, idstring, start=0,
max_items=100, full_album_art_uri=False):
"""Browse (get sub-elements) a given type
:param search_type: The kind of information to retrieve. Can be one of:
'artists', 'album_artists', 'albums', 'genres', 'composers',
'tracks', 'share', 'sonos_playlists', and 'playlists', where
playlists are the imported file based playlists from the
music library
:param idstring: String ID to search for
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches. NOTE: The maximum
may be restricted by the unit, presumably due to transfer
size considerations, so check the number returned against the
number requested.
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A dictionary with metadata for the search, with the
keys 'number_returned', 'update_id', 'total_matches' and an
'item_list' list with the search results.
"""
search = self.SEARCH_TRANSLATION[search_type]
# Check if the string ID already has the type; if so, we do not want to
# add it again. Also, imported playlists have a full path to them, so
# they do not require the A:PLAYLISTS part first
if idstring.startswith(search) or (search_type == 'playlists'):
search = ""
search_item_id = search + idstring
search_uri = "#" + search_item_id
# Not sure about the res protocol. But this seems to work
res = [DidlResource(
uri=search_uri, protocol_info="x-rincon-playlist:*:*:*")]
search_item = DidlObject(
resources=res, title='', parent_id='',
item_id=search_item_id)
# Call the base version
return self.browse(search_item, start, max_items, full_album_art_uri)
def _music_lib_search(self, search, start, max_items):
"""Perform a music library search and extract search numbers
You can get an overview of all the relevant search prefixes (like
'A:') and their meaning with the request:
.. code ::
response = device.contentDirectory.Browse([
('ObjectID', '0'),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', 0),
('RequestedCount', 100),
('SortCriteria', '')
])
Args:
search (str): The ID to search
start: The index of the first item to return
max_items: The maximum number of items to return
Returns:
tuple: (response, metadata) where response is the returned metadata
and metadata is a dict with the 'number_returned',
'total_matches' and 'update_id' integers
"""
response = self.contentDirectory.Browse([
('ObjectID', search),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
# Get result information
metadata = {}
for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:
metadata[camel_to_underscore(tag)] = int(response[tag])
return response, metadata
def add_uri_to_queue(self, uri):
"""Adds the URI to the queue
:param uri: The URI to be added to the queue
:type uri: str
"""
# FIXME: The res.protocol_info should probably represent the mime type
# etc of the uri. But this seems OK.
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
item = DidlObject(resources=res, title='', parent_id='', item_id='')
return self.add_to_queue(item)
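    # Illustrative sketch, assuming ``device`` is a SoCo instance; the URI
    # below is hypothetical, and any URI the player can resolve works the
    # same way. The return value is the queue position of the new item.
    #
    #     position = device.add_uri_to_queue(
    #         'x-file-cifs://nas/music/track.mp3')
    #     print('Enqueued at position', position)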
def add_to_queue(self, queueable_item):
""" Adds a queueable item to the queue """
metadata = to_didl_string(queueable_item)
metadata.encode('utf-8')
response = self.avTransport.AddURIToQueue([
('InstanceID', 0),
('EnqueuedURI', queueable_item.resources[0].uri),
('EnqueuedURIMetaData', metadata),
('DesiredFirstTrackNumberEnqueued', 0),
('EnqueueAsNext', 1)
])
qnumber = response['FirstTrackNumberEnqueued']
return int(qnumber)
def remove_from_queue(self, index):
""" Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
index: the index of the track to remove; first item in the queue is 0
Returns:
True if the Sonos speaker successfully removed the track
Raises SoCoException (or a subclass) upon errors.
"""
# TODO: what do these parameters actually do?
updid = '0'
objid = 'Q:0/' + str(index + 1)
self.avTransport.RemoveTrackFromQueue([
('InstanceID', 0),
('ObjectID', objid),
('UpdateID', updid),
])
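    # Illustrative sketch, assuming ``device`` is a SoCo instance. The index
    # is zero based, so this removes the first track of the current queue:
    #
    #     device.remove_from_queue(0)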
def clear_queue(self):
""" Removes all tracks from the queue.
Returns:
True if the Sonos speaker cleared the queue.
Raises SoCoException (or a subclass) upon errors.
"""
self.avTransport.RemoveAllTracksFromQueue([
('InstanceID', 0),
])
def get_favorite_radio_shows(self, start=0, max_items=100):
""" Get favorite radio shows from Sonos' Radio app.
Returns:
A list containing the total number of favorites, the number of
favorites returned, and the actual list of favorite radio shows,
represented as a dictionary with `title` and `uri` keys.
Depending on what you're building, you'll want to check to see if the
total number of favorites is greater than the amount you
        requested (`max_items`); if it is, use `start` to page through and
get the entire list of favorites.
"""
return self.__get_radio_favorites(RADIO_SHOWS, start, max_items)
def get_favorite_radio_stations(self, start=0, max_items=100):
""" Get favorite radio stations from Sonos' Radio app.
Returns:
A list containing the total number of favorites, the number of
favorites returned, and the actual list of favorite radio stations,
represented as a dictionary with `title` and `uri` keys.
Depending on what you're building, you'll want to check to see if the
total number of favorites is greater than the amount you
        requested (`max_items`); if it is, use `start` to page through and
get the entire list of favorites.
"""
return self.__get_radio_favorites(RADIO_STATIONS, start, max_items)
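    # Illustrative paging sketch, assuming ``device`` is a SoCo instance. As
    # the docstring above notes, the reported total can exceed a single page,
    # so keep requesting until everything has been fetched:
    #
    #     start, stations = 0, []
    #     while True:
    #         page = device.get_favorite_radio_stations(start, 100)
    #         stations.extend(page['favorites'])
    #         start += page['returned']
    #         if page['returned'] == 0 or start >= int(page['total']):
    #             break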
def __get_radio_favorites(self, favorite_type, start=0, max_items=100):
""" Helper method for `get_favorite_radio_*` methods.
Arguments:
favorite_type -- Specify either `RADIO_STATIONS` or `RADIO_SHOWS`.
start -- Which number to start the retrieval from. Used for paging.
max_items -- The total number of results to return.
"""
        if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
            favorite_type = RADIO_STATIONS
response = self.contentDirectory.Browse([
('ObjectID', 'R:0/{0}'.format(favorite_type)),
('BrowseFlag', 'BrowseDirectChildren'),
('Filter', '*'),
('StartingIndex', start),
('RequestedCount', max_items),
('SortCriteria', '')
])
result = {}
favorites = []
results_xml = response['Result']
if results_xml != '':
# Favorites are returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(results_xml))
for item in metadata.findall(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
favorite = {}
favorite['title'] = item.findtext(
'{http://purl.org/dc/elements/1.1/}title')
favorite['uri'] = item.findtext(
'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
favorites.append(favorite)
result['total'] = response['TotalMatches']
result['returned'] = len(favorites)
result['favorites'] = favorites
return result
def _update_album_art_to_full_uri(self, item):
"""Update an item's Album Art URI to be an absolute URI
:param item: The item to update the URI for
"""
if getattr(item, 'album_art_uri', False):
item.album_art_uri = self._build_album_art_full_uri(
item.album_art_uri)
def create_sonos_playlist(self, title):
""" Create a new empty Sonos playlist.
        :param title: Name of the playlist
:returns: An instance of
:py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
response = self.avTransport.CreateSavedQueue([
('InstanceID', 0),
('Title', title),
('EnqueuedURI', ''),
('EnqueuedURIMetaData', ''),
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id)
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
""" Create a new Sonos playlist from the current queue.
        :param title: Name of the playlist
:returns: An instance of
:py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
# Note: probably same as Queue service method SaveAsSonosPlaylist
# but this has not been tested. This method is what the
# controller uses.
response = self.avTransport.SaveQueue([
('InstanceID', 0),
('Title', title),
('ObjectID', '')
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id)
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
""" Adds a queueable item to a Sonos' playlist
:param queueable_item: the item to add to the Sonos' playlist
:param sonos_playlist: the Sonos' playlist to which the item should
be added
"""
# Check if the required attributes are there
for attribute in ['didl_metadata', 'uri']:
if not hasattr(queueable_item, attribute):
message = 'queueable_item has no attribute {0}'.\
format(attribute)
raise AttributeError(message)
# Get the metadata
try:
metadata = XML.tostring(queueable_item.didl_metadata)
except DIDLMetadataError as exception:
message = ('The queueable item could not be enqueued, because it '
'raised a DIDLMetadataError exception with the '
'following message:\n{0}').format(str(exception))
raise ValueError(message)
if isinstance(metadata, str):
metadata = metadata.encode('utf-8')
response, _ = self._music_lib_search(sonos_playlist.item_id, 0, 1)
update_id = response['UpdateID']
self.avTransport.AddURIToSavedQueue([
('InstanceID', 0),
('UpdateID', update_id),
('ObjectID', sonos_playlist.item_id),
('EnqueuedURI', queueable_item.uri),
('EnqueuedURIMetaData', metadata),
            ('AddAtIndex', 4294967295)  # this field always has this value; we
                                        # do not know the meaning of this
                                        # "magic" number.
])
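    # Illustrative sketch, assuming ``device`` is a SoCo instance and
    # ``track`` is any object exposing the ``uri`` and ``didl_metadata``
    # attributes checked above (for instance an item taken from a music
    # library search); the playlist title is arbitrary:
    #
    #     playlist = device.create_sonos_playlist('Road Trip')
    #     device.add_item_to_sonos_playlist(track, playlist)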
def get_item_album_art_uri(self, item):
""" Get an item's Album Art absolute URI. """
if getattr(item, 'album_art_uri', False):
return self._build_album_art_full_uri(item.album_art_uri)
else:
return None
# pylint: disable=too-many-locals
def search_track(self, artist, album=None, track=None,
full_album_art_uri=False):
"""Search for an artist, artist's albums, or specific track.
:param artist: Artist name
:type artist: str
:param album: Album name
:type album: str
:param track: Track name
:type track: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist]
subcategories.append(album or '')
# Perform the search
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories, search_term=track,
complete_result=True)
result._metadata['search_type'] = 'search_track'
return result
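    # Illustrative sketch, assuming ``device`` is a SoCo instance; the
    # artist, album and track names are hypothetical. The SearchResult
    # behaves like a list of matching items:
    #
    #     matches = device.search_track('Miles Davis', 'Kind of Blue',
    #                                   'So What')
    #     for item in matches:
    #         print(item.title)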
def get_albums_for_artist(self, artist, full_album_art_uri=False):
"""Get albums for an artist.
:param artist: Artist name
:type artist: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
reduced = [item for item in result if item.__class__ == DidlMusicAlbum]
# It is necessary to update the list of items in two places, due to
# a bug in SearchResult
result[:] = reduced
result._metadata.update({
'item_list': reduced,
'search_type': 'albums_for_artist',
'number_returned': len(reduced),
'total_matches': len(reduced)
})
return result
def get_tracks_for_album(self, artist, album, full_album_art_uri=False):
"""Get tracks for an artist's album.
:param artist: Artist name
:type artist: str
:param album: Album name
:type album: str
:param full_album_art_uri: If the album art URI should include the
IP address
:type full_album_art_uri: bool
:returns: A :py:class:`~.soco.data_structures.SearchResult` object.
:rtype: :py:class:`~.soco.data_structures.SearchResult`
"""
subcategories = [artist, album]
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories,
complete_result=True)
result._metadata['search_type'] = 'tracks_for_album'
return result
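    # Illustrative sketch chaining the two helpers above, assuming ``device``
    # is a SoCo instance and the artist name is hypothetical:
    #
    #     for album in device.get_albums_for_artist('Miles Davis'):
    #         tracks = device.get_tracks_for_album('Miles Davis', album.title)
    #         print(album.title, len(tracks))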
@property
def library_updating(self):
"""True if the music library is in the process of being updated
:returns: True if the music library is in the process of being updated
:rtype: bool
"""
result = self.contentDirectory.GetShareIndexInProgress()
return result['IsIndexing'] != '0'
def start_library_update(self, album_artist_display_option=''):
"""Start an update of the music library.
If specified, album_artist_display_option changes the album
artist compilation setting (see also album_artist_display_option).
"""
return self.contentDirectory.RefreshShareIndex([
('AlbumArtistDisplayOption', album_artist_display_option),
])
@property
def album_artist_display_option(self):
"""Return the current value of the album artist compilation
setting (see
http://www.sonos.com/support/help/3.4/en/sonos_user_guide/
Chap07_new/Compilation_albums.htm)
This is a string. Possible values:
* "WMP" - Use Album Artists
* "ITUNES" - Use iTunes® Compilations
* "NONE" - Do not group compilations
To change the current setting, call `start_library_update` and
pass the new setting.
"""
result = self.contentDirectory.GetAlbumArtistDisplayOption()
return result['AlbumArtistDisplayOption']
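    # Illustrative sketch, assuming ``device`` is a SoCo instance and that
    # ``time`` has been imported: read the current compilation setting,
    # switch it, then wait for the resulting re-index to finish.
    #
    #     print(device.album_artist_display_option)   # for example 'WMP'
    #     device.start_library_update('ITUNES')
    #     while device.library_updating:
    #         time.sleep(1)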
# definition section
RADIO_STATIONS = 0
RADIO_SHOWS = 1
NS = {'dc': '{http://purl.org/dc/elements/1.1/}',
'upnp': '{urn:schemas-upnp-org:metadata-1-0/upnp/}',
'': '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}'}
# Valid play modes
PLAY_MODES = ('NORMAL', 'SHUFFLE_NOREPEAT', 'SHUFFLE', 'REPEAT_ALL')
if config.SOCO_CLASS is None:
config.SOCO_CLASS = SoCo
| xxdede/SoCo | soco/core.py | Python | mit | 72,281 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Registry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('display_order', models.IntegerField()),
('visibility', models.CharField(default=b'Unpusblished', max_length=20, choices=[(b'Published', b'Published'), (b'Unpusblished', b'Unpusblished')])),
('title', models.CharField(max_length=50)),
('alt_text', models.CharField(max_length=200)),
('registry_image', models.ImageField(upload_to=b'')),
],
options={
'abstract': False,
},
),
]
| zacherytapp/wedding | weddingapp/apps/registry/migrations/0001_initial.py | Python | bsd-3-clause | 1,051 | 0.001903 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 for stopped Virtual Maschine life cycle
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
Volume,
Router,
DiskOffering,
Host,
Iso,
Cluster,
StoragePool,
Template)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
get_builtin_template_info,
update_resource_limit,
find_storage_pool_type)
from marvin.codes import PASS
class TestDeployVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDeployVM, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.skip = False
if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
cls.skip = True
return
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
if self.skip:
self.skipTest("RBD storage type is required for data volumes for LXC")
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.testdata["iso"]["zoneid"] = self.zone.id
self.testdata["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags=["advanced", "eip", "advancedns"], required_hardware="false")
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_02_deploy_vm_startvm_true(self):
"""Test Deploy Virtual Machine with startVM=true parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=true
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=True,
mode=self.zone.networktype
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_03_deploy_vm_startvm_false(self):
"""Test Deploy Virtual Machine with startVM=false parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=false
# 2. Should not be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Check listRouters call for that account. List routers should
# return empty response
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug("Destroying instance: %s" % self.virtual_machine.name)
self.virtual_machine.delete(self.apiclient, expunge=True)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_04_deploy_startvm_false_attach_volume(self):
"""Test Deploy Virtual Machine with startVM=false and attach volume
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach volume should be successful
# Skipping this test
self.skipTest("Skipping test as proper implementation seems to be missing")
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.testdata["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with Exception: %s" % e)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_05_deploy_startvm_false_change_so(self):
"""Test Deploy Virtual Machine with startVM=false and change service offering
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Change service offering
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
medium_service_off = ServiceOffering.create(
self.apiclient,
self.testdata["service_offering"]
)
self.cleanup.append(medium_service_off)
self.debug("Changing service offering for instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.change_service_offering(
self.apiclient,
medium_service_off.id
)
except Exception as e:
self.fail("Change service offering failed: %s" % e)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Instance: %s started" % self.virtual_machine.name)
listedvm = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id)
self.assert_(isinstance(listedvm, list))
self.assert_(len(listedvm) > 0)
self.assertEqual(
listedvm[0].serviceofferingid,
medium_service_off.id,
msg="VM did not change service offering")
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_06_deploy_startvm_attach_detach(self):
"""Test Deploy Virtual Machine with startVM=false and
attach detach volumes
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach volume should be successful
# 4. Detach volume from instance. Detach should be successful
# Skipping this test
self.skipTest("Skipping test as proper implementation seems to be missing")
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.testdata["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with Exception: %s" % e)
self.debug("Detaching the disk: %s" % volume.name)
self.virtual_machine.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="true")
def test_07_deploy_startvm_attach_iso(self):
"""Test Deploy Virtual Machine with startVM=false and attach ISO
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach ISO to the instance. Attach ISO should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
self.debug("Registering a ISO in account: %s" %
self.account.name)
iso = Iso.create(
self.apiclient,
self.testdata["iso"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Successfully created ISO with ID: %s" % iso.id)
try:
iso.download(self.apiclient)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"
% (iso.id, e))
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
iso.id,
self.virtual_machine.id
))
try:
self.virtual_machine.attach_iso(self.apiclient, iso)
except Exception as e:
self.fail("Attach ISO failed!")
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.isoid,
iso.id,
"The ISO status should be reflected in list Vm call"
)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_08_deploy_attached_volume(self):
"""Test Deploy Virtual Machine with startVM=false and attach volume
already attached to different machine
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Create an instance with datadisk attached to it. Detach DATADISK
# 4. Attach the volume to first virtual machine.
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
response = self.virtual_machine_1.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s"
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug(
"Fetching DATADISK details for instance: %s" %
self.virtual_machine_2.name)
volumes = Volume.list(
self.apiclient,
type='DATADISK',
account=self.account.name,
domainid=self.account.domainid,
virtualmachineid=self.virtual_machine_2.id
)
self.assertEqual(
isinstance(volumes, list),
True,
"List volumes should return a valid list"
)
volume = volumes[0]
self.debug("Detaching the disk: %s" % volume.name)
try:
self.virtual_machine_2.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
except Exception as e:
self.fail("Detach volume failed!")
self.debug("Attaching volume to instance: %s" %
self.virtual_machine_1.name)
try:
self.virtual_machine_1.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with %s!" % e)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine_1.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertNotEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_09_stop_vm_migrate_vol(self):
"""Test Stopped Virtual Machine's ROOT volume migration
"""
# Validate the following:
# 1. deploy Vm with startvm=true
# 2. Should not be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
# 4. Stop the vm
# 5.list primary storages in the cluster , should be more than one
# 6.Migrate voluem to another available primary storage
if self.hypervisor.lower() in ['lxc']:
self.skipTest(
"vm migrate is not supported in %s" %
self.hypervisor)
clusters = Cluster.list(
self.apiclient,
zoneid=self.zone.id
)
self.assertEqual(
isinstance(clusters, list),
True,
"Check list response returns a valid list"
)
i = 0
for cluster in clusters:
storage_pools = StoragePool.list(
self.apiclient,
clusterid=cluster.id
)
if len(storage_pools) > 1:
self.cluster_id = cluster.id
i += 1
break
if i == 0:
self.skipTest(
"No cluster with more than one primary storage pool to "
"perform migrate volume test")
hosts = Host.list(
self.apiclient,
clusterid=self.cluster_id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
host = hosts[0]
self.debug("Deploying instance on host: %s" % host.id)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
hostid=host.id,
mode=self.zone.networktype
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
try:
self.virtual_machine.stop(self.apiclient)
except Exception as e:
self.fail("failed to stop instance: %s" % e)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check volume list response returns a valid list"
)
vol_response = volumes[0]
# get the storage name in which volume is stored
storage_name = vol_response.storage
storage_pools = StoragePool.list(
self.apiclient,
clusterid=self.cluster_id
)
# Get storage pool to migrate volume
for spool in storage_pools:
if spool.name == storage_name:
continue
else:
self.storage_id = spool.id
self.storage_name = spool.name
break
self.debug("Migrating volume to storage pool: %s" % self.storage_name)
Volume.migrate(
self.apiclient,
storageid=self.storage_id,
volumeid=vol_response.id
)
volume = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
volume[0].storage,
self.storage_name,
"Check volume migration response")
return
class TestDeployHaEnabledVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDeployHaEnabledVM, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.skip = False
if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
cls.skip = True
return
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
if self.skip:
self.skipTest("RBD storage type is required for data volumes for LXC ")
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.hypervisor = self.testClient.getHypervisorInfo()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.testdata["virtual_machine"]["template"] = self.template.id
self.testdata["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_01_deploy_ha_vm_startvm_false(self):
"""Test Deploy HA enabled Virtual Machine with startvm=false
"""
# Validate the following:
# 1. deployHA enabled Vm with the startvm parameter = false
# 2. listVM command should return the deployed VM. State of this VM
# should be "Created".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="true")
def test_02_deploy_ha_vm_from_iso(self):
"""Test Deploy HA enabled Virtual Machine from ISO
"""
# Validate the following:
# 1. deployHA enabled Vm using ISO with the startvm parameter=true
# 2. listVM command should return the deployed VM. State of this VM
# should be "Running".
if self.hypervisor.lower() in ['lxc']:
self.skipTest(
"vm deploy from ISO feature is not supported on %s" %
self.hypervisor.lower())
self.iso = Iso.create(
self.apiclient,
self.testdata["configurableData"]["bootableIso"],
account=self.account.name,
domainid=self.account.domainid,
zoneid=self.zone.id
)
try:
# Download the ISO
self.iso.download(self.apiclient)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"
% (self.iso.id, e))
self.debug("Registered ISO: %s" % self.iso.name)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
templateid=self.iso.id,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True,
hypervisor=self.hypervisor
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_03_deploy_ha_vm_iso_startvm_false(self):
"""Test Deploy HA enabled Virtual Machine from ISO with startvm=false
"""
# Validate the following:
# 1. deployHA enabled Vm using ISO with the startvm parameter=false
# 2. listVM command should return the deployed VM. State of this VM
# should be "Stopped".
if self.hypervisor.lower() in ['lxc']:
self.skipTest("vm deploy from ISO feature is not supported on %s" %
self.hypervisor.lower())
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
return
class TestRouterStateAfterDeploy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestRouterStateAfterDeploy,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.skip = False
if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
cls.skip = True
return
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
if self.skip:
self.skipTest("RBD storage type is required for data volumes for LXC")
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.testdata["virtual_machine"]["template"] = self.template.id
self.testdata["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags=["advanced", "eip", "advancedns"], required_hardware="false")
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
response = self.virtual_machine_1.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
if(self.zone.networktype == "Advanced"):
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug(
"Deploying another instance (startvm=true) in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
response = self.virtual_machine_2.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
self.debug("Checking the router state after VM deployment")
if (self.zone.networktype == "Basic"):
routers = Router.list(
self.apiclient,
zoneid=self.zone.id,
listall=True
)
else:
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers should not return empty response"
)
for router in routers:
self.debug("Router state: %s" % router.state)
self.assertEqual(
router.state,
"Running",
"Router should be in running state when "
"instance is running in the account")
self.debug("Destroying the running VM:%s" %
self.virtual_machine_2.name)
self.virtual_machine_2.delete(self.apiclient, expunge=True)
if(self.zone.networktype == "Advanced"):
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertNotEqual(
routers,
None,
"Router should get deleted after expunge delay+wait"
)
return
class TestDeployVMBasicZone(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDeployVMBasicZone, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.skip = False
if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
cls.skip = True
return
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
if self.skip:
self.skipTest("RBD storage type is required for data volumes for LXC")
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.testdata["iso"]["zoneid"] = self.zone.id
self.testdata["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
class TestDeployVMFromTemplate(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestDeployVMFromTemplate,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
self.dbclient = self.testClient.getDbConnection()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
builtin_info = get_builtin_template_info(self.apiclient, self.zone.id)
self.testdata["privatetemplate"]["url"] = builtin_info[0]
self.testdata["privatetemplate"]["hypervisor"] = builtin_info[1]
self.testdata["privatetemplate"]["format"] = builtin_info[2]
# Register new template
self.template = Template.register(
self.apiclient,
self.testdata["privatetemplate"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
hypervisor=self.hypervisor
)
self.debug(
"Registered a template of format: %s with ID: %s" % (
self.testdata["privatetemplate"]["format"],
self.template.id
))
try:
self.template.download(self.apiclient)
except Exception as e:
raise Exception("Template download failed: %s" % e)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="true")
def test_deploy_vm_password_enabled(self):
"""Test Deploy Virtual Machine with startVM=false & enabledpassword in
template
"""
# Validate the following:
# 1. Create the password enabled template
# 2. Deploy Vm with this template and passing startvm=false
# 3. Start VM. Deploy VM should be successful and it should be in Up
# and running state
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
startvm=False,
)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.STOPPED)
self.assertEqual(response[0], PASS, response[1])
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Started the instance: %s" % self.virtual_machine.name)
response = self.virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
return
class TestVMAccountLimit(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestVMAccountLimit, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
# Create Account, VMs etc
cls.account = Account.create(
cls.api_client,
cls.testdata["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_vm_per_account(self):
"""Test VM limit per account
"""
# Validate the following
# 1. Set the resource limit for VM per account.
# 2. Deploy VMs more than limit in that account.
        # 3. API should error out
self.debug(
"Updating instance resource limit for account: %s" %
self.account.name)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
0, # Instance
account=self.account.name,
domainid=self.account.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Running or not"
)
# Exception should be raised for second instance (account_1)
with self.assertRaises(Exception):
VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
return
class TestUploadAttachVolume(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUploadAttachVolume, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.skip = True
if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
cls.skip = True
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
# Create Account, VMs etc
cls.account = Account.create(
cls.api_client,
cls.testdata["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
if self.skip:
self.skipTest("Attach operation for uploaded volume to VM which is not started once is not supported")
# self.skipTest("RBD storage type is required for data volumes for LXC")
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(
tags=[
"advanced",
"eip",
"advancedns",
"basic",
"sg"],
required_hardware="false")
def test_upload_attach_volume(self):
"""Test Upload volume and attach to VM in stopped state
"""
# Validate the following
# 1. Upload the volume using uploadVolume API call
# 2. Deploy VM with startvm=false.
# 3. Attach the volume to the deployed VM in step 2
self.debug(
"Uploading the volume: %s" %
self.testdata["configurableData"]["upload_volume"]["diskname"])
try:
volume = Volume.upload(
self.apiclient,
self.testdata["configurableData"]["upload_volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Uploading the volume: %s" % volume.name)
volume.wait_for_upload(self.apiclient)
self.debug("Volume: %s uploaded successfully")
except Exception as e:
self.fail("Failed to upload the volume: %s" % e)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Running or not"
)
virtual_machine.attach_volume(self.apiclient, volume)
return
class TestDeployOnSpecificHost(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestDeployOnSpecificHost,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.testdata["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
return
def tearDown(self):
try:
self.account.delete(self.apiclient)
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "simulator",
"api", "basic", "eip", "sg"])
def test_deployVmOnGivenHost(self):
"""Test deploy VM on specific host
"""
# Steps for validation
# 1. as admin list available hosts that are Up
# 2. deployVM with hostid=above host
# 3. listVirtualMachines
# 4. destroy VM
# Validate the following
# 1. listHosts returns at least one host in Up state
# 2. VM should be in Running
# 3. VM should be on the host that it was deployed on
hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
type='Routing',
state='Up',
listall=True
)
self.assertEqual(
isinstance(hosts, list),
True,
"CS should have atleast one host Up and Running"
)
host = hosts[0]
self.debug("Deploting VM on host: %s" % host.name)
try:
vm = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
hostid=host.id
)
self.debug("Deploy VM succeeded")
except Exception as e:
self.fail("Deploy VM failed with exception: %s" % e)
self.debug("Cheking the state of deployed VM")
vms = VirtualMachine.list(
self.apiclient,
id=vm.id,
listall=True,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vm should return a valid response"
)
vm_response = vms[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
self.assertEqual(
vm_response.hostid,
host.id,
"Host id where VM is deployed should match"
)
return
| GabrielBrascher/cloudstack | test/integration/component/test_stopped_vm.py | Python | apache-2.0 | 58,065 | 0.000224 |
from rtruffle.abstract_node import AbstractNode, NodeInitializeMetaClass
class BaseNode(AbstractNode):
__metaclass__ = NodeInitializeMetaClass
_immutable_fields_ = ['_source_section', '_parent']
_child_nodes_ = []
| SOM-st/RPySOM | src/rtruffle/base_node_2.py | Python | mit | 229 | 0 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import os
import imp
import sys
import json
import time
import supybot
import supybot.conf as conf
from supybot import commands
import supybot.utils as utils
from supybot.commands import *
import supybot.ircdb as ircdb
import supybot.irclib as irclib
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.registry as registry
from supybot import commands
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Misc')
if minisix.PY2:
from itertools import ifilter as filter
def get_suffix(file):
for suffix in imp.get_suffixes():
if file[-len(suffix[0]):] == suffix[0]:
return suffix
return None
def getPluginsInDirectory(directory):
# get modules in a given directory
plugins = []
for filename in os.listdir(directory):
pluginPath = os.path.join(directory, filename)
if os.path.isdir(pluginPath):
if all(os.path.isfile(os.path.join(pluginPath, x))
for x in ['__init__.py', 'config.py', 'plugin.py']):
plugins.append(filename)
return plugins
class RegexpTimeout(Exception):
pass
class Misc(callbacks.Plugin):
"""Miscellaneous commands to access Supybot core. This is a core
Supybot plugin that should not be removed!"""
def __init__(self, irc):
self.__parent = super(Misc, self)
self.__parent.__init__(irc)
self.invalidCommands = \
ircutils.FloodQueue(conf.supybot.abuse.flood.interval())
conf.supybot.abuse.flood.interval.addCallback(self.setFloodQueueTimeout)
def setFloodQueueTimeout(self, *args, **kwargs):
self.invalidCommands.timeout = conf.supybot.abuse.flood.interval()
def callPrecedence(self, irc):
return ([cb for cb in irc.callbacks if cb is not self], [])
def invalidCommand(self, irc, msg, tokens):
assert not msg.repliedTo, 'repliedTo msg in Misc.invalidCommand.'
assert self is irc.callbacks[-1], 'Misc isn\'t last callback.'
assert msg.command in ('PRIVMSG', 'NOTICE')
self.log.debug('Misc.invalidCommand called (tokens %s)', tokens)
# First, we check for invalidCommand floods. This is rightfully done
# here since this will be the last invalidCommand called, and thus it
# will only be called if this is *truly* an invalid command.
maximum = conf.supybot.abuse.flood.command.invalid.maximum()
self.invalidCommands.enqueue(msg)
if self.invalidCommands.len(msg) > maximum and \
conf.supybot.abuse.flood.command.invalid() and \
not ircdb.checkCapability(msg.prefix, 'owner'):
punishment = conf.supybot.abuse.flood.command.invalid.punishment()
banmask = '*!%s@%s' % (msg.user, msg.host)
self.log.info('Ignoring %s for %s seconds due to an apparent '
'invalid command flood.', banmask, punishment)
if tokens and tokens[0] == 'Error:':
self.log.warning('Apparent error loop with another Supybot '
'observed. Consider ignoring this bot '
'permanently.')
ircdb.ignores.add(banmask, time.time() + punishment)
if conf.supybot.abuse.flood.command.invalid.notify():
irc.reply(_('You\'ve given me %s invalid commands within the last '
'%i seconds; I\'m now ignoring you for %s.') %
(maximum,
conf.supybot.abuse.flood.interval(),
utils.timeElapsed(punishment, seconds=False)))
return
# Now, for normal handling.
channel = msg.args[0]
# Only bother with the invaildCommand flood handling if it's actually
# enabled
if conf.supybot.abuse.flood.command.invalid():
# First, we check for invalidCommand floods. This is rightfully done
# here since this will be the last invalidCommand called, and thus it
# will only be called if this is *truly* an invalid command.
maximum = conf.supybot.abuse.flood.command.invalid.maximum()
banmasker = conf.supybot.protocols.irc.banmask.makeBanmask
if self.invalidCommands.len(msg) > maximum and \
not ircdb.checkCapability(msg.prefix, 'owner') and \
msg.prefix != irc.prefix and \
ircutils.isUserHostmask(msg.prefix):
penalty = conf.supybot.abuse.flood.command.invalid.punishment()
banmask = banmasker(msg.prefix, channel=None)
self.log.info('Ignoring %s for %s seconds due to an apparent '
'invalid command flood.', banmask, penalty)
if tokens and tokens[0] == 'Error:':
self.log.warning('Apparent error loop with another Supybot '
'observed. Consider ignoring this bot '
'permanently.')
ircdb.ignores.add(banmask, time.time() + penalty)
if conf.supybot.abuse.flood.command.invalid.notify():
irc.reply('You\'ve given me %s invalid commands within '
'the last minute; I\'m now ignoring you for %s.' %
(maximum,
utils.timeElapsed(penalty, seconds=False)))
return
# Now, for normal handling.
if conf.get(conf.supybot.reply.whenNotCommand, channel):
if len(tokens) >= 2:
cb = irc.getCallback(tokens[0])
if cb:
plugin = cb.name()
irc.error(format(_('The %q plugin is loaded, but there is '
'no command named %q in it. Try "list '
'%s" to see the commands in the %q '
'plugin.'), plugin, tokens[1],
plugin, plugin))
else:
irc.errorInvalid(_('command'), tokens[0], repr=False)
else:
command = tokens and tokens[0] or ''
irc.errorInvalid(_('command'), command, repr=False)
else:
if tokens:
# echo [] will get us an empty token set, but there's no need
# to log this in that case anyway, it being a nested command.
self.log.info('Not replying to %s in %s, not a command.' %
(tokens[0], channel
if channel != irc.nick else _('private')))
if irc.nested:
bracketConfig = conf.supybot.commands.nested.brackets
brackets = conf.get(bracketConfig, channel)
if brackets:
(left, right) = brackets
irc.reply(left + ' '.join(tokens) + right)
else:
pass # Let's just do nothing, I can't think of better.
def isPublic(self, cb):
name = cb.name()
return conf.supybot.plugins.get(name).public()
@internationalizeDocstring
def list(self, irc, msg, args, optlist, cb):
"""[--private] [--unloaded] [<plugin>]
Lists the commands available in the given plugin. If no plugin is
given, lists the public plugins available. If --private is given,
lists the private plugins. If --unloaded is given, it will list
available plugins that are not loaded.
"""
private = False
unloaded = False
for (option, argument) in optlist:
if option == 'private':
private = True
if not self.registryValue('listPrivatePlugins') and \
not ircdb.checkCapability(msg.prefix, 'owner'):
irc.errorNoCapability('owner')
elif option == 'unloaded':
unloaded = True
if not self.registryValue('listUnloadedPlugins') and \
not ircdb.checkCapability(msg.prefix, 'owner'):
irc.errorNoCapability('owner')
if unloaded and private:
irc.error(_('--private and --unloaded are incompatible options.'))
return
if not cb:
if unloaded:
# We were using the path of Misc + .. to detect the install
# directory. However, it fails if Misc is not in the
# installation directory for some reason, so we use a
# supybot module.
installedPluginsDirectory = os.path.join(
os.path.dirname(conf.__file__), 'plugins')
plugins = getPluginsInDirectory(installedPluginsDirectory)
for directory in conf.supybot.directories.plugins()[:]:
plugins.extend(getPluginsInDirectory(directory))
# Remove loaded plugins:
loadedPlugins = [x.name() for x in irc.callbacks]
plugins = [x for x in plugins if x not in loadedPlugins]
plugins.sort()
irc.reply(format('%L', plugins))
else:
names = [cb.name() for cb in irc.callbacks
if (private and not self.isPublic(cb)) or
(not private and self.isPublic(cb))]
names.sort()
if names:
irc.reply(format('%L', names))
else:
if private:
irc.reply(_('There are no private plugins.'))
else:
irc.reply(_('There are no public plugins.'))
else:
commands = cb.listCommands()
if commands:
commands.sort()
irc.reply(format('%L', commands))
else:
irc.reply(format(_('That plugin exists, but has no commands. '
'This probably means that it has some '
'configuration variables that can be '
'changed in order to modify its behavior. '
'Try "config list supybot.plugins.%s" to see '
'what configuration variables it has.'),
cb.name()))
list = wrap(list, [getopts({'private':'', 'unloaded':''}),
additional('plugin')])
@internationalizeDocstring
def apropos(self, irc, msg, args, s):
"""<string>
Searches for <string> in the commands currently offered by the bot,
returning a list of the commands containing that string.
"""
commands = {}
L = []
for cb in irc.callbacks:
if isinstance(cb, callbacks.Plugin):
for command in cb.listCommands():
if s in command:
commands.setdefault(command, []).append(cb.name())
for (key, names) in commands.items():
for name in names:
L.append('%s %s' % (name, key))
if L:
L.sort()
irc.reply(format('%L', L))
else:
irc.reply(_('No appropriate commands were found.'))
apropos = wrap(apropos, ['lowered'])
@internationalizeDocstring
def help(self, irc, msg, args, command):
"""[<plugin>] [<command>]
This command gives a useful description of what <command> does.
<plugin> is only necessary if the command is in more than one plugin.
You may also want to use the 'list' command to list all available
plugins and commands.
"""
if not command:
cHelp = self.registryValue("customHelpString")
if cHelp:
irc.reply(cHelp)
else:
irc.error()
return
command = list(map(callbacks.canonicalName, command))
(maxL, cbs) = irc.findCallbacksForArgs(command)
if maxL == command:
if len(cbs) > 1:
names = sorted([cb.name() for cb in cbs])
irc.error(format(_('That command exists in the %L plugins. '
'Please specify exactly which plugin command '
'you want help with.'), names))
else:
assert cbs, 'Odd, maxL == command, but no cbs.'
irc.reply(_.__call__(cbs[0].getCommandHelp(command, False)))
else:
plugins = [cb.name() for cb in irc.callbacks
if self.isPublic(cb)]
s = format(_('There is no command %q.'),
callbacks.formatCommand(command))
if command[0].lower() in map(str.lower, plugins):
s += (' However, "{0}" is the name of a loaded plugin, and '
'you may be able to find its provided commands '
'using \'list {0}\'.'.format(command[0].title()))
irc.error(s)
help = wrap(help, [any('something')])
@internationalizeDocstring
def version(self, irc, msg, args):
"""takes no arguments
Returns the version of the current bot.
"""
try:
newestUrl = 'https://api.github.com/repos/ProgVal/Limnoria/' + \
'commits/%s'
versions = {}
for branch in ('master', 'testing'):
data = json.loads(utils.web.getUrl(newestUrl % branch)
.decode('utf8'))
version = data['commit']['committer']['date']
                # Keep only the date part (drop the time and 'Z') and use dots:
version = version.rsplit('T', 1)[0].replace('-', '.')
if minisix.PY2 and isinstance(version, unicode):
version = version.encode('utf8')
versions[branch] = version
newest = _('The newest versions available online are %s.') % \
', '.join([_('%s (in %s)') % (y,x)
for x,y in versions.items()])
except utils.web.Error as e:
self.log.info('Couldn\'t get website version: %s', e)
newest = _('I couldn\'t fetch the newest version '
'from the Limnoria repository.')
s = _('The current (running) version of this Supybot is %s, '
'running on Python %s. %s') % \
(conf.version, sys.version.replace('\n', ' '), newest)
irc.reply(s)
version = wrap(thread(version))
@internationalizeDocstring
def source(self, irc, msg, args):
"""takes no arguments
Returns a URL saying where to get Limnoria.
"""
irc.reply(_('My source is at https://github.com/ProgVal/Limnoria'))
source = wrap(source)
@internationalizeDocstring
def more(self, irc, msg, args, nick):
"""[<nick>]
If the last command was truncated due to IRC message length
limitations, returns the next chunk of the result of the last command.
If <nick> is given, it takes the continuation of the last command from
<nick> instead of the person sending this message.
"""
userHostmask = msg.prefix.split('!', 1)[1]
if nick:
try:
(private, L) = irc._mores[nick]
if not private:
irc._mores[userHostmask] = L[:]
else:
irc.error(_('%s has no public mores.') % nick)
return
except KeyError:
irc.error(_('Sorry, I can\'t find any mores for %s') % nick)
return
try:
L = irc._mores[userHostmask]
number = self.registryValue('mores', msg.args[0])
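            # Popping past the end of L raises IndexError, which is caught
            # below and reported as "there is no more".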
chunks = [L.pop() for x in range(0, number)]
if L:
if len(L) < 2:
more = _('1 more message')
else:
more = _('%i more messages') % len(L)
chunks[-1] += format(' \x02(%s)\x0F', more)
irc.replies(chunks, noLengthCheck=True, oneToOne=False)
except KeyError:
irc.error(_('You haven\'t asked me a command; perhaps you want '
'to see someone else\'s more. To do so, call this '
'command with that person\'s nick.'))
except IndexError:
irc.error(_('That\'s all, there is no more.'))
more = wrap(more, [additional('seenNick')])
def _validLastMsg(self, msg):
return msg.prefix and \
msg.command == 'PRIVMSG' and \
ircutils.isChannel(msg.args[0])
@internationalizeDocstring
def last(self, irc, msg, args, optlist):
"""[--{from,in,on,with,without,regexp} <value>] [--nolimit]
Returns the last message matching the given criteria. --from requires
a nick from whom the message came; --in requires a channel the message
was sent to; --on requires a network the message was sent on; --with
requires some string that had to be in the message; --regexp requires
a regular expression the message must match; --nolimit returns all
the messages that can be found. By default, the channel this command is
given in is searched.
"""
predicates = {}
nolimit = False
skipfirst = True
if ircutils.isChannel(msg.args[0]):
predicates['in'] = lambda m: ircutils.strEqual(m.args[0],
msg.args[0])
else:
skipfirst = False
for (option, arg) in optlist:
if option == 'from':
def f(m, arg=arg):
return ircutils.hostmaskPatternEqual(arg, m.nick)
predicates['from'] = f
elif option == 'in':
def f(m, arg=arg):
return ircutils.strEqual(m.args[0], arg)
predicates['in'] = f
if arg != msg.args[0]:
skipfirst = False
elif option == 'on':
def f(m, arg=arg):
return m.receivedOn == arg
predicates['on'] = f
elif option == 'with':
def f(m, arg=arg):
return arg.lower() in m.args[1].lower()
predicates.setdefault('with', []).append(f)
elif option == 'without':
def f(m, arg=arg):
return arg.lower() not in m.args[1].lower()
predicates.setdefault('without', []).append(f)
elif option == 'regexp':
def f(m, arg=arg):
def f1(s, arg):
"""Since we can't enqueue match objects into the multiprocessing queue,
we'll just wrap the function to return bools."""
if arg.search(s) is not None:
return True
else:
return False
if ircmsgs.isAction(m):
m1 = ircmsgs.unAction(m)
else:
m1 = m.args[1]
return regexp_wrapper(m1, reobj=arg, timeout=0.1,
plugin_name=self.name(),
fcn_name='last')
predicates.setdefault('regexp', []).append(f)
elif option == 'nolimit':
nolimit = True
iterable = filter(self._validLastMsg, reversed(irc.state.history))
if skipfirst:
# Drop the first message only if our current channel is the same as
# the channel we've been instructed to look at.
next(iterable)
predicates = list(utils.iter.flatten(predicates.values()))
# Make sure the user can't get messages from channels they aren't in
def userInChannel(m):
return m.args[0] in irc.state.channels \
and msg.nick in irc.state.channels[m.args[0]].users
predicates.append(userInChannel)
# Make sure the user can't get messages from a +s channel unless
# they're calling the command from that channel or from a query
def notSecretMsg(m):
return not irc.isChannel(msg.args[0]) \
or msg.args[0] == m.args[0] \
or (m.args[0] in irc.state.channels \
and 's' not in irc.state.channels[m.args[0]].modes)
predicates.append(notSecretMsg)
resp = []
if irc.nested and not \
self.registryValue('last.nested.includeTimestamp'):
tsf = None
else:
tsf = self.registryValue('timestampFormat')
if irc.nested and not self.registryValue('last.nested.includeNick'):
showNick = False
else:
showNick = True
for m in iterable:
for predicate in predicates:
try:
if not predicate(m):
break
except RegexpTimeout:
irc.error(_('The regular expression timed out.'))
return
else:
if nolimit:
resp.append(ircmsgs.prettyPrint(m,
timestampFormat=tsf,
showNick=showNick))
else:
irc.reply(ircmsgs.prettyPrint(m,
timestampFormat=tsf,
showNick=showNick))
return
if not resp:
irc.error(_('I couldn\'t find a message matching that criteria in '
'my history of %s messages.') % len(irc.state.history))
else:
irc.reply(format('%L', resp))
last = wrap(last, [getopts({'nolimit': '',
'on': 'something',
'with': 'something',
'from': 'something',
'without': 'something',
'in': 'callerInGivenChannel',
'regexp': 'regexpMatcher',})])
def _tell(self, irc, msg, args, target, text, notice):
if irc.nested:
irc.error('This command cannot be nested.', Raise=True)
if target.lower() == 'me':
target = msg.nick
if ircutils.isChannel(target):
irc.error(_('Hey, just give the command. No need for the tell.'))
return
if not ircutils.isNick(target):
irc.errorInvalid('nick', target)
if ircutils.nickEqual(target, irc.nick):
irc.error(_('You just told me, why should I tell myself?'),
Raise=True)
if target not in irc.state.nicksToHostmasks and \
not ircdb.checkCapability(msg.prefix, 'owner'):
# We'll let owners do this.
s = _('I haven\'t seen %s, I\'ll let you do the telling.') % target
irc.error(s, Raise=True)
if irc.action:
irc.action = False
text = '* %s %s' % (irc.nick, text)
s = _('%s wants me to tell you: %s') % (msg.nick, text)
irc.replySuccess()
irc.reply(s, to=target, private=True, notice=notice)
@internationalizeDocstring
def tell(self, *args):
"""<nick> <text>
Tells the <nick> whatever <text> is. Use nested commands to your
benefit here.
"""
self._tell(*args, notice=False)
tell = wrap(tell, ['something', 'text'])
@internationalizeDocstring
def noticetell(self, *args):
"""<nick> <text>
Tells the <nick> whatever <text> is, in a notice. Use nested
commands to your benefit here.
"""
self._tell(*args, notice=True)
noticetell = wrap(noticetell, ['something', 'text'])
@internationalizeDocstring
def ping(self, irc, msg, args):
"""takes no arguments
Checks to see if the bot is alive.
"""
irc.reply(_('pong'), prefixNick=False)
@internationalizeDocstring
def completenick(self, irc, msg, args, channel, beginning, optlist):
"""[<channel>] <beginning> [--match-case]
Returns the nick of someone on the channel whose nick begins with the
given <beginning>.
<channel> defaults to the current channel."""
if channel not in irc.state.channels:
irc.error(_('I\'m not even in %s.') % channel, Raise=True)
if ('match-case', True) in optlist:
def match(nick):
return nick.startswith(beginning)
else:
beginning = beginning.lower()
def match(nick):
return nick.lower().startswith(beginning)
for nick in irc.state.channels[channel].users:
if match(nick):
irc.reply(nick)
return
irc.error(_('No such nick.'))
completenick = wrap(completenick, ['channel', 'something',
getopts({'match-case':''})])
@internationalizeDocstring
def clearmores(self, irc, msg, args):
"""takes no arguments
Clears all mores for the current network."""
irc._mores.clear()
irc.replySuccess()
clearmores = wrap(clearmores, ['admin'])
Class = Misc
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| Ban3/Limnoria | plugins/Misc/plugin.py | Python | bsd-3-clause | 27,456 | 0.001603 |
#!/usr/bin/env python2
import os
from credit import main, exce
from credit import jsonhelper as jh
from credit.tests import testData as td
import unittest
class Test_total_all_net(unittest.TestCase):
def setUp(self):
self.fnum = 10
self.days = 10
self.startname = 'test_display'
self.files = [(self.startname + str(i) + main.SHEETEXT) for i in \
range(self.fnum)]
self.bal = 0
for index, name in enumerate(self.files):
money = (index + 1) * 100
self.bal += money
fakeDict = td.fakeDict(self.days, money)
with open(name, 'w') as f:
f.write(jh.dict_to_json(fakeDict))
def test_total_all(self):
num_files = 0
totals = 0
for sheetname, total in main.total_all():
self.assertTrue((sheetname + main.SHEETEXT) in self.files)
num_files += 1
totals += total
self.assertEqual(num_files, self.fnum)
self.assertTrue(abs(totals - self.bal) < td.ERROR)
def test_net(self):
self.assertTrue(abs(main.net() - self.bal) < td.ERROR)
def tearDown(self):
for name in self.files:
os.remove(name)
| leosartaj/credit | credit/tests/main_tests/test_total_all_net.py | Python | mit | 1,235 | 0.00081 |
# -*- coding: utf-8 -*-
from ..base import * # noqa
build_column('idmen', IntCol(is_permanent = True, label = u"Identifiant du ménage"))
build_column('idfoy', IntCol(is_permanent = True, label = u"Identifiant du foyer"))
build_column('idfam', IntCol(is_permanent = True, label = u"Identifiant de la famille"))
build_column('quimen', EnumCol(QUIMEN, is_permanent = True))
build_column('quifoy', EnumCol(QUIFOY, is_permanent = True))
build_column('quifam', EnumCol(QUIFAM, is_permanent = True))
build_column('birth', DateCol(default = date(1970, 1, 1), is_permanent = True, label = u"Date de naissance"))
build_column('adoption', BoolCol(entity = "ind", label = u"Enfant adopté"))
build_column('alt', BoolCol(label = u'Enfant en garde alternée')) # TODO: cerfa_field
build_column('activite', EnumCol(label = u'Activité',
enum = Enum([u'Actif occupé',
u'Chômeur',
u'Étudiant, élève',
u'Retraité',
u'Autre inactif']), default = 4))
build_column('enceinte', BoolCol(entity = 'ind', label = u"Est enceinte"))
build_column('statmarit', EnumCol(label = u"Statut marital",
default = 2,
enum = Enum([u"Marié",
u"Célibataire",
u"Divorcé",
u"Veuf",
u"Pacsé",
u"Jeune veuf"], start = 1)))
build_column('nbN', PeriodSizeIndependentIntCol(cerfa_field = u'N', entity = 'foy',
label = u"Nombre d'enfants mariés/pacsés et d'enfants non mariés chargés de famille"))
build_column('nbR', PeriodSizeIndependentIntCol(cerfa_field = u'R', entity = 'foy',
label = u"Nombre de titulaires (autres que les enfants) de la carte invalidité d'au moins 80 %"))
build_column('caseE', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire : vous vivez seul au 1er janvier de l'année de perception des revenus et vous avez élevé un enfant pendant moins de 5 ans durant la période où vous viviez seul",
entity = 'foy',
cerfa_field = u'E', end = date(2012, 12, 31)))
build_column('caseF', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire : conjoint titulaire d'une pension ou d'une carte d'invalidité (vivant ou décédé l'année de perception des revenus)",
entity = 'foy',
cerfa_field = u'F'))
build_column('caseG', BoolCol(label = u"Titulaire d'une pension de veuve de guerre",
entity = 'foy',
cerfa_field = u'G')) # attention, ne pas confondre caseG et nbG qui se rapportent toutes les 2 à une "case" G, l'une étant une vraie case que l'on remplt et l'autre une case que l'on coche
build_column('caseH', PeriodSizeIndependentIntCol(label = u"Année de naissance des enfants à charge en garde alternée", entity = 'foy',
cerfa_field = u'H'))
# il ne s'agit pas à proprement parlé de la case H, les cases permettant d'indiquer l'année de naissance
# se rapportent bien à nbH mais ne sont pas nommées, choisissons nous de laisser cerfa_field = u'H' pour caseH ?
# De plus les caseH peuvent être multiples puisqu'il peut y avoir plusieurs enfants? donc faut-il les nommer caseH1, caseH2...caseH6 (les 6 présentes dans la déclaration) ?
# il faut aussi créer les cases F, G, R et I qui donnent également les années de naissances des PAC
build_column('caseK', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire: vous avez eu un enfant décédé après l’âge de 16 ans ou par suite de faits de guerre",
entity = 'foy',
cerfa_field = u'K', end = date(2011, 12, 31)))
build_column('caseL', BoolCol(label = u"Situation pouvant donner droit à une demi-part supplémentaire: vous vivez seul au 1er janvier de l'année de perception des revenus et vous avez élevé un enfant pendant au moins 5 ans durant la période où vous viviez seul",
entity = 'foy',
cerfa_field = u'L'))
build_column('caseN', BoolCol(label = u"Vous ne viviez pas seul au 1er janvier de l'année de perception des revenus",
entity = 'foy',
cerfa_field = u'N'))
build_column('caseP', BoolCol(label = u"Titulaire d'une pension pour une invalidité d'au moins 40 % ou d'une carte d'invalidité d'au moins 80%",
entity = 'foy',
cerfa_field = u'P'))
build_column('caseS', BoolCol(label = u"Vous êtes mariés/pacsés et l'un des deux déclarants âgé de plus de 75 ans est titulaire de la carte du combattant ou d'une pension militaire d'invalidité ou de victime de guerre",
entity = 'foy',
cerfa_field = u'S'))
build_column('caseT', BoolCol(label = u"Vous êtes parent isolé au 1er janvier de l'année de perception des revenus",
entity = 'foy',
cerfa_field = u'T'))
build_column('caseW', BoolCol(label = u"Vous ou votre conjoint (même s'il est décédé), âgés de plus de 75 ans, êtes titulaire de la carte du combattant ou d'une pension militaire d'invalidité ou de victime de guerre",
entity = 'foy',
cerfa_field = u'W'))
# pour inv, il faut que tu regardes si tu es d'accord et si c'est bien la bonne case,
# la case P exsite déjà plus bas ligne 339 sous le nom caseP
build_column('invalide', BoolCol(label = u'Invalide')) # TODO: cerfa_field
class nb_par(Variable):
column = PeriodSizeIndependentIntCol(default = 0)
entity_class = Familles
label = u"Nombre d'adultes (parents) dans la famille"
def function(self, simulation, period):
# Note : Cette variable est "instantanée" : quelque soit la période demandée, elle retourne la valeur au premier
# jour, sans changer la période.
quifam_holder = simulation.compute('quifam', period)
quifam = self.filter_role(quifam_holder, role = PART)
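        # The expression below evaluates to 2 when a partner (PART role) is
        # present in the family and to 1 otherwise.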
return period, 1 + 1 * (quifam == PART)
class maries(Variable):
column = BoolCol(default = False)
entity_class = Familles
label = u"maries"
def function(self, simulation, period):
"""couple = 1 si couple marié sinon 0 TODO: faire un choix avec couple ?"""
# Note : Cette variable est "instantanée" : quelque soit la période demandée, elle retourne la valeur au premier
# jour, sans changer la période.
statmarit_holder = simulation.compute('statmarit', period)
statmarit = self.filter_role(statmarit_holder, role = CHEF)
return period, statmarit == 1
class concub(Variable):
column = BoolCol(default = False)
entity_class = Familles
label = u"Indicatrice de vie en couple"
def function(self, simulation, period):
'''
concub = 1 si vie en couple TODO: pas très heureux
'''
# Note : Cette variable est "instantanée" : quelque soit la période demandée, elle retourne la valeur au premier
# jour, sans changer la période.
nb_par = simulation.calculate('nb_par', period)
# TODO: concub n'est pas égal à 1 pour les conjoints
return period, nb_par == 2
class isol(Variable):
column = BoolCol(default = False)
entity_class = Familles
label = u"Parent (s'il y a lieu) isolé"
def function(self, simulation, period):
# Note : Cette variable est "instantanée" : quelque soit la période demandée, elle retourne la valeur au premier
# jour, sans changer la période.
nb_par = simulation.calculate('nb_par', period)
return period, nb_par == 1
class est_enfant_dans_famille(Variable):
column = BoolCol
entity_class = Individus
label = u"Indique qe l'individu est un enfant dans une famille"
def function(self, simulation, period):
quifam = simulation.calculate('quifam', period)
return period, quifam > PART
class etu(Variable):
column = BoolCol(default = False)
entity_class = Individus
label = u"Indicatrice individuelle étudiant"
def function(self, simulation, period):
# Note : Cette variable est "instantanée" : quelque soit la période demandée, elle retourne la valeur au premier
# jour, sans changer la période.
activite = simulation.calculate('activite', period)
return period, activite == 2
class rempli_obligation_scolaire(Variable):
column = BoolCol(default = True)
entity_class = Individus
label = u"Rempli l'obligation scolaire"
class ressortissant_eee(Variable):
column = BoolCol(default = True)
entity_class = Individus
label = u"Ressortissant de l'EEE ou de la Suisse."
class duree_possession_titre_sejour(Variable):
column = IntCol
entity_class = Individus
label = u"Durée depuis laquelle l'individu possède un titre de séjour (en années)"
| adrienpacifico/openfisca-france | openfisca_france/model/caracteristiques_socio_demographiques/demographie.py | Python | agpl-3.0 | 9,098 | 0.024722 |
from __future__ import absolute_import
import re
import math
import contextlib
from .LineTransformProcessor import LineTransformProcessor
import makerbot_driver
class AnchorProcessor(LineTransformProcessor):
def __init__(self):
super(AnchorProcessor, self).__init__()
self.is_bundleable = True
self.code_map = {
            re.compile(r'[^(;]*[gG]1 [XY]-?\d'): self._transform_anchor,
}
self.looking_for_first_move = True
self.speed = 1000
self.width_over_height = .8
def _grab_extruder(self, match):
self.extruder = match.group(2)
def _transform_anchor(self, match):
return_lines = [match.string]
if self.looking_for_first_move:
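            # On the first travel move of the print, emit the anchor
            # extrusion commands before the original move itself.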
start_position = self.get_start_position()
return_lines = list(
self.create_anchor_command(start_position, return_lines[0]))
return_lines.append(match.string)
self.looking_for_first_move = False
return return_lines
def create_z_move_if_necessary(self, start_movement_codes, end_movement_codes):
"""
The platform must be moved up to the extruder to successfully anchor across the platform.
This function checks the location of the platform, and emits the correct G1 command to
move the platform
@param str start_movement_codes: Where the machine is moving from
@param str end_movement_codes: Where the machine is moving to
@return list: List of movements commands to move the platform
"""
return_codes = []
if 'Z' in start_movement_codes and 'Z' in end_movement_codes:
start_z = start_movement_codes['Z']
end_z = end_movement_codes['Z']
            if start_z - end_z != 0:
return_codes.append('G1 Z%f F%i\n' % (end_z, self.speed))
return return_codes
def create_anchor_command(self, start_position, end_position):
"""
Given two G1 commands, draws an anchor between them. Moves the platform if
necessary
@param str start_position: Where the machine is moving from
@param str end_position: Where the machine is moving to
@return list: The anchor commands
"""
assert start_position is not None and end_position is not None
start_movement_codes = makerbot_driver.Gcode.parse_line(
start_position)[0] # Where the Bot is moving from
end_movement_codes = makerbot_driver.Gcode.parse_line(end_position)[0] # Where the bot is moving to
# Construct the next G1 command based on where the bot is moving to
anchor_command = "G1 "
for d in ['X', 'Y', 'Z']:
if d in end_movement_codes:
part = d + str(end_movement_codes[d]) # The next [XYZ] code
anchor_command += part
anchor_command += ' '
anchor_command += 'F%i ' % (self.speed)
extruder = "E"
extrusion_distance = self.find_extrusion_distance(
start_movement_codes, end_movement_codes)
anchor_command += extruder + str(extrusion_distance) + "\n"
reset_command = "G92 %s0" % (extruder) + "\n"
return_codes = self.create_z_move_if_necessary(start_movement_codes, end_movement_codes)
return_codes.extend([anchor_command, reset_command])
return return_codes
def get_extruder(self, codes):
extruder = 'A'
if 'B' in codes:
extruder = 'B'
elif 'E' in codes:
extruder = 'E'
return extruder
def find_extrusion_distance(self, start_position_codes, end_position_codes):
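        # The amount of filament to extrude is the cross-sectional area of the
        # deposited bead multiplied by the XY distance travelled.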
layer_height = end_position_codes.get('Z', 0)
start_position_point = []
end_position_point = []
for d in ['X', 'Y']:
start_position_point.append(start_position_codes.get(d, 0))
end_position_point.append(end_position_codes.get(d, 0))
distance = self.calc_euclidean_distance(
start_position_point, end_position_point)
cross_section = self.feed_cross_section_area(
float(layer_height), self.width_over_height)
extrusion_distance = cross_section * distance
return extrusion_distance
def feed_cross_section_area(self, height, width):
"""
Taken from MG, (hopefully not wrongfully) assumed to work
"""
radius = height / 2.0
tau = math.pi * 2
return (tau / 2.0) * (radius * radius) + height * (width - height)
def calc_euclidean_distance(self, p1, p2):
assert len(p1) == len(p2)
distance = 0.0
for a, b in zip(p1, p2):
distance += pow(a - b, 2)
distance = math.sqrt(distance)
return distance
def get_start_position(self):
start_position = (-112, -73, 150)
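        # Fallback waiting position used when no machine profile is attached.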
if hasattr(self, 'profile') and None != self.profile:
sp = self.profile.values['print_start_sequence']['start_position']
start_position = (sp['start_x'], sp['start_y'], sp['start_z'])
start_codes = "G1 X%s Y%s Z%s F3300.0 (move to waiting position)"
start_codes = start_codes % start_position
return start_codes
| madhuni/AstroBox | src/ext/makerbot_driver/GcodeProcessors/AnchorProcessor.py | Python | agpl-3.0 | 5,212 | 0.002686 |
from django_nose.tools import assert_false, assert_true
from pontoon.base.tests import TestCase
from pontoon.base.utils import extension_in
class UtilsTests(TestCase):
def test_extension_in(self):
assert_true(extension_in('filename.txt', ['bat', 'txt']))
assert_true(extension_in('filename.biff', ['biff']))
assert_true(extension_in('filename.tar.gz', ['gz']))
assert_false(extension_in('filename.txt', ['png', 'jpg']))
assert_false(extension_in('.dotfile', ['bat', 'txt']))
# Unintuitive, but that's how splitext works.
assert_false(extension_in('filename.tar.gz', ['tar.gz']))
| yfdyh000/pontoon | pontoon/base/tests/test_utils.py | Python | bsd-3-clause | 644 | 0 |
from . animation import Animation
from .. layout import strip
class Strip(Animation):
LAYOUT_CLASS = strip.Strip
LAYOUT_ARGS = 'num',
def __init__(self, layout, start=0, end=-1, **kwds):
super().__init__(layout, **kwds)
self._start = max(start, 0)
self._end = end
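        # A negative or out-of-range end index is clamped to the last LED.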
if self._end < 0 or self._end >= self.layout.numLEDs:
self._end = self.layout.numLEDs - 1
self._size = self._end - self._start + 1
from .. import deprecated
if deprecated.allowed():
BaseStripAnim = Strip
| ManiacalLabs/BiblioPixel | bibliopixel/animation/strip.py | Python | mit | 543 | 0.001842 |
import mock
import pytest
import yaml
import json
from awx.api.serializers import JobLaunchSerializer
from awx.main.models.credential import Credential
from awx.main.models.inventory import Inventory, Host
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
from awx.api.versioning import reverse
@pytest.fixture
def runtime_data(organization, credentialtype_ssh):
cred_obj = Credential.objects.create(
name='runtime-cred',
credential_type=credentialtype_ssh,
inputs={
'username': 'test_user2',
'password': 'pas4word2'
}
)
inv_obj = organization.inventories.create(name="runtime-inv")
return dict(
extra_vars='{"job_launch_var": 4}',
limit='test-servers',
job_type='check',
job_tags='provision',
skip_tags='restart',
inventory=inv_obj.pk,
credentials=[cred_obj.pk],
diff_mode=True,
verbosity=2
)
@pytest.fixture
def job_with_links(machine_credential, inventory):
return Job.objects.create(name='existing-job', credential=machine_credential, inventory=inventory)
@pytest.fixture
def job_template_prompts(project, inventory, machine_credential):
def rf(on_off):
jt = JobTemplate.objects.create(
job_type='run',
project=project,
inventory=inventory,
name='deploy-job-template',
# JT values must differ from prompted vals in order to register
limit='webservers',
job_tags = 'foobar',
skip_tags = 'barfoo',
ask_variables_on_launch=on_off,
ask_tags_on_launch=on_off,
ask_skip_tags_on_launch=on_off,
ask_job_type_on_launch=on_off,
ask_inventory_on_launch=on_off,
ask_limit_on_launch=on_off,
ask_credential_on_launch=on_off,
ask_diff_mode_on_launch=on_off,
ask_verbosity_on_launch=on_off,
)
jt.credentials.add(machine_credential)
return jt
return rf
@pytest.fixture
def job_template_prompts_null(project):
return JobTemplate.objects.create(
job_type='run',
project=project,
inventory=None,
name='deploy-job-template',
ask_variables_on_launch=True,
ask_tags_on_launch=True,
ask_skip_tags_on_launch=True,
ask_job_type_on_launch=True,
ask_inventory_on_launch=True,
ask_limit_on_launch=True,
ask_credential_on_launch=True,
ask_diff_mode_on_launch=True,
ask_verbosity_on_launch=True,
)
def data_to_internal(data):
'''
returns internal representation, model objects, dictionaries, etc
as opposed to integer primary keys and JSON strings
'''
internal = data.copy()
if 'extra_vars' in data:
internal['extra_vars'] = json.loads(data['extra_vars'])
if 'credentials' in data:
internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])
if 'inventory' in data:
internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
return internal
# End of setup, tests start here
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(False)
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
runtime_data, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ()
# Check that job is serialized correctly
job_id = response.data['job']
assert job_id == 968
# If job is created with no arguments, it will inherit JT attributes
mock_job.signal_start.assert_called_once()
# Check that response tells us what things were ignored
assert 'job_launch_var' in response.data['ignored_fields']['extra_vars']
assert 'job_type' in response.data['ignored_fields']
assert 'limit' in response.data['ignored_fields']
assert 'inventory' in response.data['ignored_fields']
assert 'credentials' in response.data['ignored_fields']
assert 'job_tags' in response.data['ignored_fields']
assert 'skip_tags' in response.data['ignored_fields']
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars(runtime_data, job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
runtime_data, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
called_with = data_to_internal(runtime_data)
JobTemplate.create_unified_job.assert_called_with(**called_with)
job_id = response.data['job']
assert job_id == 968
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
{'job_tags': '', 'skip_tags': ''}, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'job_tags':'', 'skip_tags':''},)
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):
job_template = job_template_prompts_null
# Give user permission to execute the job template
job_template.execute_role.members.add(rando)
# Give user permission to use inventory and credential at runtime
credential = Credential.objects.get(pk=runtime_data['credentials'][0])
credential.use_role.members.add(rando)
inventory = Inventory.objects.get(pk=runtime_data['inventory'])
inventory.use_role.members.add(rando)
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}),
runtime_data, rando, expect=201)
assert JobTemplate.create_unified_job.called
expected_call = data_to_internal(runtime_data)
assert JobTemplate.create_unified_job.call_args == (expected_call,)
job_id = response.data['job']
assert job_id == 968
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_reject_invalid_prompted_vars(runtime_data, job_template_prompts, post, admin_user):
job_template = job_template_prompts(True)
response = post(
reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(job_type='foobicate', # foobicate is not a valid job type
inventory=87865, credentials=[48474]), admin_user, expect=400)
assert response.data['job_type'] == [u'"foobicate" is not a valid choice.']
assert response.data['inventory'] == [u'Invalid pk "87865" - object does not exist.']
assert response.data['credentials'] == [u'Invalid pk "48474" - object does not exist.']
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_reject_invalid_prompted_extra_vars(runtime_data, job_template_prompts, post, admin_user):
job_template = job_template_prompts(True)
response = post(
reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(extra_vars='{"unbalanced brackets":'), admin_user, expect=400)
assert 'extra_vars' in response.data
assert 'Cannot parse as' in str(response.data['extra_vars'][0])
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_inventory(deploy_jobtemplate, post, admin_user):
deploy_jobtemplate.inventory = None
deploy_jobtemplate.save()
response = post(reverse('api:job_template_launch',
kwargs={'pk': deploy_jobtemplate.pk}), {}, admin_user, expect=400)
assert 'inventory' in response.data['resources_needed_to_start'][0]
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime_data, post, rando):
job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando)
# Assure that giving an inventory without access to the inventory blocks the launch
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(inventory=runtime_data['inventory']), rando, expect=403)
assert response.data['detail'] == u'You do not have permission to perform this action.'
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando):
job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando)
# Assure that giving a credential without access blocks the launch
post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(credentials=runtime_data['credentials']), rando, expect=403)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user):
job_template = job_template_prompts(True)
# Assure that changing the type of a scan job blocks the launch
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(job_type='scan'), admin_user, expect=400)
assert 'job_type' in response.data
@pytest.mark.django_db
def test_job_launch_JT_with_validation(machine_credential, credential, deploy_jobtemplate):
deploy_jobtemplate.extra_vars = '{"job_template_var": 3}'
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.ask_variables_on_launch = True
deploy_jobtemplate.save()
kv = dict(extra_vars={"job_launch_var": 4}, credentials=[machine_credential.pk, credential.pk])
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated, serializer.errors
kv['credentials'] = [machine_credential] # conversion to internal value
job_obj = deploy_jobtemplate.create_unified_job(**kv)
final_job_extra_vars = yaml.load(job_obj.extra_vars)
assert 'job_launch_var' in final_job_extra_vars
assert 'job_template_var' in final_job_extra_vars
assert set([cred.pk for cred in job_obj.credentials.all()]) == set([machine_credential.id, credential.id])
@pytest.mark.django_db
def test_job_launch_with_default_creds(machine_credential, vault_credential, deploy_jobtemplate):
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.credentials.add(vault_credential)
kv = dict()
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated
prompted_fields, ignored_fields, errors = deploy_jobtemplate._accept_or_ignore_job_kwargs(**kv)
job_obj = deploy_jobtemplate.create_unified_job(**prompted_fields)
assert job_obj.credential == machine_credential.pk
assert job_obj.vault_credential == vault_credential.pk
@pytest.mark.django_db
def test_job_launch_JT_enforces_unique_credentials_kinds(machine_credential, credentialtype_aws, deploy_jobtemplate):
"""
JT launching should require that extra_credentials have distinct CredentialTypes
"""
creds = []
for i in range(2):
aws = Credential.objects.create(
name='cred-%d' % i,
credential_type=credentialtype_aws,
inputs={
'username': 'test_user',
'password': 'pas4word'
}
)
aws.save()
creds.append(aws)
kv = dict(credentials=creds, credential=machine_credential.id)
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert not validated
@pytest.mark.django_db
def test_job_launch_with_empty_creds(machine_credential, vault_credential, deploy_jobtemplate, credential):
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.credentials.add(vault_credential)
# `credentials` list is strictly those already present on deploy_jobtemplate
kv = dict(credentials=[credential.pk, machine_credential.pk, vault_credential.pk])
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated, serializer.errors
prompted_fields, ignored_fields, errors = deploy_jobtemplate._accept_or_ignore_job_kwargs(**serializer.validated_data)
job_obj = deploy_jobtemplate.create_unified_job(**prompted_fields)
assert job_obj.credential is deploy_jobtemplate.credential
assert job_obj.vault_credential is deploy_jobtemplate.vault_credential
@pytest.mark.django_db
def test_job_launch_fails_with_missing_vault_password(machine_credential, vault_credential,
deploy_jobtemplate, post, rando):
vault_credential.vault_password = 'ASK'
vault_credential.save()
deploy_jobtemplate.credentials.add(vault_credential)
deploy_jobtemplate.execute_role.members.add(rando)
deploy_jobtemplate.save()
response = post(
reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk}),
rando,
expect=400
)
assert response.data['passwords_needed_to_start'] == ['vault_password']
@pytest.mark.django_db
@pytest.mark.parametrize('launch_kwargs', [
{'vault_password.abc': 'vault-me-1', 'vault_password.xyz': 'vault-me-2'},
{'credential_passwords': {'vault_password.abc': 'vault-me-1', 'vault_password.xyz': 'vault-me-2'}}
])
def test_job_launch_fails_with_missing_multivault_password(machine_credential, vault_credential,
deploy_jobtemplate, launch_kwargs,
get, post, rando):
vault_cred_first = Credential(
name='Vault #1',
credential_type=vault_credential.credential_type,
inputs={
'vault_password': 'ASK',
'vault_id': 'abc'
}
)
vault_cred_first.save()
vault_cred_second = Credential(
name='Vault #2',
credential_type=vault_credential.credential_type,
inputs={
'vault_password': 'ASK',
'vault_id': 'xyz'
}
)
vault_cred_second.save()
deploy_jobtemplate.credentials.add(vault_cred_first)
deploy_jobtemplate.credentials.add(vault_cred_second)
deploy_jobtemplate.execute_role.members.add(rando)
deploy_jobtemplate.save()
url = reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk})
resp = get(url, rando, expect=200)
assert {
'credential_type': vault_cred_first.credential_type_id,
'passwords_needed': ['vault_password.abc'],
'vault_id': u'abc',
'name': u'Vault #1',
'id': vault_cred_first.id
} in resp.data['defaults']['credentials']
assert {
'credential_type': vault_cred_second.credential_type_id,
'passwords_needed': ['vault_password.xyz'],
'vault_id': u'xyz',
'name': u'Vault #2',
'id': vault_cred_second.id
} in resp.data['defaults']['credentials']
assert resp.data['passwords_needed_to_start'] == ['vault_password.abc', 'vault_password.xyz']
assert sum([
cred['passwords_needed'] for cred in resp.data['defaults']['credentials']
if cred['credential_type'] == vault_credential.credential_type_id
], []) == ['vault_password.abc', 'vault_password.xyz']
resp = post(url, rando, expect=400)
assert resp.data['passwords_needed_to_start'] == ['vault_password.abc', 'vault_password.xyz']
with mock.patch.object(Job, 'signal_start') as signal_start:
post(url, launch_kwargs, rando, expect=201)
signal_start.assert_called_with(**{
'vault_password.abc': 'vault-me-1',
'vault_password.xyz': 'vault-me-2'
})
@pytest.mark.django_db
def test_job_launch_fails_with_missing_ssh_password(machine_credential, deploy_jobtemplate, post,
rando):
machine_credential.password = 'ASK'
machine_credential.save()
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.execute_role.members.add(rando)
deploy_jobtemplate.save()
response = post(
reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk}),
rando,
expect=400
)
assert response.data['passwords_needed_to_start'] == ['ssh_password']
@pytest.mark.django_db
def test_job_launch_fails_with_missing_vault_and_ssh_password(machine_credential, vault_credential,
deploy_jobtemplate, post, rando):
vault_credential.vault_password = 'ASK'
vault_credential.save()
machine_credential.password = 'ASK'
machine_credential.save()
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.credentials.add(vault_credential)
deploy_jobtemplate.execute_role.members.add(rando)
deploy_jobtemplate.save()
response = post(
reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk}),
rando,
expect=400
)
assert sorted(response.data['passwords_needed_to_start']) == ['ssh_password', 'vault_password']
@pytest.mark.django_db
def test_job_launch_pass_with_prompted_vault_password(machine_credential, vault_credential,
deploy_jobtemplate, post, rando):
vault_credential.vault_password = 'ASK'
vault_credential.save()
deploy_jobtemplate.credentials.add(machine_credential)
deploy_jobtemplate.credentials.add(vault_credential)
deploy_jobtemplate.execute_role.members.add(rando)
deploy_jobtemplate.save()
with mock.patch.object(Job, 'signal_start') as signal_start:
post(
reverse('api:job_template_launch', kwargs={'pk': deploy_jobtemplate.pk}),
{'vault_password': 'vault-me'},
rando,
expect=201
)
signal_start.assert_called_with(vault_password='vault-me')
@pytest.mark.django_db
def test_job_launch_JT_with_credentials(machine_credential, credential, net_credential, deploy_jobtemplate):
deploy_jobtemplate.ask_credential_on_launch = True
deploy_jobtemplate.save()
kv = dict(credentials=[credential.pk, net_credential.pk, machine_credential.pk])
serializer = JobLaunchSerializer(data=kv, context={'template': deploy_jobtemplate})
validated = serializer.is_valid()
assert validated, serializer.errors
kv['credentials'] = [credential, net_credential, machine_credential] # convert to internal value
prompted_fields, ignored_fields, errors = deploy_jobtemplate._accept_or_ignore_job_kwargs(
_exclude_errors=['required', 'prompts'], **kv)
job_obj = deploy_jobtemplate.create_unified_job(**prompted_fields)
creds = job_obj.credentials.all()
assert len(creds) == 3
assert credential in creds
assert net_credential in creds
assert machine_credential in creds
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_unprompted_vars_with_survey(mocker, survey_spec_factory, job_template_prompts, post, admin_user):
job_template = job_template_prompts(False)
job_template.survey_enabled = True
job_template.survey_spec = survey_spec_factory('survey_var')
job_template.save()
with mocker.patch('awx.main.access.BaseAccess.check_license'):
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
response = post(
reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}),
admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'extra_vars':{'survey_var': 4}},)
job_id = response.data['job']
assert job_id == 968
# Check that the survey variable is accepted and the job variable isn't
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_callback_accept_prompted_extra_var(mocker, survey_spec_factory, job_template_prompts, post, admin_user, host):
job_template = job_template_prompts(True)
job_template.host_config_key = "foo"
job_template.survey_enabled = True
job_template.survey_spec = survey_spec_factory('survey_var')
job_template.save()
with mocker.patch('awx.main.access.BaseAccess.check_license'):
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
with mocker.patch.object(UnifiedJobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]):
post(
reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
admin_user, expect=201, format='json')
assert UnifiedJobTemplate.create_unified_job.called
call_args = UnifiedJobTemplate.create_unified_job.call_args[1]
call_args.pop('_eager_fields', None) # internal purposes
assert call_args == {
'extra_vars': {'survey_var': 4, 'job_launch_var': 3},
'limit': 'single-host'
}
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_callback_ignore_unprompted_extra_var(mocker, survey_spec_factory, job_template_prompts, post, admin_user, host):
job_template = job_template_prompts(False)
job_template.host_config_key = "foo"
job_template.save()
with mocker.patch('awx.main.access.BaseAccess.check_license'):
mock_job = mocker.MagicMock(spec=Job, id=968, extra_vars={"job_launch_var": 3, "survey_var": 4})
with mocker.patch.object(UnifiedJobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation', return_value={}):
with mocker.patch('awx.api.views.JobTemplateCallback.find_matching_hosts', return_value=[host]):
post(
reverse('api:job_template_callback', kwargs={'pk':job_template.pk}),
dict(extra_vars={"job_launch_var": 3, "survey_var": 4}, host_config_key="foo"),
admin_user, expect=201, format='json')
assert UnifiedJobTemplate.create_unified_job.called
call_args = UnifiedJobTemplate.create_unified_job.call_args[1]
call_args.pop('_eager_fields', None) # internal purposes
assert call_args == {
'limit': 'single-host'
}
mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_callback_find_matching_hosts(mocker, get, job_template_prompts, admin_user):
job_template = job_template_prompts(False)
job_template.host_config_key = "foo"
job_template.save()
host_with_alias = Host(name='localhost', inventory=job_template.inventory)
host_with_alias.save()
with mocker.patch('awx.main.access.BaseAccess.check_license'):
r = get(reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
user=admin_user, expect=200)
assert tuple(r.data['matching_hosts']) == ('localhost',)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_callback_extra_var_takes_priority_over_host_name(mocker, get, job_template_prompts, admin_user):
job_template = job_template_prompts(False)
job_template.host_config_key = "foo"
job_template.save()
host_with_alias = Host(name='localhost', variables={'ansible_host': 'foobar'}, inventory=job_template.inventory)
host_with_alias.save()
with mocker.patch('awx.main.access.BaseAccess.check_license'):
r = get(reverse('api:job_template_callback', kwargs={'pk': job_template.pk}),
user=admin_user, expect=200)
assert not r.data['matching_hosts']
| wwitzel3/awx | awx/main/tests/functional/api/test_job_runtime_params.py | Python | apache-2.0 | 26,351 | 0.004326 |
from py2neo.server import GraphServer
from py2neo import Node,Relationship
HISTO_LENGTH = 5
def insert(sentence, tokensAndType):
"""
    Take a sentence and its associated tokens and type, and store them in the db as the last sentence of the dialogue
@type sentence: string
@param sentence: The inserted sentence
@type tokensAndType: list
@param tokensAndType: The sentence's tokens and its SentenceTypes
"""
server = GraphServer("../../../neo4j")
graph=server.graph
# Retrieve all the sentences of the dialogue
sentences = graph.cypher.execute("MATCH (n:Histo)-[r*0..5]->(st:SentenceHisto) RETURN st")
print sentences
numberOfSentences = len(sentences)
# Create a node to insert as the last sentence of the dialogue
sentence = Node("SentenceHisto", sentence=sentence)
sentenceType = graph.find_one("SentenceType",
property_key="label",
property_value = tokensAndType[1][0])
sentenceForm = graph.find_one("SentenceType",
property_key="label",
property_value = tokensAndType[1][1])
    # Link the sentence with its type and its form
is_of_type = Relationship(sentence, "is_of_type", sentenceType)
is_of_form = Relationship(sentence, "is_of_type", sentenceForm) # pos / neg
graph.create(is_of_type)
graph.create(is_of_form)
print 'nb sentences : ' + str(numberOfSentences)
# If we have just started the dialogue we create the root node and store the sentence as its child
if numberOfSentences == 0:
histo = graph.find_one("Histo",
property_key="label",
property_value = "histo")
has = Relationship(histo, "is_followed_by", sentence)
graph.create(has)
    # We only keep a history of the dialogue HISTO_LENGTH sentences long
# So we delete the first sentence if the length is of HISTO_LENGTH
elif numberOfSentences == HISTO_LENGTH:
graph.cypher.execute("MATCH (n:Histo)-[r:is_followed_by*1]->(:SentenceHisto) FOREACH( rel IN r| DELETE rel)")
histo = graph.find_one("Histo",
property_key="label",
property_value = "histo")
has = Relationship(histo, "is_followed_by", sentences[1][0])
graph.create(has)
is_followed_by = Relationship(sentences[-1][0], "is_followed_by", sentence)
graph.create(is_followed_by)
# We insert the sentence in the histo
else:
is_followed_by = Relationship(sentences[-1][0], "is_followed_by", sentence)
graph.create(is_followed_by)
for token in tokensAndType[0]:
print token
tokenNode = graph.find_one("TokenHisto",
property_key="token",
property_value = token[0])
if tokenNode is None:
tokenNode = Node("TokenHisto", token=token[0], pos=token[1])
is_composed_of = Relationship(sentence, "is_composed_of", tokenNode)
graph.create(is_composed_of)
# Delete any existing dialogue history before starting a new one
def clean_histo():
    """
    Delete any existing dialogue history before starting a new one
"""
server = GraphServer("../../../neo4j")
graph=server.graph
graph.cypher.execute("MATCH (n:SentenceHisto)-[rels]-(),(t:TokenHisto) delete rels, n,t")
# Extract all the characters from a movie given a sentence of this movie
def get_sentencesMovieCharacters(sentenceId):
"""
Extract all the characters from a movie given a sentence of this movie
@type sentenceId: integer
@param sentenceId: The id of the sentence
@return: A RecordList of Characters
"""
server = GraphServer("../../../neo4j")
graph=server.graph
query = "MATCH (n:Sentence{id:{sentenceId}})<-[r:IS_COMPOSED_OF*2]-(m:Movie), (m:Movie)-[:IS_COMPOSED_OF*2]->(:Sentence)-[IS_SPOKEN_BY]->(c:Character) RETURN COLLECT(DISTINCT c.full_name) as chars"
results = graph.cypher.execute_one(query, sentenceId=sentenceId)
return results
# Given a history length (how far back we look), compute the next sentence type (affirmative positive, for instance) using pre-processed statistics
def findNextSentenceType(lenghtHisto, depthHisto):
    """
    Given a history length (how far back we look), compute the next sentence type (affirmative positive, for instance) using pre-processed statistics
    @type lenghtHisto: integer
    @param lenghtHisto: The maximal size of the history
@type depthHisto: integer
@param depthHisto: The number of sentences we consider
@return: The next sentence's type
"""
server = GraphServer("../../../neo4j")
graph = server.graph
types =graph.cypher.execute("MATCH (n:Histo)-[r*0.."+str(lenghtHisto)+"]->(sh:SentenceHisto)-[is_of_type]->(st:SentenceType) RETURN st.label AS label")
# Build SentenceType "path"
listTypes=[]
for i in range(len(types)/2):
listTypes.append(types[2*i+1].label +' ' + types[2*i].label)
# Sublist with the good length
if len(listTypes) > depthHisto:
queryTypes = listTypes[-depthHisto:]
else:
queryTypes = listTypes
# Model query :
queryString= "MATCH (s:Stats)"
for label in queryTypes:
queryString+="-->(:TypeStat{label:\'" + label +"\'})"
queryString+="-->(ts:TypeStat) RETURN ts.label AS label ORDER BY ts.prob DESC LIMIT 1"
nextType = graph.cypher.execute(queryString)
return nextType[0].label
# Get the token distribution in the history; only NN* tokens are taken into account
def computeHistoTokenFrequency(lenghtHisto):
    """
    Get the token distribution in the history; only NN* tokens are taken into account
    @type lenghtHisto: integer
    @param lenghtHisto: The maximal size of the history
    @return: A RecordList of token counts
"""
server = GraphServer("../../../neo4j")
graph = server.graph
query = "MATCH (n:Histo)-[:is_followed_by*0.."+str(lenghtHisto)+"]->(sh:SentenceHisto)-[:is_composed_of]->(t:TokenHisto) WHERE t.pos =~ 'NN*' RETURN t.token as token,count(t) as total ORDER by total desc LIMIT 10"
return graph.cypher.execute(query)
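# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a Neo4j server at ../../../neo4j with the "Histo" root node and the
# "SentenceType" nodes already created; the token tuple and the type labels
# below ("affirmation", "positive") are made-up examples of what the tagger
# and classifier might produce.
if __name__ == '__main__':
    clean_histo()
    insert("I really like this movie",
           ([("movie", "NN")], ["affirmation", "positive"]))
    print findNextSentenceType(HISTO_LENGTH, 3)
    print computeHistoTokenFrequency(HISTO_LENGTH)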
| Vongo/anna | src/talk/db.py | Python | gpl-2.0 | 5,871 | 0.027764 |
from collections import Counter
from typing import List, Mapping, Union, Optional
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from bartpy.runner import run_models
from bartpy.sklearnmodel import SklearnModel
ImportanceMap = Mapping[int, float]
ImportanceDistributionMap = Mapping[int, List[float]]
def feature_split_proportions(model: SklearnModel, columns: Optional[List[int]]=None) -> Mapping[int, float]:
split_variables = []
for sample in model.model_samples:
for tree in sample.trees:
for node in tree.nodes:
splitting_var = node.split.splitting_variable
split_variables.append(splitting_var)
counter = Counter(split_variables)
if columns is None:
columns = sorted(list([x for x in counter.keys() if x is not None]))
proportions = {}
for column in columns:
if column in counter.keys():
proportions[column] = counter[column] / len(split_variables)
else:
proportions[column] = 0.0
return proportions
def plot_feature_split_proportions(model: SklearnModel, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1)
proportions = feature_split_proportions(model)
y_pos = np.arange(len(proportions))
name, count = list(proportions.keys()), list(proportions.values())
props = pd.DataFrame({"name": name, "counts": count}).sort_values("name", ascending=True)
plt.barh(y_pos, props.counts, align='center', alpha=0.5)
plt.yticks(y_pos, props.name)
plt.xlabel('Proportion of all splits')
plt.ylabel('Feature')
plt.title('Proportion of Splits Made on Each Variable')
return ax
def null_feature_split_proportions_distribution(model: SklearnModel,
X: Union[pd.DataFrame, np.ndarray],
y: np.ndarray,
n_permutations: int=10) -> Mapping[int, List[float]]:
"""
Calculate a null distribution of proportion of splits on each variable in X
Works by randomly permuting y to remove any true dependence of y on X and calculating feature importance
Parameters
----------
model: SklearnModel
Model specification to work with
X: np.ndarray
Covariate matrix
y: np.ndarray
Target data
n_permutations: int
How many permutations to run
The higher the number of permutations, the more accurate the null distribution, but the longer it will take to run
Returns
-------
Mapping[int, List[float]]
A list of inclusion proportions for each variable in X
"""
inclusion_dict = {x: [] for x in range(X.shape[1])}
y_s = [np.random.permutation(y) for _ in range(n_permutations)]
X_s = [X for _ in y_s]
fit_models = run_models(model, X_s, y_s)
for model in fit_models:
splits_run = feature_split_proportions(model, list(range(X.shape[1])))
for key, value in splits_run.items():
inclusion_dict[key].append(value)
return inclusion_dict
def plot_null_feature_importance_distributions(null_distributions: Mapping[int, List[float]], ax=None) -> None:
if ax is None:
_, ax = plt.subplots(1, 1)
df = pd.DataFrame(null_distributions)
df = pd.DataFrame(df.unstack()).reset_index().drop("level_1", axis=1)
df.columns = ["variable", "p"]
sns.boxplot(x="variable", y="p", data=df, ax=ax)
ax.set_title("Null Feature Importance Distribution")
return ax
def local_thresholds(null_distributions: ImportanceDistributionMap, percentile: float) -> Mapping[int, float]:
"""
Calculate the required proportion of splits to be selected by variable
Creates a null distribution for each variable based on the % of splits including that variable in each of the permuted models
Each variable has its own threshold that is independent of the other variables
Note - this is significantly less stringent than the global threshold
Parameters
----------
null_distributions: ImportanceDistributionMap
A mapping from variable to distribution of split inclusion proportions under the null
percentile: float
The percentile of the null distribution to use as a cutoff.
The closer to 1.0, the more stringent the threshold
Returns
-------
Mapping[int, float]
A lookup from column to % inclusion threshold
"""
return {feature: np.percentile(null_distributions[feature], percentile) for feature in null_distributions}
def global_thresholds(null_distributions: ImportanceDistributionMap, percentile: float) -> Mapping[int, float]:
"""
Calculate the required proportion of splits to be selected by variable
Creates a distribution of the _highest_ inclusion percentage of any variable in each of the permuted models
Threshold is set as a percentile of this distribution
All variables have the same threshold
Note that this is significantly more stringent than the local threshold
Parameters
----------
null_distributions: ImportanceDistributionMap
A mapping from variable to distribution of split inclusion proportions under the null
percentile: float
The percentile of the null distribution to use as a cutoff.
The closer to 1.0, the more stringent the threshold
Returns
-------
Mapping[int, float]
A lookup from column to % inclusion threshold
"""
q_s = []
df = pd.DataFrame(null_distributions)
    # pandas exposes iterrows(), not iter_rows(); collect the row-wise maxima
    for _, row in df.iterrows():
        q_s.append(np.max(row))
threshold = np.percentile(q_s, percentile)
return {feature: threshold for feature in null_distributions}
def kept_features(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[int]:
"""
Extract the features to keep
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[int]
Variable selected for inclusion in the final model
"""
return [x[0] for x in zip(sorted(feature_proportions.keys()), is_kept(feature_proportions, thresholds)) if x[1]]
def is_kept(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[bool]:
"""
Determine whether each variable should be kept after selection
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[bool]
An array of length equal to the width of the covariate matrix
True if the variable should be kept, False otherwise
"""
print(sorted(list(feature_proportions.keys())))
return [feature_proportions[feature] > thresholds[feature] for feature in sorted(list(feature_proportions.keys()))]
def partition_into_passed_and_failed_features(feature_proportions, thresholds):
kept = kept_features(feature_proportions, thresholds)
passed_features = {x[0]: x[1] for x in feature_proportions.items() if x[0] in kept}
failed_features = {x[0]: x[1] for x in feature_proportions.items() if x[0] not in kept}
return passed_features, failed_features
def plot_feature_proportions_against_thresholds(feature_proportions, thresholds, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1)
passed_features, failed_features = partition_into_passed_and_failed_features(feature_proportions, thresholds)
ax.bar(thresholds.keys(), [x * 100 for x in thresholds.values()], width=0.01, color="black", alpha=0.5)
ax.scatter(passed_features.keys(), [x * 100 for x in passed_features.values()], c="g")
ax.scatter(failed_features.keys(), [x * 100 for x in failed_features.values()], c="r")
ax.set_title("Feature Importance Compared to Threshold")
ax.set_xlabel("Feature")
ax.set_ylabel("% Splits")
return ax
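# --- Illustrative usage sketch (not part of the original module) ---
# A hedged end-to-end example of the variable-selection workflow above; the
# synthetic data and the SklearnModel settings are placeholders, not
# recommended values.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 5))
    y = X[:, 0] + 0.1 * rng.normal(size=100)  # only column 0 carries signal
    model = SklearnModel(n_samples=50, n_burn=50, n_trees=20)
    model.fit(X, y)
    proportions = feature_split_proportions(model)
    null_dist = null_feature_split_proportions_distribution(model, X, y, n_permutations=5)
    thresholds = global_thresholds(null_dist, percentile=95)  # np.percentile uses the 0-100 scale
    print(kept_features(proportions, thresholds))  # expect column 0 to survive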
| JakeColtman/bartpy | bartpy/diagnostics/features.py | Python | mit | 8,265 | 0.00363 |
import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Creates a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionViewTests(TestCase):
def test_index_view_with_no_questions(self):
"""
If no questions exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_a_past_question(self):
"""
Questions with a pub_date in the past should be displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_a_future_question(self):
"""
Questions with a pub_date in the future should not be displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
should be displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionIndexDetailTests(TestCase):
def test_detail_view_with_a_future_question(self):
"""
The detail view of a question with a pub_date in the future should
return a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_with_a_past_question(self):
"""
The detail view of a question with a pub_date in the past should
display the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text) | congpc/DjangoExample | mysite/polls/tests.py | Python | bsd-2-clause | 5,010 | 0.001198 |
#
# @lc app=leetcode id=103 lang=python3
#
# [103] Binary Tree Zigzag Level Order Traversal
#
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# @lc code=start
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
ret = []
child = [root]
flag = True
while child:
new_child = []
current_layer = []
for node in child:
if node:
current_layer.append(node.val)
if node.left:
new_child.append(node.left)
if node.right:
new_child.append(node.right)
child = new_child
if current_layer:
if flag:
ret.append(current_layer)
else:
ret.append(current_layer[::-1])
flag = not flag
return ret
# @lc code=end
b = [1,2,3,4,None,None,5] # WA1
a = TreeNode(1)
a.left = TreeNode(2)
a.right = TreeNode(3)
a.left.left = TreeNode(4)
a.right.right = TreeNode(5)
s = Solution()
print(s.zigzagLevelOrder(a))
| heyf/cloaked-octo-adventure | leetcode/0103_binary-tree-zigzag-level-order-traversal.py | Python | mit | 1,261 | 0.008723 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class SpiderItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
brand = Field()
name = Field()
type = Field()
category = Field()
shopname = Field()
productionName = Field()
productId = Field()
url = Field()
price = Field()
promotionInfo = Field()
monthlySalesVolume = Field()
evaluationNum = Field()
#goodEvaluationNum = Field()
date = Field()
commentCount = Field()
averageScore = Field()
goodCount = Field()
goodRate = Field()
generalCount = Field()
generalRate = Field()
poorCount = Field()
poorRate = Field()
showCount = Field()#the comment with picture
commentListPageNum = Field()
imageUrl = Field()
imagePath = Field()
| LaveyD/spider | spider/items.py | Python | gpl-3.0 | 957 | 0.003135 |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
import tornado.web
from handlers.index import IndexHandler
from handlers.sensors import SensorsHandler
import logging
logging.getLogger().setLevel(logging.DEBUG)
app = tornado.web.Application([
(r'/', IndexHandler),
(r'/sensors', SensorsHandler)
])
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
| nextgis/entels_front_demo | entels_demo_tornado/__init__.py | Python | gpl-2.0 | 548 | 0 |
import pytest
from forte.solvers import solver_factory, HF, ActiveSpaceSolver
def test_detci_4():
"""CASCI test of Forte DETCI using the SparseList algorithm to build the sigma vector"""
ref_hf_energy = -99.977636678461636
ref_fci_energy = -100.113732484560970
xyz = """
F
H 1 1.0
"""
input = solver_factory(molecule=xyz, basis='6-31g')
state = input.state(charge=0, multiplicity=1, sym='a1')
hf = HF(input, state=state, e_convergence=1.0e-12, d_convergence=1.0e-8)
# create a detci solver
fci = ActiveSpaceSolver(
hf,
type='detci',
states=state,
mo_spaces=input.mo_spaces(frozen_docc=[1, 0, 0, 0]),
options={'active_ref_type': 'cas'}
)
fci.run()
# check results
assert hf.value('hf energy') == pytest.approx(ref_hf_energy, 1.0e-10)
assert fci.value('active space energy')[state] == pytest.approx([ref_fci_energy], 1.0e-10)
if __name__ == "__main__":
test_detci_4()
| evangelistalab/forte | tests/pytest-methods/detci/test_detci-4.py | Python | lgpl-3.0 | 986 | 0.002028 |
import logging
from mimeprovider.documenttype import get_default_document_types
from mimeprovider.client import get_default_client
from mimeprovider.exceptions import MimeException
from mimeprovider.exceptions import MimeBadRequest
from mimeprovider.mimerenderer import MimeRenderer
from mimeprovider.validators import get_default_validator
__all__ = ["MimeProvider"]
__version__ = "0.1.5"
log = logging.getLogger(__name__)
def build_json_ref(request):
def json_ref(route, document=None, **kw):
ref = dict()
ref["$ref"] = request.route_path(route, **kw)
rel_default = None
if document:
rel_default = getattr(document, "object_type",
document.__class__.__name__)
else:
rel_default = route
ref["rel"] = kw.pop("rel_", rel_default)
return ref
return json_ref
class MimeProvider(object):
def __init__(self, documents=[], **kw):
self.renderer_name = kw.get("renderer_name", "mime")
self.attribute_name = kw.get("attribute_name", "mime_body")
self.error_handler = kw.get("error_handler", None)
self.set_default_renderer = kw.get("set_default_renderer", False)
self.validator = kw.get("validator")
if self.validator is None:
self.validator = get_default_validator()
types = kw.get("types")
if types is None:
types = get_default_document_types()
if not types:
raise ValueError("No document types specified")
self.client = kw.get("client")
if self.client is None:
self.client = get_default_client()
self.type_instances = [t() for t in types]
self.mimeobjects = dict()
self.mimetypes = dict(self._generate_base_mimetypes())
self.error_document_type = kw.get(
"error_document_type",
self.type_instances[0])
self.register(*documents)
def _validate(self, document):
if not hasattr(document, "object_type"):
raise ValueError(
("Object does not have required 'object_type' "
"attribute {0!r}").format(document))
def _generate_base_mimetypes(self):
"""
Generate the base mimetypes as described by non customized document
types.
"""
for t in self.type_instances:
if t.custom_mime:
continue
yield t.mime, (t, None, None)
def _generate_document_mimetypes(self, documents):
for t in self.type_instances:
if not t.custom_mime:
continue
for o in documents:
mimetype = t.mime.format(o=o)
validator = None
if hasattr(o, "schema"):
validator = self.validator(o.schema)
m_value = (mimetype, (t, o, validator))
o_value = (o, (t, mimetype, validator))
yield m_value, o_value
def register(self, *documents):
documents = list(documents)
for document in documents:
self._validate(document)
generator = self._generate_document_mimetypes(documents)
for (m, m_value), (o, o_value) in generator:
self.mimeobjects.setdefault(o, []).append(o_value)
if m not in self.mimetypes:
self.mimetypes[m] = m_value
continue
_, cls, validator = self.mimetypes[m]
_, new_cls, validator = m_value
raise ValueError(
"Conflicting handler for {0}, {1} and {2}".format(
m, cls, new_cls))
def get_client(self, *args, **kw):
return self.client(self.mimetypes, self.mimeobjects, *args, **kw)
def get_mime_body(self, request):
if not request.body or not request.content_type:
return None
result = self.mimetypes.get(request.content_type)
if result is None:
raise MimeBadRequest(
"Unsupported Content-Type: " + request.content_type)
document_type, cls, validator = result
# the specific document does not support deserialization.
if not hasattr(cls, "from_data"):
raise MimeBadRequest(
"Unsupported Content-Type: " +
request.content_type)
return document_type.parse(validator, cls, request.body)
@property
def renderer(self):
if self.error_handler is None:
raise ValueError("No 'error_handler' available")
def setup_renderer(helper):
return MimeRenderer(self.mimetypes, self.error_document_type,
self.error_handler, validator=self.validator)
return setup_renderer
def add_config(self, config):
config.add_renderer(self.renderer_name, self.renderer)
if self.set_default_renderer:
config.add_renderer(None, self.renderer)
config.set_request_property(self.get_mime_body, self.attribute_name,
reify=True)
config.set_request_property(build_json_ref, "json_ref", reify=True)
config.add_view(self.error_handler, context=MimeException,
renderer=self.renderer_name)
| udoprog/mimeprovider | mimeprovider/__init__.py | Python | gpl-3.0 | 5,322 | 0 |
import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.core.spaces import HoloMap
from holoviews.element import Points
from .testplot import TestMPLPlot, mpl_renderer
from ..utils import ParamLogStream
try:
from matplotlib import pyplot
except:
pass
class TestPointPlot(TestMPLPlot):
def test_points_non_numeric_size_warning(self):
data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
with ParamLogStream() as log:
plot = mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ('%s: z dimension is not numeric, '
'cannot use to scale Points size.\n' % plot.name)
self.assertEqual(log_msg, warning)
def test_points_cbar_extend_both(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1,2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'both')
def test_points_cbar_extend_min(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'min')
def test_points_cbar_extend_max(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(None, 2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'max')
def test_points_cbar_extend_clime(self):
img = Points(([0, 1], [0, 3])).opts(style=dict(clim=(None, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'neither')
def test_points_rcparams_do_not_persist(self):
opts = dict(fig_rcparams={'text.usetex': True})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
mpl_renderer.get_plot(points)
self.assertFalse(pyplot.rcParams['text.usetex'])
def test_points_rcparams_used(self):
opts = dict(fig_rcparams={'grid.color': 'red'})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
plot = mpl_renderer.get_plot(points)
ax = plot.state.axes[0]
lines = ax.get_xgridlines()
self.assertEqual(lines[0].get_color(), 'red')
def test_points_padding_square(self):
points = Points([1, 2, 3]).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_curve_padding_square_per_axis(self):
curve = Points([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
plot = mpl_renderer.get_plot(curve)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.4)
def test_points_padding_hard_xrange(self):
points = Points([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_soft_xrange(self):
points = Points([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 3)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_unequal(self):
points = Points([1, 2, 3]).options(padding=(0.05, 0.1))
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_nonsquare(self):
points = Points([1, 2, 3]).options(padding=0.1, aspect=2)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.1)
self.assertEqual(x_range[1], 2.1)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_logx(self):
points = Points([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.89595845984076228)
self.assertEqual(x_range[1], 3.3483695221017129)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_logy(self):
points = Points([1, 2, 3]).options(padding=0.1, logy=True)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], -0.2)
self.assertEqual(x_range[1], 2.2)
self.assertEqual(y_range[0], 0.89595845984076228)
self.assertEqual(y_range[1], 3.3483695221017129)
def test_points_padding_datetime_square(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1
)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 736054.80000000005)
self.assertEqual(x_range[1], 736057.19999999995)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_padding_datetime_nonsquare(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1, aspect=2
)
plot = mpl_renderer.get_plot(points)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 736054.90000000002)
self.assertEqual(x_range[1], 736057.09999999998)
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
def test_points_sizes_scalar_update(self):
hmap = HoloMap({i: Points([1, 2, 3]).opts(s=i*10) for i in range(1, 3)})
plot = mpl_renderer.get_plot(hmap)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_sizes(), np.array([10]))
plot.update((2,))
self.assertEqual(artist.get_sizes(), np.array([20]))
###########################
# Styling mapping #
###########################
def test_point_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_color_op_update(self):
points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color'),
1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
vdims='color')}).options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 0, 1]]))
def test_point_line_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(edgecolors='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_edgecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_line_color_op_update(self):
points = HoloMap({0: Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color'),
1: Points([(0, 0, '#0000FF'), (0, 1, '#00FF00'), (0, 2, '#FF0000')],
vdims='color')}).options(edgecolors='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
plot.update((1,))
self.assertEqual(artist.get_edgecolors(),
np.array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 0, 1]]))
def test_point_fill_color_op(self):
points = Points([(0, 0, '#000000'), (0, 1, '#FF0000'), (0, 2, '#00FF00')],
vdims='color').options(facecolors='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_point_linear_color_op(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([0, 1, 2]))
self.assertEqual(artist.get_clim(), (0, 2))
def test_point_linear_color_op_update(self):
points = HoloMap({0: Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color'),
1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
vdims='color')}).options(color='color', framewise=True)
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (0, 2))
plot.update((1,))
self.assertEqual(artist.get_array(), np.array([2.5, 3, 1.2]))
self.assertEqual(artist.get_clim(), (1.2, 3))
def test_point_categorical_color_op(self):
points = Points([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'A')],
vdims='color').options(color='color')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([0, 1, 0]))
self.assertEqual(artist.get_clim(), (0, 1))
def test_point_size_op(self):
points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims='size').options(s='size')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_sizes(), np.array([1, 4, 8]))
def test_point_size_op_update(self):
points = HoloMap({0: Points([(0, 0, 3), (0, 1, 1), (0, 2, 2)],
vdims='size'),
1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
vdims='size')}).options(s='size')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_sizes(), np.array([3, 1, 2]))
plot.update((1,))
self.assertEqual(artist.get_sizes(), np.array([2.5, 3, 1.2]))
def test_point_line_width_op(self):
points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims='line_width').options(linewidth='line_width')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [1, 4, 8])
def test_point_line_width_op_update(self):
points = HoloMap({0: Points([(0, 0, 3), (0, 1, 1), (0, 2, 2)],
vdims='line_width'),
1: Points([(0, 0, 2.5), (0, 1, 3), (0, 2, 1.2)],
vdims='line_width')}).options(linewidth='line_width')
plot = mpl_renderer.get_plot(points)
artist = plot.handles['artist']
self.assertEqual(artist.get_linewidths(), [3, 1, 2])
plot.update((1,))
self.assertEqual(artist.get_linewidths(), [2.5, 3, 1.2])
def test_point_marker_op(self):
points = Points([(0, 0, 'circle'), (0, 1, 'triangle'), (0, 2, 'square')],
vdims='marker').options(marker='marker')
with self.assertRaises(Exception):
mpl_renderer.get_plot(points)
def test_point_alpha_op(self):
points = Points([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims='alpha').options(alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(points)
def test_op_ndoverlay_value(self):
markers = ['d', 's']
overlay = NdOverlay({marker: Points(np.arange(i))
for i, marker in enumerate(markers)},
'Marker').options('Points', marker='Marker')
plot = mpl_renderer.get_plot(overlay)
for subplot, marker in zip(plot.subplots.values(), markers):
style = dict(subplot.style[subplot.cyclic_index])
style = subplot._apply_transforms(subplot.current_frame, {}, style)
self.assertEqual(style['marker'], marker)
def test_point_color_index_color_clash(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color').options(color='color', color_index='color')
with ParamLogStream() as log:
plot = mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ("%s: Cannot declare style mapping for 'color' option "
"and declare a color_index; ignoring the color_index.\n"
% plot.name)
self.assertEqual(log_msg, warning)
def test_point_size_index_size_clash(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='size').options(s='size', size_index='size')
with ParamLogStream() as log:
plot = mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ("%s: Cannot declare style mapping for 's' option "
"and declare a size_index; ignoring the size_index.\n"
% plot.name)
self.assertEqual(log_msg, warning)
| basnijholt/holoviews | holoviews/tests/plotting/matplotlib/testpointplot.py | Python | bsd-3-clause | 15,100 | 0.002318 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""iobase.RangeTracker implementations provided with Apache Beam.
"""
import logging
import math
import threading
from six import integer_types
from apache_beam.io import iobase
__all__ = ['OffsetRangeTracker', 'LexicographicKeyRangeTracker',
'OrderedPositionRangeTracker', 'UnsplittableRangeTracker']
class OffsetRangeTracker(iobase.RangeTracker):
"""A 'RangeTracker' for non-negative positions of type 'long'."""
# Offset corresponding to infinity. This can only be used as the upper-bound
# of a range, and indicates reading all of the records until the end without
# specifying exactly what the end is.
# Infinite ranges cannot be split because it is impossible to estimate
# progress within them.
OFFSET_INFINITY = float('inf')
def __init__(self, start, end):
super(OffsetRangeTracker, self).__init__()
if start is None:
raise ValueError('Start offset must not be \'None\'')
if end is None:
raise ValueError('End offset must not be \'None\'')
assert isinstance(start, integer_types)
if end != self.OFFSET_INFINITY:
assert isinstance(end, integer_types)
assert start <= end
self._start_offset = start
self._stop_offset = end
self._last_record_start = -1
self._offset_of_last_split_point = -1
self._lock = threading.Lock()
self._split_points_seen = 0
self._split_points_unclaimed_callback = None
def start_position(self):
return self._start_offset
def stop_position(self):
return self._stop_offset
@property
def last_record_start(self):
return self._last_record_start
def _validate_record_start(self, record_start, split_point):
# This function must only be called under the lock self.lock.
if not self._lock.locked():
raise ValueError(
'This function must only be called under the lock self.lock.')
if record_start < self._last_record_start:
raise ValueError(
'Trying to return a record [starting at %d] which is before the '
'last-returned record [starting at %d]' %
(record_start, self._last_record_start))
if split_point:
if (self._offset_of_last_split_point != -1 and
record_start == self._offset_of_last_split_point):
raise ValueError(
'Record at a split point has same offset as the previous split '
'point: %d' % record_start)
elif self._last_record_start == -1:
raise ValueError(
'The first record [starting at %d] must be at a split point' %
record_start)
if (split_point and self._offset_of_last_split_point != -1 and
record_start == self._offset_of_last_split_point):
raise ValueError(
'Record at a split point has same offset as the previous split '
'point: %d' % record_start)
if not split_point and self._last_record_start == -1:
raise ValueError(
'The first record [starting at %d] must be at a split point' %
record_start)
def try_claim(self, record_start):
with self._lock:
self._validate_record_start(record_start, True)
if record_start >= self.stop_position():
return False
self._offset_of_last_split_point = record_start
self._last_record_start = record_start
self._split_points_seen += 1
return True
def set_current_position(self, record_start):
with self._lock:
self._validate_record_start(record_start, False)
self._last_record_start = record_start
def try_split(self, split_offset):
assert isinstance(split_offset, integer_types)
with self._lock:
if self._stop_offset == OffsetRangeTracker.OFFSET_INFINITY:
logging.debug('refusing to split %r at %d: stop position unspecified',
self, split_offset)
return
if self._last_record_start == -1:
logging.debug('Refusing to split %r at %d: unstarted', self,
split_offset)
return
if split_offset <= self._last_record_start:
logging.debug(
'Refusing to split %r at %d: already past proposed stop offset',
self, split_offset)
return
if (split_offset < self.start_position()
or split_offset >= self.stop_position()):
logging.debug(
'Refusing to split %r at %d: proposed split position out of range',
self, split_offset)
return
logging.debug('Agreeing to split %r at %d', self, split_offset)
split_fraction = (float(split_offset - self._start_offset) / (
self._stop_offset - self._start_offset))
self._stop_offset = split_offset
return self._stop_offset, split_fraction
def fraction_consumed(self):
with self._lock:
fraction = ((1.0 * (self._last_record_start - self.start_position()) /
(self.stop_position() - self.start_position())) if
self.stop_position() != self.start_position() else 0.0)
# self.last_record_start may become larger than self.end_offset when
# reading the records since any record that starts before the first 'split
# point' at or after the defined 'stop offset' is considered to be within
# the range of the OffsetRangeTracker. Hence fraction could be > 1.
# self.last_record_start is initialized to -1, hence fraction may be < 0.
# Bounding the to range [0, 1].
return max(0.0, min(1.0, fraction))
def position_at_fraction(self, fraction):
if self.stop_position() == OffsetRangeTracker.OFFSET_INFINITY:
raise Exception(
'get_position_for_fraction_consumed is not applicable for an '
'unbounded range')
return int(math.ceil(self.start_position() + fraction * (
self.stop_position() - self.start_position())))
def split_points(self):
with self._lock:
split_points_consumed = (
0 if self._split_points_seen == 0 else self._split_points_seen - 1)
split_points_unclaimed = (
self._split_points_unclaimed_callback(self.stop_position())
if self._split_points_unclaimed_callback
else iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
split_points_remaining = (
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN if
split_points_unclaimed == iobase.RangeTracker.SPLIT_POINTS_UNKNOWN
else (split_points_unclaimed + 1))
return (split_points_consumed, split_points_remaining)
def set_split_points_unclaimed_callback(self, callback):
self._split_points_unclaimed_callback = callback
class OrderedPositionRangeTracker(iobase.RangeTracker):
"""
An abstract base class for range trackers whose positions are comparable.
Subclasses only need to implement the mapping from position ranges
to and from the closed interval [0, 1].
"""
UNSTARTED = object()
def __init__(self, start_position=None, stop_position=None):
self._start_position = start_position
self._stop_position = stop_position
self._lock = threading.Lock()
self._last_claim = self.UNSTARTED
def start_position(self):
return self._start_position
def stop_position(self):
with self._lock:
return self._stop_position
def try_claim(self, position):
with self._lock:
if self._last_claim is not self.UNSTARTED and position < self._last_claim:
raise ValueError(
"Positions must be claimed in order: "
"claim '%s' attempted after claim '%s'" % (
position, self._last_claim))
elif self._start_position is not None and position < self._start_position:
raise ValueError("Claim '%s' is before start '%s'" % (
position, self._start_position))
if self._stop_position is None or position < self._stop_position:
self._last_claim = position
return True
else:
return False
def position_at_fraction(self, fraction):
return self.fraction_to_position(
fraction, self._start_position, self._stop_position)
def try_split(self, position):
with self._lock:
if ((self._stop_position is not None and position >= self._stop_position)
or (self._start_position is not None
and position <= self._start_position)):
raise ValueError("Split at '%s' not in range %s" % (
position, [self._start_position, self._stop_position]))
if self._last_claim is self.UNSTARTED or self._last_claim < position:
fraction = self.position_to_fraction(
position, start=self._start_position, end=self._stop_position)
self._stop_position = position
return position, fraction
else:
return None
def fraction_consumed(self):
if self._last_claim is self.UNSTARTED:
return 0
else:
return self.position_to_fraction(
self._last_claim, self._start_position, self._stop_position)
def position_to_fraction(self, pos, start, end):
"""
Converts a position `pos` betweeen `start` and `end` (inclusive) to a
fraction between 0 and 1.
"""
raise NotImplementedError
def fraction_to_position(self, fraction, start, end):
"""
Converts a fraction between 0 and 1 to a position between start and end.
"""
raise NotImplementedError
class UnsplittableRangeTracker(iobase.RangeTracker):
"""A RangeTracker that always ignores split requests.
This can be used to make a given
:class:`~apache_beam.io.iobase.RangeTracker` object unsplittable by
ignoring all calls to :meth:`.try_split()`. All other calls will be delegated
to the given :class:`~apache_beam.io.iobase.RangeTracker`.
"""
def __init__(self, range_tracker):
"""Initializes UnsplittableRangeTracker.
Args:
range_tracker (~apache_beam.io.iobase.RangeTracker): a
:class:`~apache_beam.io.iobase.RangeTracker` to which all method
calls expect calls to :meth:`.try_split()` will be delegated.
"""
assert isinstance(range_tracker, iobase.RangeTracker)
self._range_tracker = range_tracker
def start_position(self):
return self._range_tracker.start_position()
def stop_position(self):
return self._range_tracker.stop_position()
def position_at_fraction(self, fraction):
return self._range_tracker.position_at_fraction(fraction)
def try_claim(self, position):
return self._range_tracker.try_claim(position)
def try_split(self, position):
return None
def set_current_position(self, position):
self._range_tracker.set_current_position(position)
def fraction_consumed(self):
return self._range_tracker.fraction_consumed()
def split_points(self):
# An unsplittable range only contains a single split point.
return (0, 1)
def set_split_points_unclaimed_callback(self, callback):
self._range_tracker.set_split_points_unclaimed_callback(callback)
class LexicographicKeyRangeTracker(OrderedPositionRangeTracker):
"""
A range tracker that tracks progress through a lexicographically
ordered keyspace of strings.
"""
@classmethod
def fraction_to_position(cls, fraction, start=None, end=None):
"""
Linearly interpolates a key that is lexicographically
fraction of the way between start and end.
"""
assert 0 <= fraction <= 1, fraction
if start is None:
start = ''
if fraction == 1:
return end
elif fraction == 0:
return start
else:
if not end:
common_prefix_len = len(start) - len(start.lstrip('\xFF'))
else:
for ix, (s, e) in enumerate(zip(start, end)):
if s != e:
common_prefix_len = ix
break
else:
common_prefix_len = min(len(start), len(end))
# Convert the relative precision of fraction (~53 bits) to an absolute
# precision needed to represent values between start and end distinctly.
prec = common_prefix_len + int(-math.log(fraction, 256)) + 7
istart = cls._string_to_int(start, prec)
iend = cls._string_to_int(end, prec) if end else 1 << (prec * 8)
ikey = istart + int((iend - istart) * fraction)
# Could be equal due to rounding.
# Adjust to ensure we never return the actual start and end
# unless fraction is exatly 0 or 1.
if ikey == istart:
ikey += 1
elif ikey == iend:
ikey -= 1
return cls._string_from_int(ikey, prec).rstrip('\0')
@classmethod
def position_to_fraction(cls, key, start=None, end=None):
"""
Returns the fraction of keys in the range [start, end) that
are less than the given key.
"""
if not key:
return 0
if start is None:
start = ''
prec = len(start) + 7
if key.startswith(start):
# Higher absolute precision needed for very small values of fixed
# relative position.
prec = max(prec, len(key) - len(key[len(start):].strip('\0')) + 7)
istart = cls._string_to_int(start, prec)
ikey = cls._string_to_int(key, prec)
iend = cls._string_to_int(end, prec) if end else 1 << (prec * 8)
return float(ikey - istart) / (iend - istart)
@staticmethod
def _string_to_int(s, prec):
"""
Returns int(256**prec * f) where f is the fraction
represented by interpreting '.' + s as a base-256
floating point number.
"""
if not s:
return 0
elif len(s) < prec:
s += '\0' * (prec - len(s))
else:
s = s[:prec]
return int(s.encode('hex'), 16)
@staticmethod
def _string_from_int(i, prec):
"""
Inverse of _string_to_int.
"""
h = '%x' % i
return ('0' * (2 * prec - len(h)) + h).decode('hex')
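# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of the OffsetRangeTracker API defined above; the
# offsets are arbitrary example values.
if __name__ == '__main__':
  tracker = OffsetRangeTracker(0, 100)
  assert tracker.try_claim(10)       # claim a record starting at offset 10
  assert tracker.fraction_consumed() == 0.1
  split = tracker.try_split(50)      # propose splitting the range at offset 50
  assert split == (50, 0.5)
  assert tracker.stop_position() == 50
  assert not tracker.try_claim(60)   # now beyond the reduced stop position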
| tgroh/incubator-beam | sdks/python/apache_beam/io/range_trackers.py | Python | apache-2.0 | 14,379 | 0.00918 |
from django.utils.http import url_has_allowed_host_and_scheme
def get_valid_next_url_from_request(request):
next_url = request.POST.get("next") or request.GET.get("next")
if not next_url or not url_has_allowed_host_and_scheme(
url=next_url, allowed_hosts={request.get_host()}
):
return ""
return next_url
| wagtail/wagtail | wagtail/admin/views/pages/utils.py | Python | bsd-3-clause | 339 | 0 |
from bisect import bisect_left
class Solution(object):
def kEmptySlots(self, flowers, k):
"""
:type flowers: List[int]
:type k: int
:rtype: int
"""
S = []
for ithday, n in enumerate(flowers):
idx = bisect_left(S, n)
if idx > 0 and n - S[idx-1] == k+1:
return ithday + 1
elif idx < len(S) and S[idx] - n == k+1:
return ithday + 1
S.insert(idx, n)
return -1
print Solution().kEmptySlots([1,3,2], 1)
print Solution().kEmptySlots([1,2,3], 1)
| xiaonanln/myleetcode-python | src/683. K Empty Slots.py | Python | apache-2.0 | 479 | 0.043841 |
# grid.py
#
# Copyright 2009 danc <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import numpy as np
from meg import euclid
def cube(location, gridsize, spacing):
    '''make 3d grid with given location of center, gridsize, and spacing
    g = grid.cube(array([1,1,1]),2,1)
    makes a grid (g) centered around location 1,1,1 of size 2, with a spacing of 1'''
gridtmp = np.ones([gridsize,gridsize,gridsize])
grid = spacing * np.squeeze(np.array([np.where(gridtmp)]))
z = np.tile(location,[np.size(grid,1),1])
gridind = (grid + z.T)
newgrid = gridind - np.array([np.mean(gridind,axis=1)]).T + gridind
gridfinal = newgrid.T - (gridind.T-location)
return gridfinal.T#,gridind#, gridtmp,np.array([np.mean(gridtmp,axis=1)]).T,test#,z
def sphere(location, gridsize, spacing):#, radius):
    '''make 3d sphere grid with given location of center, gridsize, and spacing
    (the radius is derived as gridsize*spacing/2)
    g = grid.sphere(array([1,1,1]),12,.5)
    makes a grid (g) centered around location 1,1,1 of size 12, with a spacing of .5'''
radius = (gridsize*spacing)/2.
cgrid = cube(location, gridsize, spacing)
print cgrid.shape, location
e = np.zeros(np.size(cgrid,1))
g = np.copy(e)
for i in range(0,np.size(cgrid,1)):
#e[:,i] = euclid.dist(location[0],cgrid[0][i],location[1],cgrid[1][i],location[2],cgrid[2][i])
e[i] = euclid.dist(location,cgrid[:,i])
#e = e*10
print 'diameter', e.max(), 'mm'
sgrid = cgrid[:,e < radius].reshape([3,np.size(cgrid[:,e < radius])/3])
#cgrid[e > radius].reshape([3,np.size(cgrid[e > radius])/3]) == 0
return sgrid#,cgrid
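# --- Illustrative usage sketch (not part of the original module) ---
# Builds a small cube grid and a sphere grid around an example centre point;
# the centre, size and spacing values are arbitrary, and the meg.euclid module
# must be importable for sphere() to run.
if __name__ == '__main__':
    centre = np.array([1, 1, 1])
    g_cube = cube(centre, 3, 1.0)
    g_sphere = sphere(centre, 12, 0.5)
    print g_cube.shape, g_sphere.shape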
| badbytes/pymeg | meg/grid.py | Python | gpl-3.0 | 2,412 | 0.01534 |
# Maitre D'
# Demonstrates treating a value as a condition
print("Welcome to the Chateau D' Food")
print("It seems we are quite full this evening.\n")
money = int(input("How many dollars do you slip the Maitre D'"))
if money:
print("Ah, I am reminded of a table. Right this way.")
else:
print("Please, sit. It may be a while.")
input("\n\nPress the enter key to exit.")
| rob-nn/python | first_book/maitre_d.py | Python | gpl-2.0 | 377 | 0.007958 |
# -*- coding: utf-8 -*-
# Copyright © 2016 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Simple, inelegant Sphinx extension which adds a directive for a
tabbed parsed-literals that may be switched between in HTML.
version: 0.4
The directive adds these parameters, both optional:
:languages: comma-separated list of pygments languages; default "console"
:tabs: comma-separated list of tabs; default "Linux,Windows"
:mapping: comma-separated list of linked-tabs; default "Linux,Windows"
:copyable: flag to indicate that all text can be "copied"
:single: flag to indicate that only one tab should be used, with no label (not yet implemented)
:independent: flag to indicate that this tab set does not link to another tabs
:dependent: name of tab set this tab belongs to; default "linux-windows"
Separate the code blocks with matching comment lines. Tabs must follow in order of :tabs:
option. Comment labels are for convenience, and don't need to match. Note example uses a
tab label with a space in it, and is enclosed in quotes. Note that the comma-separated
lists must not have spaces in them (outside of quotes); ie, use "java,scala", not
"java, scala".
The mapping maps a tab that is displayed to the trigger that will display it.
For example, you could have a set of tabs:
:tabs: "Mac OS X",Windows
:mapping: linux,windows
:dependent: linux-windows
Clicking on a "Linux" tab in another tab-set would activate the "Mac OS X" tab in this tab set.
The mappings cannot use special characters. If a tab label uses a special character, a
mapping is required; otherwise an error is raised, as the tab cannot be resolved using the defaults.
Note that slightly different rules operate for replacements: a replacement such as
"\|replace|" will work, and the backslash will be interpreted as a single backslash rather
than as escaping the "|".
If there is only one tab, the node is set to "independent" automatically, as there is
nothing to switch. If :languages: is not supplied for the single tab, "shell-session" is
used.
Lines that begin with "$", "#", ">", ">", "cdap >", "cdap >" are treated as command
lines and the text following is auto-selected for copying on mouse-over. (On Safari,
command-V is still required for copying; other browsers support click-copying to the
clipboard.)
FIXME: Implement the ":single:" flag.
Examples:
.. tabbed-parsed-literal::
:languages: console,shell-session
:tabs: "Linux or OS/X",Windows
.. Linux
$ cdap-cli.sh start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
$ curl -o /etc/yum.repos.d/cask.repo http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo
.. Windows
> cdap-cli.bat start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
> <CDAP-SDK-HOME>\libexec\bin\curl.exe -d c:\|release| -X POST 'http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo'
If you pass a single set of commands, without comments, the directive will create a
two-tab "Linux" and "Windows" display with a generated Windows-equivalent command set. Check
the results carefully, and file an issue if it is unable to create the correct command.
Worst-case: you have to use the full format and enter the two commands. Note that any JSON
strings in the commands must be on a single line to convert successfully.
.. tabbed-parsed-literal::
$ cdap-cli.sh start flow HelloWorld.WhoFlow
Successfully started flow 'WhoFlow' of application 'HelloWorld' with stored runtime arguments '{}'
$ curl -o /etc/yum.repos.d/cask.repo http://repository.cask.co/centos/6/x86_64/cdap/|short-version|/cask.repo
.. tabbed-parsed-literal::
:copyable:
:single:
SELECT * FROM dataset_uniquevisitcount ORDER BY value DESC LIMIT 5
Tab sets are either independent or dependent. Independent tabs do not participate in page or site tab setting.
In other words, clicking on a tab does not change any other tabs. Dependent tabs do. Clicking on the "Linux"
tab will change all other tabs to "Linux". You may need to include a mapping listing the relationship, such as this:
.. tabbed-parsed-literal::
:tabs: Linux,Windows,"Distributed CDAP"
:mapping: Linux,Windows,Linux
:languages: console,shell-session,console
...
This maps the tab "Distributed CDAP" to the other "Linux" tabs on the site. Clicking that
tab would change other tabs to the "linux" tab. (Changing to "linux" from another tab will
cause the first "linux" tab to be selected.)
JavaScript and design of tabs was taken from the Apache Spark Project:
http://spark.apache.org/examples.html
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import ParsedLiteral
from docutils.parsers.rst.roles import set_classes
DEFAULT_LANGUAGES = ['console', 'shell-session']
DEFAULT_TABS = ['linux', 'windows']
DEFAULT_TAB_LABELS = ['Linux', 'Windows']
DEFAULT_TAB_SET = 'linux-windows'
TPL_COUNTER = 0
# Sets the handlers for the tabs used by a particular instance of tabbed parsed literal
# Note doubled {{ to pass them through formatting
DEPENDENT_JS_TPL = """\
<script type="text/javascript">
$(function {div_name}() {{
var tabs = {tab_links};
var mapping = {mapping};
var tabSetID = {tabSetID};
for (var i = 0; i < tabs.length; i++) {{
var tab = tabs[i];
$("#{div_name} .example-tab-" + tab).click(changeExampleTab(tab, mapping, "{div_name}", tabSetID));
}}
}});
</script>
"""
# Note doubled {{ to pass them through formatting
INDEPENDENT_JS_TPL = """\
<script type="text/javascript">
function change_{div_name}_ExampleTab(tab) {{
return function(e) {{
e.preventDefault();
var scrollOffset = $(this).offset().top - $(document).scrollTop();
$("#{div_name} .tab-pane").removeClass("active");
$("#{div_name} .tab-pane-" + tab).addClass("active");
$("#{div_name} .example-tab").removeClass("active");
$("#{div_name} .example-tab-" + tab).addClass("active");
$(document).scrollTop($(this).offset().top - scrollOffset);
}}
}}
$(function() {{
var tabs = {tab_links};
for (var i = 0; i < tabs.length; i++) {{
var tab = tabs[i];
$("#{div_name} .example-tab-" + tab).click(change_{div_name}_ExampleTab(tab));
}}
}});
</script>
"""
DIV_START = """
<div id="{div_name}" class="{class}">
"""
NAV_TABS = """
<ul class="nav nav-tabs">
%s</ul>
"""
NAV_TABS_ENTRY = """\
<li class="example-tab example-tab-{tab_link} {active}"><a href="#">{tab_name}</a></li>
"""
TAB_CONTENT_START = """\
<div class="tab-contents">
"""
DIV_END = """
</div>
"""
TAB_CONTENT_ENTRY_START = """\
<div class="tab-pane tab-pane-{tab_link} {active}">
<div class="code code-tab">
"""
DIV_DIV_END = """
</div>
</div>
"""
def dequote(text):
"""
If text has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the text unchanged.
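    For example, dequote('"hello"') and dequote("'hello'") both return 'hello',
    while an unmatched pair such as dequote('"hello') is returned unchanged.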
"""
if (text[0] == text[-1]) and text.startswith(("'", '"')):
return text[1:-1]
return text
def clean_alphanumeric(text):
"""
If text has any non-alphanumeric characters, replace them with a hyphen.
"""
text_clean = ''
for charc in text:
text_clean += charc if charc.isalnum() else '-'
return text_clean
def convert(c, state={}):
"""
Converts a Linux command to a Windows-equivalent following a few simple rules:
- Converts a starting '$' to '>'
- Forward-slashes in 'http[s]' and 'localhost' URIs are preserved
- Other forward-slashes become backslashes
- A lone backslash (the Linux line continuation character) becomes a '^'
- '.sh' commands become '.bat' commands
- removes a "-w'\n'" option from curl commands
- In curl commands, a JSON string (beginning with "-d '{") is converted to all
internal double quotes are escaped and the entire string surrounded in double quotes
- state option allows one line to pass state to the next line to be converted
"""
DEBUG = False
# DEBUG = True
w = []
leading_whitespace = ' ' * (len(c) - len(c.lstrip()))
text_list = c.split()
CLI = 'cdap-cli.sh'
CURL = 'curl'
DATA_OPTIONS = ['-d', '--data', '--data-ascii']
HEADER_OPTIONS = ['-H', '--header']
TRAILING_OPTIONS = ["-w'\\n'", '-w"\\n"']
# Local states
IN_CLI = False
IN_CURL = False
IN_CURL_DATA = False
IN_CURL_DATA_JSON = False
IN_CURL_HEADER = False
IN_CURL_HEADER_ARTIFACT = False
STATE_KEYS = ['IN_CLI', 'IN_CURL', 'IN_CURL_DATA', 'IN_CURL_DATA_JSON', 'IN_CURL_HEADER', 'IN_CURL_HEADER_ARTIFACT']
JSON_OPEN_CLOSE = {
"open_array":"'[",
"open_array_win": "\"[",
"open_object":"'{",
"open_object_win": "\"{",
"open-artifact": "'Artifact-",
"close_array": "]'",
"close_array_win": "]\"",
"close_object": "}'",
"close_object_win": "}\"",
}
# Passed state
for s in STATE_KEYS:
if not state.has_key(s):
state[s] = False
if DEBUG: print "\nconverting: %s\nreceived state: %s" % (c, state)
for i, v in enumerate(text_list):
if DEBUG: print "v:%s" % v # v is the parsed snippet, split on spaces
if v == CLI or state['IN_CLI']:
IN_CLI = True
state['IN_CLI'] = True
if v == CURL or state['IN_CURL']:
IN_CURL = True
state['IN_CURL'] = True
if state['IN_CURL_DATA']:
IN_CURL_DATA = True
if state['IN_CURL_DATA_JSON']:
IN_CURL_DATA_JSON = True
if state['IN_CURL_HEADER']:
IN_CURL_HEADER = True
if state['IN_CURL_HEADER_ARTIFACT']:
IN_CURL_HEADER_ARTIFACT = True
if i == 0 and v == '$':
w.append('>')
for s in STATE_KEYS:
state[s] = False
if DEBUG: print "w.append('>')"
continue
if v.endswith('.sh'):
v = v.replace('.sh', '.bat')
if DEBUG: print "v.replace('.sh', '.bat')"
if v == '\\':
w.append('^')
if IN_CLI:
state['IN_CLI'] = True
if IN_CURL:
state['IN_CURL'] = True
if DEBUG: print "w.append('^')"
continue
if IN_CURL and (v in TRAILING_OPTIONS):
if DEBUG: print "IN_CURL and TRAILING_OPTIONS"
continue
if IN_CURL and (v in DATA_OPTIONS):
if DEBUG: print "IN_CURL and DATA_OPTIONS"
IN_CURL_DATA = True
state['IN_CURL_DATA'] = True
w.append(v)
continue
if IN_CURL and (v in HEADER_OPTIONS):
if DEBUG: print "IN_CURL and HEADER_OPTIONS"
IN_CURL_HEADER = True
state['IN_CURL_HEADER'] = True
w.append(v)
continue
if IN_CURL and IN_CURL_DATA:
if DEBUG: print "IN_CURL and IN_CURL_DATA"
if DEBUG: print "IN_CURL_DATA_JSON: %s" % IN_CURL_DATA_JSON
state['IN_CURL'] = True
if v.startswith(JSON_OPEN_CLOSE["open_array"]) or v.startswith(JSON_OPEN_CLOSE["open_object"]):
if DEBUG: print "Start of json"
IN_CURL_DATA_JSON = True
state['IN_CURL_DATA_JSON'] = True
w.append("\"%s" % v.replace('"', '\\"')[1:])
elif v.endswith(JSON_OPEN_CLOSE["close_array"]) or v.endswith(JSON_OPEN_CLOSE["close_object"]):
if DEBUG: print "End of json"
w.append("%s\"" % v.replace('"', '\\"')[:-1])
IN_CURL_DATA = False
state['IN_CURL_DATA'] = False
IN_CURL_DATA_JSON = False
state['IN_CURL_DATA_JSON'] = False
elif IN_CURL_DATA_JSON:
if DEBUG: print "json..."
w.append(v.replace('"', '\\"'))
else:
if DEBUG: print "data..."
w.append(v)
continue
if IN_CURL and IN_CURL_HEADER:
if DEBUG: print "IN_CURL and IN_CURL_HEADER"
state['IN_CURL'] = True
if v.startswith(JSON_OPEN_CLOSE["open-artifact"]):
if DEBUG: print "Start of json"
IN_CURL_HEADER_ARTIFACT = True
# Don't pass this state, as we aren't tracking where the end is, and assume it is at end-of-line
# To track the end, we would need to push and pop opening and closing quotes...
# state['IN_CURL_HEADER_ARTIFACT'] = True
w.append("\"%s" % v.replace('"', '\\"')[1:])
continue
elif IN_CURL_HEADER_ARTIFACT:
if DEBUG: print "json...escaping double-quotes and replacing single-quotes"
w.append(v.replace('"', '\\"').replace("'", '"'))
else:
# Currently, won't reach this, as once IN_CURL_HEADER_ARTIFACT we never leave until end-of-line
if DEBUG: print "data..."
w.append(v)
continue
if (IN_CLI or IN_CURL) and v.startswith('"'):
if DEBUG: print "v.startswith('\"')"
w.append(v)
continue
if v.find('/') != -1:
if DEBUG: print "found slash: IN_CLI: %s v: %s" % (IN_CLI, v)
if (v.startswith('localhost') or v.startswith('"localhost') or v.startswith('"http:')
or v.startswith('"https:') or v.startswith('http:') or v.startswith('https:')):
if DEBUG: print "v.startswith..."
w.append(v)
continue
if IN_CLI:
if i > 0 and text_list[i-1] in ['body:file', 'artifact']:
if DEBUG: print "IN_CLI and path"
else:
w.append(v)
continue
w.append(v.replace('/', '\\'))
else:
if DEBUG: print "didn't find slash"
w.append(v)
if DEBUG: print "converted to: %s\npassing state: %s" % (leading_whitespace + ' '.join(w), state)
return leading_whitespace + ' '.join(w), state
class TabbedParsedLiteralNode(nodes.literal_block):
"""TabbedParsedLiteralNode is an extended literal_block that supports replacements."""
def cleanup(self):
for i, v in enumerate(self.traverse()):
if isinstance(v, nodes.Text):
t = v.astext()
if t.endswith('.\ ') or t.endswith('=\ '):
t = t[:-2]
if t.find('\`') != -1:
t = t.replace('\`', '`')
if t != v.astext():
self.replace(v, nodes.Text(t))
class TabbedParsedLiteral(ParsedLiteral):
"""TabbedParsedLiteral is a set of different blocks"""
option_spec = dict(dependent=directives.unchanged_required,
independent=directives.flag,
languages=directives.unchanged_required,
mapping=directives.unchanged_required,
tabs=directives.unchanged_required,
copyable=directives.flag,
single=directives.flag,
**ParsedLiteral.option_spec)
has_content = True
def cleanup_content(self):
"""Parses content, looks for comment markers, removes them, prepares backslashes.
Calculates size for each block.
"""
content = self.content
text_block = '\n'.join(content)
if not text_block.startswith('.. ') or text_block.index('\n.. ') == -1:
# There are no comments... generating a Windows-equivalent code
LINUX = ['.. Linux', '']
WINDOWS = ['', '.. Windows', '']
old_content = []
new_content = []
state = {}
for line in self.content:
old_content.append(line)
new_line, state = convert(line, state)
new_content.append(new_line)
content = LINUX + old_content + WINDOWS + new_content
# print "old_content:\n%s\n" % ('\n'.join(old_content))
# print "new_content:\n%s\n" % ('\n'.join(new_content))
line_sets = []
line_set = []
for line in content:
if line.startswith('.. '):
if line_set:
line_sets.append(line_set)
line_set = []
else:
line_set.append(line)
line_sets.append(line_set)
line_counts = []
lines = []
for line_set in line_sets:
block = '\n'.join(line_set).rstrip()
block = block.replace('\\', '\\\\')
block = block.replace('\\|', '\\\ |')
block = block.replace('*', '\*')
block = block.replace(' |-', ' \|-')
block = block.replace('\n|-', '\n\|-')
block = block.replace(' |+', ' \|+')
block = block.replace('\n|+', '\n\|+')
if not block.endswith('\n'):
block += '\n'
lines.append(block)
line_counts.append(block.count('\n') +1)
return line_counts, lines
def cleanup_option(self, option, default, aphanumeric_only=False):
"""Removes leading or trailing quotes or double-quotes from a string option."""
_option = self.options.get(option,'')
if not _option:
return default
else:
return clean_alphanumeric(dequote(_option)) if aphanumeric_only else dequote(_option)
def cleanup_options(self, option, default, aphanumeric_only=False, lower=False):
"""
Removes leading or trailing quotes or double-quotes from a string option list.
Removes non-aphanumeric characters if aphanumeric_only true.
Converts from Unicode to string
"""
_option = self.options.get(option,'')
if not _option:
return default
else:
_options = []
for s in _option.split(","):
s = dequote(s)
s = clean_alphanumeric(s) if aphanumeric_only else s
s = s.lower() if lower else s
_options.append(str(s))
return _options
def run(self):
set_classes(self.options)
self.assert_has_content()
line_counts, lines = self.cleanup_content()
text = '\n'.join(lines)
# Sending text to state machine for inline text replacement
text_nodes, messages = self.state.inline_text(text, self.lineno)
# Debugging Code start
# if messages:
# print "text:\n%s" % text
# print "text_nodes:\n%s" % text_nodes
# for n in text_nodes:
# print "n:\n%s" % n
# print 'messages:'
# for m in messages:
# print m
# Debugging Code end
node = TabbedParsedLiteralNode(text, '', *text_nodes, **self.options)
node.cleanup()
node.line = self.content_offset + 1
self.add_name(node)
node['copyable'] = self.options.has_key('copyable')
node['independent'] = self.options.has_key('independent')
node['languages'] = self.cleanup_options('languages', DEFAULT_LANGUAGES)
node['line_counts'] = line_counts
node['linenos'] = self.cleanup_options('linenos', '')
node['single'] = self.options.has_key('single')
node['tab_labels'] = self.cleanup_options('tabs', DEFAULT_TAB_LABELS)
node['tabs'] = self.cleanup_options('tabs', DEFAULT_TABS, aphanumeric_only=True, lower=True)
tab_count = len(node['tabs'])
if tab_count == 1:
# If only one tab, force to be independent
node['independent'] = True
# If languages were not supplied, make it a shell-session
if not self.options.has_key('languages'):
node['languages'] = [DEFAULT_LANGUAGES[1]]
if tab_count != len(node['languages']):
print "Warning: tabs (%s) don't match languages (%s)" % (node['tabs'], node['languages'])
node['languages'] = [DEFAULT_LANGUAGES[0]] * tab_count
if not node['independent']:
node['dependent'] = self.cleanup_option('dependent', DEFAULT_TAB_SET)
node['mapping'] = self.cleanup_options('mapping', node['tabs'], aphanumeric_only=True, lower=True)
if tab_count != len(node['mapping']):
print "Warning: tabs (%s) don't match mapping (%s)" % (node['tabs'], node['mapping'])
if tab_count > 1:
node['mapping'] = DEFAULT_TABS + [DEFAULT_TABS[0]] * (tab_count -2)
else:
node['mapping'] = [DEFAULT_TABS[0]] * tab_count
return [node] + messages
def visit_tpl_html(self, node):
"""Visit a Tabbed Parsed Literal node"""
global TPL_COUNTER
TPL_COUNTER += 1
def _highlighter(node, text, lang='console'):
linenos = text.count('\n') >= \
self.highlightlinenothreshold - 1
highlight_args = node.get('highlight_args', {})
if lang:
# code-block directives
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
text, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
copyable = node.get('copyable')
new_highlighted = ['','<!-- tabbed-parsed-literal start -->',]
if lang in ['console', 'shell-session', 'ps1', 'powershell']:
# print "highlighted (before):\n%s" % highlighted
# Console-specific highlighting
new_highlighted = ['','<!-- tabbed-parsed-literal start -->',]
continuing_line = False # Indicates current line continues to next
continued_line = False # Indicates current line was continued from previous
copyable_text = False # Indicates that the line (or the previous) now has copyable text in it
for l in highlighted.splitlines():
if copyable:
t = "<pre>"
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"copyable-text\">%s" % (l[:i], l[len(t)+i:])
t = "</pre>"
i = l.find(t)
if i != -1:
l = "%s</span></pre>%s" % (l[:i], l[len(t)+i:])
else:
continuing_line = False
if l:
continuing_line = l.endswith('\\</span>') or l.endswith('^</span>')
# print "continuing_line: %s continued_line: %s l: %s" % (continuing_line, continued_line, l)
for p in ['$', '#', '>', '>', 'cdap >', 'cdap >']:
if l.startswith(p):
l = "<span class=\"gp\">%s</span><span class=\"copyable copyable-text\">%s" % (p, l[1:])
copyable_text = True
break
t = "<pre>%s " % p
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"gp\">%s </span><span \"copyable-text\">%s" % (l[:i], p, l[len(t)+i:])
copyable_text = True
break
t = "<pre><span class=\"go\">%s " % p
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"gp\">%s </span><span class=\"copyable-text\"><span class=\"go\">%s" % (l[:i], p, l[len(t)+i:])
copyable_text = True
break
t = "<pre><span class=\"gp\">%s</span> " % p
i = l.find(t)
if i != -1:
l = "%s<pre class=\"copyable\"><span class=\"gp\">%s </span><span class=\"copyable-text\">%s" % (l[:i], p, l[len(t)+i:])
copyable_text = True
break
t = "<span class=\"go\">%s " % p
if l.startswith(t):
if continued_line:
l = "<span class=\"gp\">%s </span><span class=\"go\">%s" % (p, l[len(t):])
else:
l = "<span class=\"gp\">%s </span><span class=\"copyable-text\"><span class=\"go\">%s" % (p, l[len(t):])
copyable_text = True
break
t = "<span class=\"gp\">%s</span> " % p
if l.startswith(t):
if continued_line:
l = "<span class=\"gp\">%s </span>%s" % (p, l[len(t):])
else:
l = "<span class=\"gp\">%s </span><span class=\"copyable-text\">%s" % (p, l[len(t):])
copyable_text = True
break
# print "continuing_line: %s continued_line: %s copyable_text: %s l: %s" % (continuing_line, continued_line, copyable_text, l)
if (continued_line and (not continuing_line)) or (not continued_line and not continuing_line and copyable_text):
# print "continued_line: %s continuing_line: %s copyable_text: %s" % (continued_line, continuing_line, copyable_text)
# End the copyable-text
l += "</span>"
copyable_text = False
new_highlighted.append(l)
# Set next line status
continued_line = continuing_line
else:
new_highlighted += highlighted.splitlines()
new_highlighted.append('<!-- tabbed-parsed-literal end -->')
# print "\nhighlighted (after):\n%s\n\n" % '\n'.join(new_highlighted)
return '\n'.join(new_highlighted)
nav_tabs_html = ''
tab_content_html = ''
languages = node.get('languages')
line_counts = node.get('line_counts')
tabs = node.get('tabs')
tab_labels = node.get('tab_labels')
node_mapping = node.get('mapping')
dependent = node.get('dependent')
clean_tab_links = []
mapping = {}
i = 0
if node_mapping:
for m in node_mapping:
if m in clean_tab_links:
i += 1
m = "%s%d" % (m, i)
clean_tab_links.append(m)
for i in range(len(clean_tab_links)):
mapping[clean_tab_links[i]] = node_mapping[i]
else:
# Independent tabs use the tab for the link
clean_tab_links = tabs
div_name = 'tabbedparsedliteral{0}'.format(TPL_COUNTER)
fill_div_options = {'div_name': div_name}
if node.get('independent'):
# Independent node, doesn't participate in clicks with other nodes and has no mapping
fill_div_options['class'] = 'independent'
js_options = {'tab_links':clean_tab_links, 'div_name':div_name}
js_tpl = INDEPENDENT_JS_TPL
else:
# Dependent node
fill_div_options['class'] = "dependent-%s" % dependent
js_options = {'tab_links':clean_tab_links,
'mapping':repr(mapping),
'div_name':div_name,
'tabSetID':repr(dependent),
}
js_tpl = DEPENDENT_JS_TPL
start_html = js_tpl.format(**js_options) + DIV_START.format(**fill_div_options)
text_list = node.astext().split('\n')
offset = 0
for index in range(len(tabs)):
lang, lines = languages[index], line_counts[index]
tab_name, tab_link = tab_labels[index], clean_tab_links[index]
start_tag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang)
tab_text = text_list[offset:offset + lines]
offset += lines
# Strip any leading empty lines
text = ''
for line in tab_text:
if not line and not text:
continue
elif not text:
text = line
else:
text += '\n' + line
highlighted = _highlighter(node, text, lang)
tab_options = {'active': 'active' if not index else '',
'tab_link': tab_link,
'tab_name': tab_name,}
nav_tabs_html += NAV_TABS_ENTRY.format(**tab_options)
tab_entry_start = TAB_CONTENT_ENTRY_START.format(**tab_options)
tab_content_html += tab_entry_start + start_tag + highlighted + DIV_END + DIV_DIV_END
nav_tabs_html = NAV_TABS % nav_tabs_html
tab_content_html = TAB_CONTENT_START + tab_content_html + DIV_END
self.body.append(start_html + nav_tabs_html + tab_content_html + DIV_END)
raise nodes.SkipNode
def depart_tpl_html(self, node):
"""Depart a Tabbed Parsed Literal node"""
# Stub because of SkipNode in visit
def setup(app):
app.add_directive('tabbed-parsed-literal', TabbedParsedLiteral)
app.add_node(TabbedParsedLiteralNode, html=(visit_tpl_html, depart_tpl_html))
| caskdata/cdap | cdap-docs/_common/tabbed-parsed-literal.py | Python | apache-2.0 | 30,649 | 0.00757 |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "simulation"
PROJECT_SPACE_DIR = "/home/stagsted/UAST/Module6/devel"
PROJECT_VERSION = "0.0.0"
| TobiasLundby/UAST | Module6/build/simulation/catkin_generated/pkg.develspace.context.pc.py | Python | bsd-3-clause | 377 | 0 |
#
# Copyright (C) 2011 Red Hat, Inc.
#
# Author: Angus Salkeld <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
import os
import time
import libxml2
import exceptions
import uuid
import subprocess
import shutil
from pwd import getpwnam
from nova import flags
from nova import log
from nova import db
from nova import exception
from nova import utils
from nova.auth import manager
from pcloudsh import pcmkconfig
from pcloudsh import deployable
from pcloudsh import assembly
from pcloudsh import assembly_factory
FLAGS = flags.FLAGS
FLAGS.logging_context_format_string = ' %(levelname)s %(message)s'
FLAGS.logging_default_format_string = ' %(levelname)s %(message)s'
FLAGS.logging_debug_format_suffix = ' [%(filename)s:%(lineno)d]'
log.setup()
class OpenstackDeployable(deployable.Deployable):
def __init__(self, factory, name, username):
self.infrastructure = 'openstack'
self.username = username
deployable.Deployable.__init__(self, factory, name)
# TODO flagfile
FLAGS.state_path = '/var/lib/nova'
FLAGS.lock_path = '/var/lib/nova/tmp'
FLAGS.credentials_template = '/usr/share/nova/novarc.template'
FLAGS.sql_connection = 'mysql://nova:nova@localhost/nova'
self.conf.load_novarc(name)
def create(self):
nova_manager = manager.AuthManager()
uid = 0
gid = 0
try:
user_info = getpwnam(self.username)
uid = user_info[2]
gid = user_info[3]
except KeyError as ex:
print ex
return False
proj_exists = True
try:
projs = nova_manager.get_projects(self.username)
if not self.name in projs:
proj_exists = False
except:
proj_exists = False
try:
if not proj_exists:
nova_manager.create_project(self.name, self.username,
'Project %s created by pcloudsh' % (self.name))
except (exception.UserNotFound, exception.ProjectExists) as ex:
print ex
return False
os.mkdir(os.path.join(self.conf.dbdir, self.name))
zipfilename = os.path.join(self.conf.dbdir, self.name, 'nova.zip')
try:
zip_data = nova_manager.get_credentials(self.username, self.name)
with open(zipfilename, 'w') as f:
f.write(zip_data)
except (exception.UserNotFound, exception.ProjectNotFound) as ex:
print ex
return False
except db.api.NoMoreNetworks:
print ('*** No more networks available. If this is a new '
'installation, you need\nto call something like this:\n\n'
' nova-manage network create pvt 10.0.0.0/8 10 64\n\n')
return False
except exception.ProcessExecutionError, e:
print e
print ("*** The above error may show that the certificate db has "
"not been created.\nPlease create a database by running "
"a nova-api server on this host.")
return False
os.chmod(zipfilename, 0600)
os.chown(zipfilename, uid, gid)
novacreds = os.path.join(self.conf.dbdir, self.name, 'novacreds')
os.mkdir(novacreds)
os.system('unzip %s -d %s' % (zipfilename, novacreds))
os.system('ssh-keygen -f %s' % os.path.join(novacreds, 'nova_key'))
self.conf.load_novarc(self.name)
cwd = os.getcwd()
os.chdir(novacreds)
os.system('euca-add-keypair nova_key > nova_key.priv')
os.chdir(cwd)
for fn in os.listdir(novacreds):
if 'nova' in fn:
os.chown(os.path.join(novacreds, fn), uid, gid)
os.chmod(os.path.join(novacreds, fn), 0600)
return True
def delete(self):
nova_manager = manager.AuthManager()
if os.access(os.path.join(self.conf.dbdir, self.name), os.R_OK):
shutil.rmtree(os.path.join(self.conf.dbdir, self.name))
print ' deleted nova project key and environment'
try:
nova_manager.delete_project(self.name)
print ' deleted nova project'
except exception.ProjectNotFound as ex:
print ex
| sdake/pacemaker-cloud | src/pcloudsh/openstack_deployable.py | Python | gpl-2.0 | 4,878 | 0.001845 |
from .fake import UserAgent # noqa
| FireBladeNooT/Medusa_1_6 | lib/fake_useragent/__init__.py | Python | gpl-3.0 | 36 | 0 |
#_*_coding: utf-8 _*_
#__author__ = 'Alihanniba'
import urllib.request
# from urllib.request import urlopen
import urllib.error
import re
import os
import taobaotool
import time
class Spider:
def __init__(self):
self.siteUrl = 'http://mm.taobao.com/json/request_top_list.htm'
self.tool = taobaotool.Tool()
def getPage(self, pageIndex):
url = self.siteUrl + '?page=' + str(pageIndex)
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
content = response.read().decode('gbk')
return content
def getContents(self, pageIndex):
page = self.getPage(pageIndex)
pattern = re.compile('<div class="list-item".*?pic-word.*?<a href="(.*?)".*?<img src="(.*?)".*?<a class="lady-name.*?>(.*?)</a>.*?<strong>(.*?)</strong>.*?<span>(.*?)</span>',re.S)
items = re.findall(pattern, str(page))
contents = []
for item in items:
contents.append([item[0], item[1], item[2], item[3], item[4]])
print(item[0], item[1], item[2], item[3], item[4])
return contents
def getDetailPage(self, infoURL):
response = urllib.request.urlopen(infoURL)
return response.read().decode('gbk')
def getBrief(self, page):
pattern = re.compile('<div class="mm-aixiu-content".*?>(.*?)<!--',re.S)
result = re.search(pattern, str(page))
return self.tool.replace(result.group(1))
def getAllImg(self, page):
pattern = re.compile('<div class="mm-aixiu-content".*?>(.*?)<!--',re.S)
content = re.search(pattern, str(page))
patternImg = re.compile('<img.*?src="(.*?)"',re.S)
images = re.findall(patternImg, str(content.group(1)))
return images
def saveImgs(self, images, name):
number = 1
        print('Found', len(images), 'images for', name)
for imageURL in images:
splitPage = imageURL.split('.')
fTail = splitPage.pop()
if len(fTail) > 3:
fTail = 'jpg'
fileName = name + '/' + str(number) + '.' + fTail
self.saveImg(imageURL, fileName)
number += 1
def saveImg(self, imgUrl, fileName):
u = urllib.request.urlopen(imgUrl)
data = u.read()
f = open(fileName, 'wb')
f.write(data)
        print('Saving image as', fileName)
f.close()
def saveIcon(self, iconURL, name):
splitPath = iconURL.split('.')
fTail = splitPath.pop()
fileName = name + '/icon.' + fTail
self.saveImg(iconURL, fileName)
def saveBrief(self, content, name):
fileName = name + '/' + name + '.txt'
        f = open(fileName, 'w+', encoding='utf-8')
        print("Saving info as", fileName)
        f.write(content)
        f.close()
def mkdir(self, path):
path = path.strip()
        isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
return True
else:
return False
def savePageInfo(self, pageIndex):
contents = self.getContents(pageIndex)
for item in contents:
detailURL = item[0]
detailPage = self.getDetailPage(detailURL)
brief = self.getBrief(detailPage)
images = self.getAllImg(detailPage)
self.mkdir(item[2])
self.saveBrief(brief, item[2])
self.saveIcon(item[1], item[2])
self.saveImgs(images, item[2])
def savePagesInfo(self, start, end):
for i in range(start, end + 1):
self.savePageInfo(i)
spider = Spider()
spider.savePagesInfo(2, 10) | alihanniba/tornado-awesome | scrapy/taobaomm.py | Python | apache-2.0 | 3,677 | 0.004402 |
from itertools import *
for i, s in zip(count(), repeat('over-and-over', 5)):
print(i, s)
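# Expected output -- zip() stops once the five repeats are exhausted:
#   0 over-and-over
#   1 over-and-over
#   2 over-and-over
#   3 over-and-over
#   4 over-and-over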
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_algorithms/itertools_repeat_zip.py | Python | apache-2.0 | 96 | 0.010417 |
from typing import Dict, List
import numpy as np
import hybrid_model
from evaluation import evaluation_metrics
from evaluation import evaluation_parting
metrics_rmse = {'rmse': evaluation_metrics.Rmse()}
metrics_rmse_prec = {'rmse': evaluation_metrics.Rmse(),
'prec@5': evaluation_metrics.Precision(5)}
metrics_all = {'rmse': evaluation_metrics.Rmse(),
'mae': evaluation_metrics.Mae(),
'prec@5': evaluation_metrics.Precision(5),
'ndcg@5': evaluation_metrics.Ndcg(5)}
parting_full = {'full': evaluation_parting.Full()}
def get_parting_all(n_bins):
parting = {'full': evaluation_parting.Full()}
parting.update({'user_{}'.format(i+1):
evaluation_parting.BinningUser(n_bins, i) for i in range(n_bins)})
parting.update({'item_{}'.format(i+1):
evaluation_parting.BinningItem(n_bins, i) for i in range(n_bins)})
return parting
class Evaluation:
def __init__(self,
metrics: Dict[str, evaluation_metrics.Metric] = metrics_rmse_prec,
parts: Dict[str, evaluation_parting.Parting] = parting_full):
self.metrics = metrics
self.parts = parts
def evaluate_hybrid(self, model: 'hybrid_model.hybrid.HybridModel', x_test: List[np.ndarray], y_test: np.ndarray) \
-> 'EvaluationResultHybrid':
result = EvaluationResultHybrid()
result.cf = self.evaluate(model.model_cf, x_test, y_test)
result.md = self.evaluate(model.model_md, x_test, y_test)
return result
def evaluate(self, model: 'hybrid_model.models.AbstractModel', x_test: List[np.ndarray], y_test: np.ndarray) \
-> 'EvaluationResult':
result = EvaluationResult()
for part, parting in self.parts.items():
x_test_part, y_test_part = parting.part(x_test, y_test)
result_part = self.evaluate_part(model, x_test_part, y_test_part)
result.parts[part] = result_part
return result
def evaluate_part(self, model: 'hybrid_model.models.AbstractModel', x_test: List[np.ndarray], y_test: np.ndarray) \
-> 'EvaluationResultPart':
result = EvaluationResultPart()
y_pred = model.predict(x_test)
for measure, metric in self.metrics.items():
result.results[measure] = metric.calculate(y_test, y_pred, x_test)
return result
def get_results_class(self):
return EvaluationResults(self.metrics, self.parts)
def get_results_hybrid_class(self):
return EvaluationResultsHybrid(self.metrics, self.parts)
def update_parts(self, user_dist, item_dist):
for part in self.parts.keys():
self.parts[part].update(user_dist, item_dist)
# === Single Evaluation Results
class EvaluationResultHybrid:
def __init__(self):
self.cf = EvaluationResult()
self.md = EvaluationResult()
def __str__(self):
s = 'CF:\n'
s += str(self.cf)
s += 'MD:\n'
s += str(self.md)
return s
class EvaluationResult:
def __init__(self):
self.parts: Dict[str, EvaluationResultPart] = {}
def __str__(self):
s = ''
for part, result in self.parts.items():
s += '=== Part {}\n'.format(part)
s += str(result)
s += '\n'
return s
def rmse(self):
return self.parts['full'].results['rmse']
class EvaluationResultPart:
def __init__(self):
self.results: Dict[str, float] = {}
def __str__(self):
s = ''
for metric, result in self.results.items():
s += '{}: {:.4f} '.format(metric, result)
return s
# === Multiple Evaluation Results (from Folds)
class EvaluationResultsHybrid:
def __init__(self, metrics: List[str] = metrics_rmse.keys(), parts: List[str] = parting_full.keys()):
self.cf = EvaluationResults(metrics, parts)
self.md = EvaluationResults(metrics, parts)
def add(self, result: EvaluationResultHybrid):
self.cf.add(result.cf)
self.md.add(result.md)
def __str__(self):
s = 'CF:\n'
s += str(self.cf)
s += 'MD:\n'
s += str(self.md)
return s
def mean_rmse_cf(self):
rmse = self.cf.rmse()
return rmse
def mean_rmse_md(self):
"""
Custom hacky function for Gridsearch
"""
rmse = self.md.rmse()
return rmse
class EvaluationResults:
def __init__(self, metrics: List[str] = metrics_rmse.keys(), parts: List[str] = parting_full.keys()):
self.parts: Dict[str, EvaluationResultsPart] = dict((key, EvaluationResultsPart(metrics)) for key in parts)
def add(self, result: EvaluationResult):
for part in self.parts.keys():
self.parts[part].add(result.parts[part])
def __str__(self):
s = ''
for part, result in self.parts.items():
s += '=== Part {}\n'.format(part)
s += str(result)
s += '\n'
return s
def rmse(self):
return self.parts['full'].mean('rmse')
class EvaluationResultsPart:
def __init__(self, metrics):
self.results: Dict[str, List[float]] = dict((key, []) for key in metrics)
def __str__(self):
s = ''
for metric, result in self.results.items():
mean = np.mean(result)
std = np.std(result)
s += '{}: {:.4f} ± {:.4f} '.format(metric, mean, std)
return s
def add(self, result: EvaluationResultPart):
for metric in self.results.keys():
self.results[metric].append(result.results[metric])
def mean(self, metric):
return np.mean(self.results[metric])
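# Minimal usage sketch (the model object and fold iterator below are assumptions,
# not part of this module):
#
#   evaluation = Evaluation(metrics=metrics_all, parts=get_parting_all(4))
#   evaluation.update_parts(user_dist, item_dist)  # distributions from training data
#   results = evaluation.get_results_class()
#   for (x_train, y_train), (x_test, y_test) in folds:
#       model.fit(x_train, y_train)
#       results.add(evaluation.evaluate(model, x_test, y_test))
#   print(results)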
| sbremer/hybrid_rs | evaluation/evaluation.py | Python | apache-2.0 | 5,761 | 0.002083 |
# -*- encoding:utf-8 -*-
# sample_CUBRIDdb.py
import CUBRIDdb
con = CUBRIDdb.connect('CUBRID:localhost:33000:demodb:::', 'public')
cur = con.cursor()
cur.execute('DROP TABLE IF EXISTS test_cubrid')
cur.execute('CREATE TABLE test_cubrid (id NUMERIC AUTO_INCREMENT(2009122350, 1), name VARCHAR(50))')
cur.execute("insert into test_cubrid (name) values ('Zhang San'), ('Li Si'), ('Wang Wu'), ('Ma Liu'), ('Niu Qi')")
cur.execute("insert into test_cubrid (name) values (?), (?)", ['中文zh-cn', 'John'])
cur.execute("insert into test_cubrid (name) values (?)", ['Tom',])
cur.execute('select * from test_cubrid')
# fetch result using fetchone()
row = cur.fetchone()
print(row)
print('')
# fetch result using fetchmany()
rows = cur.fetchmany(2)
for row in rows:
print(row)
print("")
rows = cur.fetchall()
for row in rows:
print(row)
cur.close()
con.close()
| andrei14vl/cubrid | contrib/python/samples/sample_CUBRIDdb.py | Python | gpl-3.0 | 872 | 0.006912 |
"""
//=========================================================
// OOMidi
// OpenOctave Midi and Audio Editor
// (C) Copyright 2009 Mathias Gyllengahm ([email protected])
//=========================================================
"""
import Pyro.core
import time
oom=Pyro.core.getProxyForURI('PYRONAME://:Default.oom')
for j in range(0,5):
for i in range(0,30):
oom.addMidiTrack("amiditrack" + str(i))
for i in range(0,30):
oom.deleteTrack("amiditrack" + str(i))
for i in range(0, 10):
print i
oom.addMidiTrack("amiditrack")
oom.addWaveTrack("awavetrack")
oom.addOutput("anoutput")
oom.addInput("aninput")
oom.setMute("aninput", False)
oom.setAudioTrackVolume("aninput",1.0)
oom.deleteTrack("amiditrack")
oom.deleteTrack("awavetrack")
oom.deleteTrack("anoutput")
oom.deleteTrack("aninput")
time.sleep(1)
| ccherrett/oom | share/pybridge/examples/addtrack.py | Python | gpl-2.0 | 930 | 0.025806 |
#!/usr/local/bin/python
#$Id: logsc.py,v 1.7 2013/11/15 15:07:06 kenji Exp $
from sqlite3 import dbapi2 as sqlite
import sys
# change integer to string if found
def int2str(p):
if type(p) == int:
return str(p)
else:
return p
if __name__ == '__main__':
con = sqlite.connect("/home/kenji/txt/hamradio/LOGS/SQLite-log/hamradio_log.sqlite")
# enable extension loading
con.enable_load_extension(True)
# load regexp extension
con.load_extension("/home/kenji/txt/hamradio/LOGS/scripts/sqlite3-pcre/pcre.so")
# disable extension loading after loading necessary extensions
con.enable_load_extension(False)
cur = con.cursor()
for arg in sys.argv[1:]:
t = (arg,)
# use "(?i)" (case insensitive) internal option prefix for PCRE
cur.execute("""
select `qso_date`, `time_on`, `my_call`, `call`, `band`, `mode`,
`rst_sent`, `qsl_sent`, `qsl_via`, `comment`, `my_qso_id` from qso
where `call` regexp \'(?i)\' || ? and `qsl_rcvd` <> \'I\'
order by `qso_date` || `time_on`
""", t)
for row in cur.fetchall():
print "-----------"
print "qso_date: ", row[0]
print "time_on: ", row[1]
print "my_call: ", row[2]
print "call: ", row[3]
print "band: ", row[4]
print "mode: ", row[5]
print "rst_sent: ", row[6]
print "qsl_sent: ", row[7]
print "qsl_via: ", row[8]
print "comment: ", row[9]
print "my_qso_id: ", row[10]
cur.close()
| jj1bdx/bdxlog | scripts/logsc.py | Python | mit | 1,491 | 0.022133 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo.serialization import jsonutils
import webob
from nova.compute import vm_states
from nova import context
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
class ExtendedEvacuateFindHostTest(test.NoDBTestCase):
def setUp(self):
super(ExtendedEvacuateFindHostTest, self).setUp()
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_evacuate_find_host',
'Evacuate'])
self.UUID = uuid.uuid4()
def _get_admin_context(self, user_id='fake', project_id='fake'):
ctxt = context.get_admin_context()
ctxt.user_id = user_id
ctxt.project_id = project_id
return ctxt
def _fake_compute_api(*args, **kwargs):
return True
def _fake_compute_api_get(self, context, instance_id, **kwargs):
instance = fake_instance.fake_db_instance(id=1, uuid=uuid,
task_state=None,
host='host1',
vm_state=vm_states.ACTIVE)
instance = instance_obj.Instance._from_db_object(context,
instance_obj.Instance(),
instance)
return instance
def _fake_service_get_by_compute_host(self, context, host):
return {'host_name': host,
'service': 'compute',
'zone': 'nova'
}
@mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.compute.api.API.evacuate')
def test_evacuate_instance_with_no_target(self, evacuate_mock,
api_get_mock,
service_get_mock):
service_get_mock.side_effects = self._fake_service_get_by_compute_host
api_get_mock.side_effects = self._fake_compute_api_get
evacuate_mock.side_effects = self._fake_compute_api
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'onSharedStorage': 'False',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(200, res.status_int)
evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
mock.ANY, mock.ANY)
@mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
@mock.patch('nova.compute.api.API.get')
def test_no_target_fails_if_extension_not_loaded(self, api_get_mock,
service_get_mock):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Evacuate'])
service_get_mock.side_effects = self._fake_service_get_by_compute_host
api_get_mock.side_effects = self._fake_compute_api_get
ctxt = self._get_admin_context()
app = fakes.wsgi_app(fake_auth_context=ctxt)
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'evacuate': {
'onSharedStorage': 'False',
'adminPass': 'MyNewPass'
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(400, res.status_int)
| Metaswitch/calico-nova | nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py | Python | apache-2.0 | 4,623 | 0.000649 |
import unittest
from abc import ABC, abstractmethod
from contextshell.action import ActionExecutor, Executor
from contextshell.backends.node import NodeTreeRoot
from contextshell.backends.virtual import VirtualTree
from contextshell.command import CommandInterpreter
from contextshell.path import NodePath
from contextshell.shell import Shell
class ShellScriptTestsBase(unittest.TestCase, ABC):
@abstractmethod
def create_shell(self) -> Shell:
raise NotImplementedError()
class TreeRootTestsBase(ShellScriptTestsBase):
@abstractmethod
def create_tree_root(self):
raise NotImplementedError()
def create_shell(self):
self.tree_root = self.create_tree_root()
self.configure_tree_root(self.tree_root)
interpreter = CommandInterpreter(self.tree_root)
shell = Shell(interpreter)
return shell
def configure_tree_root(self, tree_root):
pass
# TODO: is this class needed when testing single TreeRoot-based class?
class VirtualTreeTestsBase(TreeRootTestsBase):
def create_tree_root(self):
return VirtualTree()
def configure_tree_root(self, tree_root):
self.configure_virtual_tree(tree_root)
@abstractmethod
def configure_virtual_tree(self, virtual_tree: VirtualTree):
raise NotImplementedError()
class NodeTreeTestsBase(VirtualTreeTestsBase): # TODO: move to NodeTree tests
def configure_virtual_tree(self, virtual_tree: VirtualTree):
tree_root = NodeTreeRoot()
self.configure_node_tree(tree_root)
virtual_tree.mount(NodePath("."), tree_root)
def configure_node_tree(self, tree: NodeTreeRoot):
pass
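# Sketch of a concrete test case built on these bases; the test body is an
# assumption and only illustrates the intended wiring:
#
#   class ExampleNodeTreeTests(NodeTreeTestsBase):
#       def configure_node_tree(self, tree: NodeTreeRoot):
#           pass  # populate the tree with whatever nodes the tests need
#
#       def test_create_shell(self):
#           shell = self.create_shell()
#           self.assertIsNotNone(shell)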
| idle-code/ContextShell | tests/functional/ShellTestsBase.py | Python | mit | 1,673 | 0 |
"""The tests for the mqtt climate component."""
import copy
import json
from unittest.mock import call, patch
import pytest
import voluptuous as vol
from homeassistant.components import climate
from homeassistant.components.climate import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP
from homeassistant.components.climate.const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_HVAC_ACTION,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_ACTIONS,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_ECO,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.components.mqtt.climate import MQTT_CLIMATE_ATTRIBUTES_BLOCKED
from homeassistant.const import ATTR_TEMPERATURE, STATE_OFF
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_reloadable_late,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.climate import common
ENTITY_CLIMATE = "climate.test"
DEFAULT_CONFIG = {
CLIMATE_DOMAIN: {
"platform": "mqtt",
"name": "test",
"mode_command_topic": "mode-topic",
"temperature_command_topic": "temperature-topic",
"temperature_low_command_topic": "temperature-low-topic",
"temperature_high_command_topic": "temperature-high-topic",
"fan_mode_command_topic": "fan-mode-topic",
"swing_mode_command_topic": "swing-mode-topic",
"aux_command_topic": "aux-topic",
"preset_mode_command_topic": "preset-mode-topic",
"preset_modes": [
"eco",
"away",
"boost",
"comfort",
"home",
"sleep",
"activity",
],
}
}
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
DEFAULT_LEGACY_CONFIG = {
CLIMATE_DOMAIN: {
"platform": "mqtt",
"name": "test",
"mode_command_topic": "mode-topic",
"temperature_command_topic": "temperature-topic",
"temperature_low_command_topic": "temperature-low-topic",
"temperature_high_command_topic": "temperature-high-topic",
"fan_mode_command_topic": "fan-mode-topic",
"swing_mode_command_topic": "swing-mode-topic",
"aux_command_topic": "aux-topic",
"away_mode_command_topic": "away-mode-topic",
"hold_command_topic": "hold-topic",
}
}
async def test_setup_params(hass, mqtt_mock):
"""Test the initial parameters."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 21
assert state.attributes.get("fan_mode") == "low"
assert state.attributes.get("swing_mode") == "off"
assert state.state == "off"
assert state.attributes.get("min_temp") == DEFAULT_MIN_TEMP
assert state.attributes.get("max_temp") == DEFAULT_MAX_TEMP
async def test_preset_none_in_preset_modes(hass, mqtt_mock, caplog):
"""Test the preset mode payload reset configuration."""
config = copy.deepcopy(DEFAULT_CONFIG[CLIMATE_DOMAIN])
config["preset_modes"].append("none")
assert await async_setup_component(hass, CLIMATE_DOMAIN, {CLIMATE_DOMAIN: config})
await hass.async_block_till_done()
assert "Invalid config for [climate.mqtt]: not a valid value" in caplog.text
state = hass.states.get(ENTITY_CLIMATE)
assert state is None
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
@pytest.mark.parametrize(
"parameter,config_value",
[
("away_mode_command_topic", "away-mode-command-topic"),
("away_mode_state_topic", "away-mode-state-topic"),
("away_mode_state_template", "{{ value_json }}"),
("hold_mode_command_topic", "hold-mode-command-topic"),
("hold_mode_command_template", "hold-mode-command-template"),
("hold_mode_state_topic", "hold-mode-state-topic"),
("hold_mode_state_template", "{{ value_json }}"),
],
)
async def test_preset_modes_deprecation_guard(
hass, mqtt_mock, caplog, parameter, config_value
):
"""Test the configuration for invalid legacy parameters."""
config = copy.deepcopy(DEFAULT_CONFIG[CLIMATE_DOMAIN])
config[parameter] = config_value
assert await async_setup_component(hass, CLIMATE_DOMAIN, {CLIMATE_DOMAIN: config})
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state is None
async def test_supported_features(hass, mqtt_mock):
"""Test the supported_features."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
support = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_SWING_MODE
| SUPPORT_FAN_MODE
| SUPPORT_PRESET_MODE
| SUPPORT_AUX_HEAT
| SUPPORT_TARGET_TEMPERATURE_RANGE
)
assert state.attributes.get("supported_features") == support
async def test_get_hvac_modes(hass, mqtt_mock):
"""Test that the operation list returns the correct modes."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
modes = state.attributes.get("hvac_modes")
assert [
HVAC_MODE_AUTO,
STATE_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
] == modes
async def test_set_operation_bad_attr_and_state(hass, mqtt_mock, caplog):
"""Test setting operation mode without required attribute.
Also check the state.
"""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
with pytest.raises(vol.Invalid) as excinfo:
await common.async_set_hvac_mode(hass, None, ENTITY_CLIMATE)
assert (
"value must be one of ['auto', 'cool', 'dry', 'fan_only', 'heat', 'heat_cool', 'off'] for dictionary value @ data['hvac_mode']"
) in str(excinfo.value)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
async def test_set_operation(hass, mqtt_mock):
"""Test setting of new operation mode."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
await common.async_set_hvac_mode(hass, "cool", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
assert state.state == "cool"
mqtt_mock.async_publish.assert_called_once_with("mode-topic", "cool", 0, False)
async def test_set_operation_pessimistic(hass, mqtt_mock):
"""Test setting operation mode in pessimistic mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["mode_state_topic"] = "mode-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "unknown"
await common.async_set_hvac_mode(hass, "cool", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "unknown"
async_fire_mqtt_message(hass, "mode-state", "cool")
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
async_fire_mqtt_message(hass, "mode-state", "bogus mode")
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
async def test_set_operation_with_power_command(hass, mqtt_mock):
"""Test setting of new operation mode with power command enabled."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["power_command_topic"] = "power-command"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
await common.async_set_hvac_mode(hass, "cool", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
mqtt_mock.async_publish.assert_has_calls(
[call("power-command", "ON", 0, False), call("mode-topic", "cool", 0, False)]
)
mqtt_mock.async_publish.reset_mock()
await common.async_set_hvac_mode(hass, "off", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "off"
mqtt_mock.async_publish.assert_has_calls(
[call("power-command", "OFF", 0, False), call("mode-topic", "off", 0, False)]
)
mqtt_mock.async_publish.reset_mock()
async def test_set_fan_mode_bad_attr(hass, mqtt_mock, caplog):
"""Test setting fan mode without required attribute."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "low"
with pytest.raises(vol.Invalid) as excinfo:
await common.async_set_fan_mode(hass, None, ENTITY_CLIMATE)
assert "string value is None for dictionary value @ data['fan_mode']" in str(
excinfo.value
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "low"
async def test_set_fan_mode_pessimistic(hass, mqtt_mock):
"""Test setting of new fan mode in pessimistic mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["fan_mode_state_topic"] = "fan-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") is None
await common.async_set_fan_mode(hass, "high", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") is None
async_fire_mqtt_message(hass, "fan-state", "high")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "high"
async_fire_mqtt_message(hass, "fan-state", "bogus mode")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "high"
async def test_set_fan_mode(hass, mqtt_mock):
"""Test setting of new fan mode."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "low"
await common.async_set_fan_mode(hass, "high", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with("fan-mode-topic", "high", 0, False)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "high"
async def test_set_swing_mode_bad_attr(hass, mqtt_mock, caplog):
"""Test setting swing mode without required attribute."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "off"
with pytest.raises(vol.Invalid) as excinfo:
await common.async_set_swing_mode(hass, None, ENTITY_CLIMATE)
assert "string value is None for dictionary value @ data['swing_mode']" in str(
excinfo.value
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "off"
async def test_set_swing_pessimistic(hass, mqtt_mock):
"""Test setting swing mode in pessimistic mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["swing_mode_state_topic"] = "swing-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") is None
await common.async_set_swing_mode(hass, "on", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") is None
async_fire_mqtt_message(hass, "swing-state", "on")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "on"
async_fire_mqtt_message(hass, "swing-state", "bogus state")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "on"
async def test_set_swing(hass, mqtt_mock):
"""Test setting of new swing mode."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "off"
await common.async_set_swing_mode(hass, "on", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with("swing-mode-topic", "on", 0, False)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "on"
async def test_set_target_temperature(hass, mqtt_mock):
"""Test setting the target temperature."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 21
await common.async_set_hvac_mode(hass, "heat", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "heat"
mqtt_mock.async_publish.assert_called_once_with("mode-topic", "heat", 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_set_temperature(hass, temperature=47, entity_id=ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 47
mqtt_mock.async_publish.assert_called_once_with(
"temperature-topic", "47.0", 0, False
)
# also test directly supplying the operation mode to set_temperature
mqtt_mock.async_publish.reset_mock()
await common.async_set_temperature(
hass, temperature=21, hvac_mode="cool", entity_id=ENTITY_CLIMATE
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
assert state.attributes.get("temperature") == 21
mqtt_mock.async_publish.assert_has_calls(
[
call("mode-topic", "cool", 0, False),
call("temperature-topic", "21.0", 0, False),
]
)
mqtt_mock.async_publish.reset_mock()
async def test_set_target_temperature_pessimistic(hass, mqtt_mock):
"""Test setting the target temperature."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["temperature_state_topic"] = "temperature-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") is None
await common.async_set_hvac_mode(hass, "heat", ENTITY_CLIMATE)
await common.async_set_temperature(hass, temperature=47, entity_id=ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") is None
async_fire_mqtt_message(hass, "temperature-state", "1701")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 1701
async_fire_mqtt_message(hass, "temperature-state", "not a number")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 1701
async def test_set_target_temperature_low_high(hass, mqtt_mock):
"""Test setting the low/high target temperature."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
await common.async_set_temperature(
hass, target_temp_low=20, target_temp_high=23, entity_id=ENTITY_CLIMATE
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") == 20
assert state.attributes.get("target_temp_high") == 23
mqtt_mock.async_publish.assert_any_call("temperature-low-topic", "20.0", 0, False)
mqtt_mock.async_publish.assert_any_call("temperature-high-topic", "23.0", 0, False)
async def test_set_target_temperature_low_high_pessimistic(hass, mqtt_mock):
"""Test setting the low/high target temperature."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["temperature_low_state_topic"] = "temperature-low-state"
config["climate"]["temperature_high_state_topic"] = "temperature-high-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") is None
assert state.attributes.get("target_temp_high") is None
await common.async_set_temperature(
hass, target_temp_low=20, target_temp_high=23, entity_id=ENTITY_CLIMATE
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") is None
assert state.attributes.get("target_temp_high") is None
async_fire_mqtt_message(hass, "temperature-low-state", "1701")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") == 1701
assert state.attributes.get("target_temp_high") is None
async_fire_mqtt_message(hass, "temperature-high-state", "1703")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") == 1701
assert state.attributes.get("target_temp_high") == 1703
async_fire_mqtt_message(hass, "temperature-low-state", "not a number")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") == 1701
async_fire_mqtt_message(hass, "temperature-high-state", "not a number")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_high") == 1703
async def test_receive_mqtt_temperature(hass, mqtt_mock):
"""Test getting the current temperature via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["current_temperature_topic"] = "current_temperature"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "current_temperature", "47")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("current_temperature") == 47
async def test_handle_action_received(hass, mqtt_mock):
"""Test getting the action received via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["action_topic"] = "action"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
# Cycle through valid modes and also check for wrong input such as "None" (str(None))
async_fire_mqtt_message(hass, "action", "None")
state = hass.states.get(ENTITY_CLIMATE)
hvac_action = state.attributes.get(ATTR_HVAC_ACTION)
assert hvac_action is None
# Redefine actions according to https://developers.home-assistant.io/docs/core/entity/climate/#hvac-action
actions = ["off", "heating", "cooling", "drying", "idle", "fan"]
assert all(elem in actions for elem in CURRENT_HVAC_ACTIONS)
for action in actions:
async_fire_mqtt_message(hass, "action", action)
state = hass.states.get(ENTITY_CLIMATE)
hvac_action = state.attributes.get(ATTR_HVAC_ACTION)
assert hvac_action == action
async def test_set_preset_mode_optimistic(hass, mqtt_mock, caplog):
"""Test setting of the preset mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
await common.async_set_preset_mode(hass, "away", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-topic", "away", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
await common.async_set_preset_mode(hass, "eco", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-topic", "eco", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "eco"
await common.async_set_preset_mode(hass, "none", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-topic", "none", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
await common.async_set_preset_mode(hass, "comfort", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"preset-mode-topic", "comfort", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "comfort"
await common.async_set_preset_mode(hass, "invalid", ENTITY_CLIMATE)
assert "'invalid' is not a valid preset mode" in caplog.text
async def test_set_preset_mode_pessimistic(hass, mqtt_mock, caplog):
"""Test setting of the preset mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["preset_mode_state_topic"] = "preset-mode-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "preset-mode-state", "away")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
async_fire_mqtt_message(hass, "preset-mode-state", "eco")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "eco"
async_fire_mqtt_message(hass, "preset-mode-state", "none")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "preset-mode-state", "comfort")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "comfort"
async_fire_mqtt_message(hass, "preset-mode-state", "None")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "preset-mode-state", "home")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "home"
async_fire_mqtt_message(hass, "preset-mode-state", "nonsense")
assert (
"'nonsense' received on topic preset-mode-state. 'nonsense' is not a valid preset mode"
in caplog.text
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "home"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_away_mode_pessimistic(hass, mqtt_mock):
"""Test setting of the away mode."""
config = copy.deepcopy(DEFAULT_LEGACY_CONFIG)
config["climate"]["away_mode_state_topic"] = "away-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
await common.async_set_preset_mode(hass, "away", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "away-state", "ON")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
async_fire_mqtt_message(hass, "away-state", "OFF")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "away-state", "nonsense")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_away_mode(hass, mqtt_mock):
"""Test setting of the away mode."""
config = copy.deepcopy(DEFAULT_LEGACY_CONFIG)
config["climate"]["payload_on"] = "AN"
config["climate"]["payload_off"] = "AUS"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
mqtt_mock.async_publish.reset_mock()
await common.async_set_preset_mode(hass, "away", ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "AN", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "off", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
await common.async_set_preset_mode(hass, PRESET_NONE, ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "AUS", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "off", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
await common.async_set_preset_mode(hass, "hold-on", ENTITY_CLIMATE)
mqtt_mock.async_publish.reset_mock()
await common.async_set_preset_mode(hass, "away", ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "AN", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "off", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_hold_pessimistic(hass, mqtt_mock):
"""Test setting the hold mode in pessimistic mode."""
config = copy.deepcopy(DEFAULT_LEGACY_CONFIG)
config["climate"]["hold_state_topic"] = "hold-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("hold_mode") is None
await common.async_set_preset_mode(hass, "hold", ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("hold_mode") is None
async_fire_mqtt_message(hass, "hold-state", "on")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "on"
async_fire_mqtt_message(hass, "hold-state", "off")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_hold(hass, mqtt_mock):
"""Test setting the hold mode."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_LEGACY_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
await common.async_set_preset_mode(hass, "hold-on", ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold-on", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on"
await common.async_set_preset_mode(hass, PRESET_ECO, ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "eco", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_ECO
await common.async_set_preset_mode(hass, PRESET_NONE, ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "off", 0, False)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_preset_away(hass, mqtt_mock):
"""Test setting the hold mode and away mode."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_LEGACY_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_NONE
await common.async_set_preset_mode(hass, "hold-on", ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold-on", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on"
await common.async_set_preset_mode(hass, PRESET_AWAY, ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "off", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_AWAY
await common.async_set_preset_mode(hass, "hold-on-again", ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold-on-again", 0, False)
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on-again"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_preset_away_pessimistic(hass, mqtt_mock):
"""Test setting the hold mode and away mode in pessimistic mode."""
config = copy.deepcopy(DEFAULT_LEGACY_CONFIG)
config["climate"]["hold_state_topic"] = "hold-state"
config["climate"]["away_mode_state_topic"] = "away-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_NONE
await common.async_set_preset_mode(hass, "hold-on", ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold-on", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_NONE
async_fire_mqtt_message(hass, "hold-state", "hold-on")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on"
await common.async_set_preset_mode(hass, PRESET_AWAY, ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "ON", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "off", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on"
async_fire_mqtt_message(hass, "away-state", "ON")
async_fire_mqtt_message(hass, "hold-state", "off")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_AWAY
await common.async_set_preset_mode(hass, "hold-on-again", ENTITY_CLIMATE)
assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold-on-again", 0, False)
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_AWAY
async_fire_mqtt_message(hass, "hold-state", "hold-on-again")
async_fire_mqtt_message(hass, "away-state", "OFF")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on-again"
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_preset_mode_twice(hass, mqtt_mock):
"""Test setting of the same mode twice only publishes once."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_LEGACY_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
await common.async_set_preset_mode(hass, "hold-on", ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold-on", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "hold-on"
async def test_set_aux_pessimistic(hass, mqtt_mock):
"""Test setting of the aux heating in pessimistic mode."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["aux_state_topic"] = "aux-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
await common.async_set_aux_heat(hass, True, ENTITY_CLIMATE)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
async_fire_mqtt_message(hass, "aux-state", "ON")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "on"
async_fire_mqtt_message(hass, "aux-state", "OFF")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
async_fire_mqtt_message(hass, "aux-state", "nonsense")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
async def test_set_aux(hass, mqtt_mock):
"""Test setting of the aux heating."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
await common.async_set_aux_heat(hass, True, ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with("aux-topic", "ON", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "on"
await common.async_set_aux_heat(hass, False, ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with("aux-topic", "OFF", 0, False)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_get_target_temperature_low_high_with_templates(hass, mqtt_mock, caplog):
"""Test getting temperature high/low with templates."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["temperature_low_state_topic"] = "temperature-state"
config["climate"]["temperature_high_state_topic"] = "temperature-state"
config["climate"]["temperature_low_state_template"] = "{{ value_json.temp_low }}"
config["climate"]["temperature_high_state_template"] = "{{ value_json.temp_high }}"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
# Temperature - with valid value
assert state.attributes.get("target_temp_low") is None
assert state.attributes.get("target_temp_high") is None
async_fire_mqtt_message(
hass, "temperature-state", '{"temp_low": "1031", "temp_high": "1032"}'
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") == 1031
assert state.attributes.get("target_temp_high") == 1032
# Temperature - with invalid value
async_fire_mqtt_message(hass, "temperature-state", '"-INVALID-"')
state = hass.states.get(ENTITY_CLIMATE)
    # make sure the invalid value gets logged...
assert "Could not parse temperature from" in caplog.text
# ... but the actual value stays unchanged.
assert state.attributes.get("target_temp_low") == 1031
assert state.attributes.get("target_temp_high") == 1032
async def test_get_with_templates(hass, mqtt_mock, caplog):
"""Test getting various attributes with templates."""
config = copy.deepcopy(DEFAULT_CONFIG)
# By default, just unquote the JSON-strings
config["climate"]["value_template"] = "{{ value_json }}"
config["climate"]["action_template"] = "{{ value_json }}"
# Rendering to a bool for aux heat
config["climate"]["aux_state_template"] = "{{ value == 'switchmeon' }}"
# Rendering preset_mode
config["climate"]["preset_mode_value_template"] = "{{ value_json.attribute }}"
config["climate"]["action_topic"] = "action"
config["climate"]["mode_state_topic"] = "mode-state"
config["climate"]["fan_mode_state_topic"] = "fan-state"
config["climate"]["swing_mode_state_topic"] = "swing-state"
config["climate"]["temperature_state_topic"] = "temperature-state"
config["climate"]["aux_state_topic"] = "aux-state"
config["climate"]["current_temperature_topic"] = "current-temperature"
config["climate"]["preset_mode_state_topic"] = "current-preset-mode"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
# Operation Mode
state = hass.states.get(ENTITY_CLIMATE)
async_fire_mqtt_message(hass, "mode-state", '"cool"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
# Fan Mode
assert state.attributes.get("fan_mode") is None
async_fire_mqtt_message(hass, "fan-state", '"high"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "high"
# Swing Mode
assert state.attributes.get("swing_mode") is None
async_fire_mqtt_message(hass, "swing-state", '"on"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "on"
# Temperature - with valid value
assert state.attributes.get("temperature") is None
async_fire_mqtt_message(hass, "temperature-state", '"1031"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 1031
# Temperature - with invalid value
async_fire_mqtt_message(hass, "temperature-state", '"-INVALID-"')
state = hass.states.get(ENTITY_CLIMATE)
    # make sure the invalid value gets logged...
assert "Could not parse temperature from -INVALID-" in caplog.text
# ... but the actual value stays unchanged.
assert state.attributes.get("temperature") == 1031
# Preset Mode
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "current-preset-mode", '{"attribute": "eco"}')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "eco"
# Test with an empty json
async_fire_mqtt_message(
hass, "current-preset-mode", '{"other_attribute": "some_value"}'
)
state = hass.states.get(ENTITY_CLIMATE)
assert "Ignoring empty preset_mode from 'current-preset-mode'"
assert state.attributes.get("preset_mode") == "eco"
# Aux mode
assert state.attributes.get("aux_heat") == "off"
async_fire_mqtt_message(hass, "aux-state", "switchmeon")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "on"
# anything other than 'switchmeon' should turn Aux mode off
async_fire_mqtt_message(hass, "aux-state", "somerandomstring")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("aux_heat") == "off"
# Current temperature
async_fire_mqtt_message(hass, "current-temperature", '"74656"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("current_temperature") == 74656
# Action
async_fire_mqtt_message(hass, "action", '"cooling"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("hvac_action") == "cooling"
# Test ignoring null values
async_fire_mqtt_message(hass, "action", "null")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("hvac_action") == "cooling"
assert (
"Invalid ['off', 'heating', 'cooling', 'drying', 'idle', 'fan'] action: None, ignoring"
in caplog.text
)
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_get_with_hold_and_away_mode_and_templates(hass, mqtt_mock, caplog):
"""Test getting various for hold and away mode attributes with templates."""
config = copy.deepcopy(DEFAULT_LEGACY_CONFIG)
config["climate"]["mode_state_topic"] = "mode-state"
# By default, just unquote the JSON-strings
config["climate"]["value_template"] = "{{ value_json }}"
# Something more complicated for hold mode
config["climate"]["hold_state_template"] = "{{ value_json.attribute }}"
config["climate"]["away_mode_state_topic"] = "away-state"
config["climate"]["hold_state_topic"] = "hold-state"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
# Operation Mode
state = hass.states.get(ENTITY_CLIMATE)
async_fire_mqtt_message(hass, "mode-state", '"cool"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
# Away Mode
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "away-state", '"ON"')
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
# Away Mode with JSON values
async_fire_mqtt_message(hass, "away-state", "false")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "none"
async_fire_mqtt_message(hass, "away-state", "true")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "away"
# Hold Mode
async_fire_mqtt_message(
hass,
"hold-state",
"""
{ "attribute": "somemode" }
""",
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == "somemode"
async def test_set_and_templates(hass, mqtt_mock, caplog):
"""Test setting various attributes with templates."""
config = copy.deepcopy(DEFAULT_CONFIG)
# Create simple templates
config["climate"]["fan_mode_command_template"] = "fan_mode: {{ value }}"
config["climate"]["preset_mode_command_template"] = "preset_mode: {{ value }}"
config["climate"]["mode_command_template"] = "mode: {{ value }}"
config["climate"]["swing_mode_command_template"] = "swing_mode: {{ value }}"
config["climate"]["temperature_command_template"] = "temp: {{ value }}"
config["climate"]["temperature_high_command_template"] = "temp_hi: {{ value }}"
config["climate"]["temperature_low_command_template"] = "temp_lo: {{ value }}"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
# Fan Mode
await common.async_set_fan_mode(hass, "high", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"fan-mode-topic", "fan_mode: high", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("fan_mode") == "high"
# Preset Mode
await common.async_set_preset_mode(hass, PRESET_ECO, ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 1
mqtt_mock.async_publish.assert_any_call(
"preset-mode-topic", "preset_mode: eco", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_ECO
# Mode
await common.async_set_hvac_mode(hass, "cool", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"mode-topic", "mode: cool", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == "cool"
# Swing Mode
await common.async_set_swing_mode(hass, "on", ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"swing-mode-topic", "swing_mode: on", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("swing_mode") == "on"
# Temperature
await common.async_set_temperature(hass, temperature=47, entity_id=ENTITY_CLIMATE)
mqtt_mock.async_publish.assert_called_once_with(
"temperature-topic", "temp: 47.0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 47
# Temperature Low/High
await common.async_set_temperature(
hass, target_temp_low=20, target_temp_high=23, entity_id=ENTITY_CLIMATE
)
mqtt_mock.async_publish.assert_any_call(
"temperature-low-topic", "temp_lo: 20.0", 0, False
)
mqtt_mock.async_publish.assert_any_call(
"temperature-high-topic", "temp_hi: 23.0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("target_temp_low") == 20
assert state.attributes.get("target_temp_high") == 23
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
async def test_set_with_away_and_hold_modes_and_templates(hass, mqtt_mock, caplog):
"""Test setting various attributes on hold and away mode with templates."""
config = copy.deepcopy(DEFAULT_LEGACY_CONFIG)
# Create simple templates
config["climate"]["hold_command_template"] = "hold: {{ value }}"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
# Hold Mode
await common.async_set_preset_mode(hass, PRESET_ECO, ENTITY_CLIMATE)
    assert mqtt_mock.async_publish.call_count == 2
mqtt_mock.async_publish.assert_any_call("away-mode-topic", "OFF", 0, False)
mqtt_mock.async_publish.assert_any_call("hold-topic", "hold: eco", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("preset_mode") == PRESET_ECO
async def test_min_temp_custom(hass, mqtt_mock):
"""Test a custom min temp."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["min_temp"] = 26
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
min_temp = state.attributes.get("min_temp")
assert isinstance(min_temp, float)
assert state.attributes.get("min_temp") == 26
async def test_max_temp_custom(hass, mqtt_mock):
"""Test a custom max temp."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["max_temp"] = 60
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
max_temp = state.attributes.get("max_temp")
assert isinstance(max_temp, float)
assert max_temp == 60
async def test_temp_step_custom(hass, mqtt_mock):
"""Test a custom temp step."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["temp_step"] = 0.01
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_CLIMATE)
temp_step = state.attributes.get("target_temp_step")
assert isinstance(temp_step, float)
assert temp_step == 0.01
async def test_temperature_unit(hass, mqtt_mock):
"""Test that setting temperature unit converts temperature values."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["temperature_unit"] = "F"
config["climate"]["current_temperature_topic"] = "current_temperature"
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "current_temperature", "77")
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("current_temperature") == 25
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG, MQTT_CLIMATE_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one climate per unique_id."""
config = {
CLIMATE_DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"power_state_topic": "test-topic",
"power_command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"power_state_topic": "test-topic",
"power_command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, CLIMATE_DOMAIN, config)
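# Each case feeds an encoded payload to a state topic and checks the resulting entity attribute.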
@pytest.mark.parametrize(
"topic,value,attribute,attribute_value",
[
("action_topic", "heating", ATTR_HVAC_ACTION, "heating"),
("action_topic", "cooling", ATTR_HVAC_ACTION, "cooling"),
("aux_state_topic", "ON", ATTR_AUX_HEAT, "on"),
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
("away_mode_state_topic", "ON", ATTR_PRESET_MODE, "away"),
("current_temperature_topic", "22.1", ATTR_CURRENT_TEMPERATURE, 22.1),
("fan_mode_state_topic", "low", ATTR_FAN_MODE, "low"),
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
("hold_state_topic", "mode1", ATTR_PRESET_MODE, "mode1"),
("mode_state_topic", "cool", None, None),
("mode_state_topic", "fan_only", None, None),
("swing_mode_state_topic", "on", ATTR_SWING_MODE, "on"),
("temperature_low_state_topic", "19.1", ATTR_TARGET_TEMP_LOW, 19.1),
("temperature_high_state_topic", "22.9", ATTR_TARGET_TEMP_HIGH, 22.9),
("temperature_state_topic", "19.9", ATTR_TEMPERATURE, 19.9),
],
)
async def test_encoding_subscribable_topics(
hass, mqtt_mock, caplog, topic, value, attribute, attribute_value
):
"""Test handling of incoming encoded payload."""
config = copy.deepcopy(DEFAULT_CONFIG[CLIMATE_DOMAIN])
# AWAY and HOLD mode topics and templates are deprecated, support will be removed with release 2022.9
if topic in ["hold_state_topic", "away_mode_state_topic"]:
config["hold_modes"] = ["mode1", "mode2"]
del config["preset_modes"]
del config["preset_mode_command_topic"]
await help_test_encoding_subscribable_topics(
hass,
mqtt_mock,
caplog,
CLIMATE_DOMAIN,
config,
topic,
value,
attribute,
attribute_value,
)
async def test_discovery_removal_climate(hass, mqtt_mock, caplog):
"""Test removal of discovered climate."""
data = json.dumps(DEFAULT_CONFIG[CLIMATE_DOMAIN])
await help_test_discovery_removal(hass, mqtt_mock, caplog, CLIMATE_DOMAIN, data)
async def test_discovery_update_climate(hass, mqtt_mock, caplog):
"""Test update of discovered climate."""
config1 = {"name": "Beer"}
config2 = {"name": "Milk"}
await help_test_discovery_update(
hass, mqtt_mock, caplog, CLIMATE_DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_climate(hass, mqtt_mock, caplog):
"""Test update of discovered climate."""
data1 = '{ "name": "Beer" }'
with patch(
"homeassistant.components.mqtt.climate.MqttClimate.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, CLIMATE_DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer", "power_command_topic": "test_topic#" }'
data2 = '{ "name": "Milk", "power_command_topic": "test_topic" }'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, CLIMATE_DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT climate device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT climate device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = {
CLIMATE_DOMAIN: {
"platform": "mqtt",
"name": "test",
"mode_state_topic": "test-topic",
"availability_topic": "avty-topic",
}
}
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, CLIMATE_DOMAIN, config, ["test-topic", "avty-topic"]
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, CLIMATE_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
config = {
CLIMATE_DOMAIN: {
"platform": "mqtt",
"name": "test",
"mode_command_topic": "command-topic",
"mode_state_topic": "test-topic",
}
}
await help_test_entity_debug_info_message(
hass,
mqtt_mock,
CLIMATE_DOMAIN,
config,
climate.SERVICE_TURN_ON,
command_topic="command-topic",
command_payload="heat",
state_topic="test-topic",
)
async def test_precision_default(hass, mqtt_mock):
"""Test that setting precision to tenths works as intended."""
assert await async_setup_component(hass, CLIMATE_DOMAIN, DEFAULT_CONFIG)
await hass.async_block_till_done()
await common.async_set_temperature(
hass, temperature=23.67, entity_id=ENTITY_CLIMATE
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 23.7
mqtt_mock.async_publish.reset_mock()
async def test_precision_halves(hass, mqtt_mock):
"""Test that setting precision to halves works as intended."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["precision"] = 0.5
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
await common.async_set_temperature(
hass, temperature=23.67, entity_id=ENTITY_CLIMATE
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 23.5
mqtt_mock.async_publish.reset_mock()
async def test_precision_whole(hass, mqtt_mock):
"""Test that setting precision to whole works as intended."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["climate"]["precision"] = 1.0
assert await async_setup_component(hass, CLIMATE_DOMAIN, config)
await hass.async_block_till_done()
await common.async_set_temperature(
hass, temperature=23.67, entity_id=ENTITY_CLIMATE
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get("temperature") == 24.0
mqtt_mock.async_publish.reset_mock()
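# Each case maps a climate service call to the MQTT topic, payload and optional command template it should use.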
@pytest.mark.parametrize(
"service,topic,parameters,payload,template",
[
(
climate.SERVICE_TURN_ON,
"power_command_topic",
None,
"ON",
None,
),
(
climate.SERVICE_SET_HVAC_MODE,
"mode_command_topic",
{"hvac_mode": "cool"},
"cool",
"mode_command_template",
),
(
climate.SERVICE_SET_PRESET_MODE,
"preset_mode_command_topic",
{"preset_mode": "sleep"},
"sleep",
"preset_mode_command_template",
),
(
climate.SERVICE_SET_PRESET_MODE,
"away_mode_command_topic",
{"preset_mode": "away"},
"ON",
None,
),
(
climate.SERVICE_SET_PRESET_MODE,
"hold_command_topic",
{"preset_mode": "eco"},
"eco",
"hold_command_template",
),
(
climate.SERVICE_SET_PRESET_MODE,
"hold_command_topic",
{"preset_mode": "comfort"},
"comfort",
"hold_command_template",
),
(
climate.SERVICE_SET_FAN_MODE,
"fan_mode_command_topic",
{"fan_mode": "medium"},
"medium",
"fan_mode_command_template",
),
(
climate.SERVICE_SET_SWING_MODE,
"swing_mode_command_topic",
{"swing_mode": "on"},
"on",
"swing_mode_command_template",
),
(
climate.SERVICE_SET_AUX_HEAT,
"aux_command_topic",
{"aux_heat": "on"},
"ON",
None,
),
(
climate.SERVICE_SET_TEMPERATURE,
"temperature_command_topic",
{"temperature": "20.1"},
20.1,
"temperature_command_template",
),
(
climate.SERVICE_SET_TEMPERATURE,
"temperature_low_command_topic",
{
"temperature": "20.1",
"target_temp_low": "15.1",
"target_temp_high": "29.8",
},
15.1,
"temperature_low_command_template",
),
(
climate.SERVICE_SET_TEMPERATURE,
"temperature_high_command_topic",
{
"temperature": "20.1",
"target_temp_low": "15.1",
"target_temp_high": "29.8",
},
29.8,
"temperature_high_command_template",
),
],
)
async def test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
service,
topic,
parameters,
payload,
template,
):
"""Test publishing MQTT payload with different encoding."""
domain = climate.DOMAIN
config = copy.deepcopy(DEFAULT_CONFIG[domain])
if topic != "preset_mode_command_topic":
del config["preset_mode_command_topic"]
del config["preset_modes"]
await help_test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
domain,
config,
service,
topic,
parameters,
payload,
template,
)
async def test_reloadable(hass, mqtt_mock, caplog, tmp_path):
"""Test reloading the MQTT platform."""
domain = CLIMATE_DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config)
async def test_reloadable_late(hass, mqtt_client_mock, caplog, tmp_path):
"""Test reloading the MQTT platform with late entry setup."""
domain = CLIMATE_DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable_late(hass, caplog, tmp_path, domain, config)
| rohitranjan1991/home-assistant | tests/components/mqtt/test_climate.py | Python | mit | 64,945 | 0.000939 |
#! /usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import * # noqa: F401
'''Runs a full joinmarket pit (using `nirc` miniircd servers,
with `nirc` options specified as an option to pytest), in
bitcoin regtest mode with 3 maker bots and 1 taker bot,
and does 1 coinjoin. This is intended as an E2E sanity check
but certainly could be extended further.
'''
from common import make_wallets
import pytest
import sys
from jmclient import YieldGeneratorBasic, load_test_config, jm_single,\
sync_wallet, JMClientProtocolFactory, start_reactor, Taker, \
random_under_max_order_choose
from jmbase.support import get_log
from twisted.internet import reactor
from twisted.python.log import startLogging
log = get_log()
# Note that this parametrization is inherited (i.e. copied) from
# the previous 'ygrunner.py' script which is intended to be run
# manually to test out complex scenarios. Here, we only run one
# simple test with honest makers (and for simplicity malicious
# makers are not included in the code). Vars are left in in case
# we want to do more complex stuff in the automated tests later.
@pytest.mark.parametrize(
"num_ygs, wallet_structures, mean_amt, malicious, deterministic",
[
# 1sp 3yg, honest makers
(3, [[1, 3, 0, 0, 0]] * 4, 2, 0, False),
])
def test_cj(setup_full_coinjoin, num_ygs, wallet_structures, mean_amt,
malicious, deterministic):
"""Starts by setting up wallets for maker and taker bots; then,
instantiates a single taker with the final wallet.
The remaining wallets are used to set up YieldGenerators (basic form).
All the wallets are given coins according to the rules of make_wallets,
using the parameters for the values.
The final start_reactor call is the only one that actually starts the
reactor; the others only set up protocol instances.
Inline are custom callbacks for the Taker, and these are basically
copies of those in the `sendpayment.py` script for now, but they could
be customized later for testing.
The Taker's schedule is a single coinjoin, using basically random values,
again this could be easily edited or parametrized if we feel like it.
"""
# Set up some wallets, for the ygs and 1 sp.
wallets = make_wallets(num_ygs + 1,
wallet_structures=wallet_structures,
mean_amt=mean_amt)
#the sendpayment bot uses the last wallet in the list
wallet = wallets[num_ygs]['wallet']
sync_wallet(wallet, fast=True)
# grab a dest addr from the wallet
destaddr = wallet.get_external_addr(4)
coinjoin_amt = 20000000
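    # Single schedule entry: spend from mixdepth 1, coinjoin_amt sats, with 2 counterparties, to destaddr, no wait.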
schedule = [[1, coinjoin_amt, 2, destaddr,
0.0, False]]
""" The following two callback functions are as simple as possible
modifications of the same in scripts/sendpayment.py
"""
def filter_orders_callback(orders_fees, cjamount):
return True
def taker_finished(res, fromtx=False, waittime=0.0, txdetails=None):
def final_checks():
sync_wallet(wallet, fast=True)
newbal = wallet.get_balance_by_mixdepth()[4]
oldbal = wallet.get_balance_by_mixdepth()[1]
# These are our check that the coinjoin succeeded
assert newbal == coinjoin_amt
# TODO: parametrize these; cj fees = 38K (.001 x 20M x 2 makers)
# minus 1K tx fee contribution each; 600M is original balance
# in mixdepth 1
assert oldbal + newbal + (40000 - 2000) + taker.total_txfee == 600000000
if fromtx == "unconfirmed":
#If final entry, stop *here*, don't wait for confirmation
if taker.schedule_index + 1 == len(taker.schedule):
reactor.stop()
final_checks()
return
if fromtx:
# currently this test uses a schedule with only one entry
assert False, "taker_finished was called with fromtx=True"
reactor.stop()
return
else:
if not res:
assert False, "Did not complete successfully, shutting down"
# Note that this is required in both conditional branches,
# especially in testing, because it's possible to receive the
# confirmed callback before the unconfirmed.
reactor.stop()
final_checks()
# twisted logging is required for debugging:
startLogging(sys.stdout)
taker = Taker(wallet,
schedule,
order_chooser=random_under_max_order_choose,
max_cj_fee=(0.1, 200),
callbacks=(filter_orders_callback, None, taker_finished))
clientfactory = JMClientProtocolFactory(taker)
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon, rs=False)
txfee = 1000
cjfee_a = 4200
cjfee_r = '0.001'
ordertype = 'swreloffer'
minsize = 100000
ygclass = YieldGeneratorBasic
# As noted above, this is not currently used but can be in future:
if malicious or deterministic:
raise NotImplementedError
for i in range(num_ygs):
cfg = [txfee, cjfee_a, cjfee_r, ordertype, minsize]
sync_wallet(wallets[i]["wallet"], fast=True)
yg = ygclass(wallets[i]["wallet"], cfg)
if malicious:
yg.set_maliciousness(malicious, mtype="tx")
clientfactory = JMClientProtocolFactory(yg, proto_type="MAKER")
nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
daemon = True if nodaemon == 1 else False
# As noted above, only the final start_reactor() call will
# actually start it!
rs = True if i == num_ygs - 1 else False
start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
jm_single().config.getint("DAEMON", "daemon_port"),
clientfactory, daemon=daemon, rs=rs)
@pytest.fixture(scope="module")
def setup_full_coinjoin():
load_test_config()
jm_single().bc_interface.tick_forward_chain_interval = 10
jm_single().bc_interface.simulate_blocks()
| undeath/joinmarket-clientserver | test/test_full_coinjoin.py | Python | gpl-3.0 | 6,442 | 0.001242 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['LSTM'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_Seasonal_MonthOfYear_LSTM.py | Python | bsd-3-clause | 171 | 0.046784 |
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <[email protected]>
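# Re-export the UDP transport session implementations at the package level.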
from ._input import UDPInputSession as UDPInputSession
from ._input import PromiscuousUDPInputSession as PromiscuousUDPInputSession
from ._input import SelectiveUDPInputSession as SelectiveUDPInputSession
from ._input import UDPInputSessionStatistics as UDPInputSessionStatistics
from ._input import PromiscuousUDPInputSessionStatistics as PromiscuousUDPInputSessionStatistics
from ._input import SelectiveUDPInputSessionStatistics as SelectiveUDPInputSessionStatistics
from ._output import UDPOutputSession as UDPOutputSession
from ._output import UDPFeedback as UDPFeedback
| UAVCAN/pyuavcan | pyuavcan/transport/udp/_session/__init__.py | Python | mit | 729 | 0.002743 |
from iktomi.utils import cached_property
from .base import Cli
class LazyCli(Cli):
'''
Wrapper for creating lazy command digests.
Sometimes it is not needed to import all of application parts to start
a particular command. LazyCli allows you to define all imports in a
function called only on the command::
@LazyCli
def db_command():
import admin
from admin.environment import db_maker
from models import initial
from iktomi.cli import sqla
return sqla.Sqla(db_maker, initial=initial.install)
# ...
def run(args=sys.argv):
manage(dict(db=db_command, ), args)
'''
def __init__(self, func):
self.get_digest = func
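    # cached_property ensures the wrapped factory runs only once, on first access to `digest`.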
@cached_property
def digest(self):
return self.get_digest()
def description(self, *args, **kwargs):
return self.digest.description(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self.digest(*args, **kwargs)
| boltnev/iktomi | iktomi/cli/lazy.py | Python | mit | 1,023 | 0.001955 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.test.utils import override_settings
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailAddress
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import DisqusProvider
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
ACCOUNT_EMAIL_VERIFICATION=account_settings
.EmailVerificationMethod.MANDATORY)
class DisqusTests(OAuth2TestsMixin, TestCase):
provider_id = DisqusProvider.id
def get_mocked_response(self,
name='Raymond Penners',
email="[email protected]"):
return MockedResponse(200, """
{"response": {"name": "%s",
"avatar": {
"permalink": "https://lh5.googleusercontent.com/photo.jpg"
},
"email": "%s",
"profileUrl": "https://plus.google.com/108204268033311374519",
"id": "108204268033311374519" }}
""" % (name, email))
def test_account_connect(self):
email = "[email protected]"
user = User.objects.create(username='user',
is_active=True,
email=email)
user.set_password('test')
user.save()
EmailAddress.objects.create(user=user,
email=email,
primary=True,
verified=True)
self.client.login(username=user.username,
password='test')
self.login(self.get_mocked_response(), process='connect')
# Check if we connected...
self.assertTrue(SocialAccount.objects.filter(
user=user,
provider=DisqusProvider.id).exists())
# For now, we do not pick up any new e-mail addresses on connect
self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1)
self.assertEqual(EmailAddress.objects.filter(
user=user,
email=email).count(), 1)
| AltSchool/django-allauth | allauth/socialaccount/providers/disqus/tests.py | Python | mit | 2,347 | 0 |
# Copyright 2009, Kovid Goyal <[email protected]>
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPL v3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/gplv3_license
import os, logging
from uuid import uuid4
from ..ptempfile import TemporaryDirectory
from ..utils.zipfile import zip_add_dir
from ..oeb.transforms.filenames import FlatFilenames, UniqueFilenames
from ..oeb.transforms.split import Split
from ..oeb.transforms.cover import CoverManager
from ..oeb.base import XPath, OPF
from ..oeb.output import OEBOutput
from . import initialize_container
from lxml import etree
def upshift_markup(oeb):
'Upgrade markup to comply with XHTML 1.1 where possible'
for x in oeb.spine:
root = x.data
body = XPath('//h:body')(root)
if body:
body = body[0]
if not hasattr(body, 'xpath'):
continue
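        # XHTML 1.1 drops the <u> element; replace it with an underline-styled <span>.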
for u in XPath('//h:u')(root):
u.tag = 'span'
u.set('style', 'text-decoration:underline')
def convert(oeb, output_path, epub_flatten=False, dont_split_on_page_breaks=False,
flow_size=260, no_default_epub_cover=False, no_svg_cover=False,
preserve_cover_aspect_ratio=False, pretty_print=False):
if epub_flatten:
FlatFilenames()(oeb)
else:
UniqueFilenames()(oeb)
upshift_markup(oeb)
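    # flow_size is given in KB; Split expects a byte limit.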
split = Split(not dont_split_on_page_breaks, max_flow_size=flow_size*1024)
split(oeb)
cm = CoverManager(no_default_cover=no_default_epub_cover, no_svg_cover=no_svg_cover,
preserve_aspect_ratio=preserve_cover_aspect_ratio)
cm(oeb)
if oeb.toc.count() == 0:
logging.warn('This EPUB file has no Table of Contents. Creating a default TOC')
first = next(iter(oeb.spine))
oeb.toc.add('Start', first.href)
identifiers = oeb.metadata['identifier']
uuid = None
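    # EPUB requires a unique package identifier; reuse an existing uuid-scheme entry, otherwise generate one.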
for x in identifiers:
        if x.get(OPF('scheme'), '').lower() == 'uuid' or str(x).startswith('urn:uuid:'):
uuid = str(x).split(':')[-1]
break
if uuid is None:
logging.warn('No UUID identifier found')
uuid = str(uuid4())
oeb.metadata.add('identifier', uuid, scheme='uuid', id=uuid)
with TemporaryDirectory('_epub_output') as tdir:
metadata_xml = None
extra_entries = []
oeb_output = OEBOutput()
oeb_output.convert(oeb, tdir, None)
opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
if pretty_print:
condense_ncx([os.path.join(tdir, x) for x in os.listdir(tdir) if x.endswith('.ncx')][0])
with initialize_container(output_path, os.path.basename(opf),
extra_entries=extra_entries) as epub:
zip_add_dir(epub, tdir)
if metadata_xml is not None:
epub.writestr('META-INF/metadata.xml', metadata_xml.encode('utf-8'))
def condense_ncx(ncx_path):
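    # Strip surrounding whitespace from NCX text nodes and rewrite the file in place.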
tree = etree.parse(ncx_path)
for tag in tree.getroot().iter(tag=etree.Element):
if tag.text:
tag.text = tag.text.strip()
if tag.tail:
tag.tail = tag.tail.strip()
compressed = etree.tostring(tree.getroot(), encoding='utf-8')
open(ncx_path, 'wb').write(compressed)
| hsoft/pdfmasher | ebooks/epub/output.py | Python | gpl-3.0 | 3,361 | 0.005356 |
from PyQt4 import QtCore, QtGui
class Task(QtCore.QThread):
messageAdded = QtCore.pyqtSignal(QtCore.QString)
def __init__(self, mainWindow, parent = None):
super(Task, self).__init__(parent)
self.mainWindow = mainWindow
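        # finished/terminated fire when the worker thread ends; the connected post-hooks then run in the GUI thread.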
self.finished.connect(self.postRun)
self.terminated.connect(self.postTerminated)
def run(self):
"""
The code in this method is run in another thread.
"""
pass
def postRun(self):
"""
The code in this method is run in GUI thread.
"""
pass
def postTerminated(self):
"""
The code in this method is run in GUI thread.
"""
pass
| karelklic/flashfit | task.py | Python | gpl-3.0 | 695 | 0.004317 |
from django.urls import path
from . import views
urlpatterns = [path('unsubscribe', views.QueryObserverUnsubscribeView.as_view())]
| genialis/django-rest-framework-reactive | src/rest_framework_reactive/api_urls.py | Python | apache-2.0 | 133 | 0.007519 |
import nacl.encoding
import nacl.public
import nacl.utils
class WhisperKey():
def __init__(self, key=None):
if key is None:
self.generate_keypair()
else:
if isinstance(key, bytes) or isinstance(key, str):
try:
self._private_key = nacl.public.PrivateKey(key, encoder=nacl.encoding.Base64Encoder)
except Exception as e:
raise Exception("Error generating key from given str or bytes object: ", e)
elif isinstance(key, nacl.public.PrivateKey):
self._private_key = key
else:
raise Exception("Not a valid key.")
def generate_keypair(self):
self._private_key = nacl.public.PrivateKey.generate()
def get_private_key(self, stringify=False, as_image=False, image=None):
if stringify:
return (
self._private_key
.encode(encoder=nacl.encoding.Base64Encoder)
.decode("utf-8")
)
elif as_image:
# Accessible afterwards by parsing all characters after 2321.
# If the image changes, the ability to parse it changes as well.
# Wise to include an identifier in the future for the use of custom images.
file_contents = None
if image:
file_contents = image.read()
else:
with open("whisper/static/img/key_small.png", "br") as f:
file_contents = f.read()
private_key = self._private_key.encode(encoder=nacl.encoding.Base64Encoder)
file_contents += private_key
return file_contents
else:
return self._private_key
def get_public_key(self, stringify=False):
public_key = self._private_key.public_key
if stringify:
return (
public_key
.encode(encoder=nacl.encoding.Base64Encoder)
.decode("utf-8")
)
else:
return public_key
def encrypt_message(self, message, public_key, nonce=None):
# Verify that we can convert the public_key to an nacl.public.PublicKey instance
if isinstance(public_key, nacl.public.PublicKey):
pass
elif isinstance(public_key, str) or isinstance(public_key, bytes): # pragma: no cover
public_key = nacl.public.PublicKey(public_key, encoder=nacl.encoding.Base64Encoder)
elif isinstance(public_key, WhisperKey): # pragma: no cover
public_key = public_key.get_public_key()
else:
raise Exception("Invalid public key provided.")
# Make sure our message is a bytes object, or convert it to one.
if isinstance(message, bytes): # pragma: no cover
pass
elif isinstance(message, str):
message = bytes(message, "utf-8")
else: # pragma: no cover
raise Exception("Message is not bytes or str.")
box = nacl.public.Box(self._private_key, public_key)
nonce = nonce or nacl.utils.random(24)
# Message will be prepended with a 32 character nonce, which can be parsed out elsewhere.
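        # (Added note: the 24 raw nonce bytes base64-encode to 24 * 4/3 = 32
        # characters, which is why decrypt_message slices off message[:32].)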
encrypted_message = box.encrypt(message, nonce, encoder=nacl.encoding.Base64Encoder)
return encrypted_message.decode("utf-8")
def decrypt_message(self, message, public_key):
# Verify that we can convert the public_key to an nacl.public.PublicKey instance
if isinstance(public_key, nacl.public.PublicKey):
pass
elif isinstance(public_key, str) or isinstance(public_key, bytes): # pragma: no cover
public_key = nacl.public.PublicKey(public_key, encoder=nacl.encoding.Base64Encoder)
elif isinstance(public_key, WhisperKey): # pragma: no cover
public_key = public_key.get_public_key()
else:
raise Exception("Invalid public key provided.")
# Make sure our message is a bytes object, or convert it to one.
if isinstance(message, bytes): # pragma: no cover
pass
elif isinstance(message, str):
message = bytes(message, "utf-8")
else: # pragma: no cover
raise Exception("Message is not bytes or str.")
box = nacl.public.Box(self._private_key, public_key)
nonce = message[:32]
_message = message[32:]
encrypted_message = nacl.utils.EncryptedMessage(message)
decrypted = box.decrypt(encrypted_message, encoder=nacl.encoding.Base64Encoder)
return decrypted.decode("utf-8")
if __name__ == "__main__": # pragma: no cover
sender = WhisperKey()
receiver = WhisperKey()
nonce = bytes([x for x in range(24)])
out_message = sender.encrypt_message(
message="This is our test message, we'll see how it turns out in the end.",
public_key=receiver,
nonce=nonce
)
print("Our private key")
print("================================================")
print(sender.get_private_key(stringify=True))
print("\n")
print("Their public key")
print("================================================")
print(receiver.get_public_key(stringify=True))
print("\n")
print("Their private key")
print("================================================")
print(receiver.get_private_key(stringify=True))
print("\n")
print("Our public key")
print("================================================")
print(sender.get_public_key(stringify=True))
print("\n")
print("Final output message")
print("================================================")
print(out_message)
print("\n")
print("Decrypted")
print("================================================")
print(receiver.decrypt_message(message=out_message, public_key=sender.get_public_key())) | NoiSek/whisper | whisper/models/whisperkey.py | Python | gpl-2.0 | 5,369 | 0.017322 |
# Copyright 2018 David Vidal <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, models
from odoo.exceptions import UserError
class IrSequence(models.Model):
_inherit = 'ir.sequence'
@api.constrains('prefix', 'code')
def check_simplified_invoice_unique_prefix(self):
if self._context.get('copy_pos_config'):
return
for sequence in self.filtered(
lambda x: x.code == 'pos.config.simplified_invoice'):
if self.search_count([
('code', '=', 'pos.config.simplified_invoice'),
('prefix', '=', sequence.prefix)]) > 1:
raise UserError(_('There is already a simplified invoice '
'sequence with that prefix and it should be '
'unique.'))
| factorlibre/l10n-spain | l10n_es_pos/models/ir_sequence.py | Python | agpl-3.0 | 890 | 0 |
# just listing list of requires. will create a set up using these
"""
airflow>=1.7.1,
numpy>=1.1,
requests>=2.1,
pymongo==3.4.0,
pytest>=3.0,
simplejson==3.10.0,
tox==2.6
PyYAML==3.12
"""
| mettadatalabs1/oncoscape-datapipeline | setup.py | Python | apache-2.0 | 191 | 0 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing regions."""
from googlecloudsdk.api_lib.compute import base_classes
class List(base_classes.GlobalLister):
"""List Google Compute Engine regions."""
@property
def service(self):
return self.compute.regions
@property
def resource_type(self):
return 'regions'
List.detailed_help = base_classes.GetGlobalListerHelp('regions')
| KaranToor/MA450 | google-cloud-sdk/lib/surface/compute/regions/list.py | Python | apache-2.0 | 965 | 0.005181 |
'''
Run unit tests.
'''
import inspect
import os
import rez.vendor.argparse as argparse
from pkgutil import iter_modules
cli_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
src_rez_dir = os.path.dirname(cli_dir)
tests_dir = os.path.join(src_rez_dir, 'tests')
all_module_tests = []
def setup_parser(parser, completions=False):
parser.add_argument(
"tests", metavar="NAMED_TEST", default=[], nargs="*",
help="a specific test module/class/method to run; may be repeated "
"multiple times; if no tests are given, through this or other flags, "
"all tests are run")
parser.add_argument(
"-s", "--only-shell", metavar="SHELL",
help="limit shell-dependent tests to the specified shell")
# make an Action that will append the appropriate test to the "--test" arg
class AddTestModuleAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
name = option_string.lstrip('-')
if getattr(namespace, "module_tests", None) is None:
namespace.module_tests = []
namespace.module_tests.append(name)
# find unit tests
tests = []
prefix = "test_"
for importer, name, ispkg in iter_modules([tests_dir]):
if not ispkg and name.startswith(prefix):
module = importer.find_module(name).load_module(name)
name_ = name[len(prefix):]
all_module_tests.append(name_)
tests.append((name_, module))
# create argparse entry for each module's unit test
for name, module in sorted(tests):
parser.add_argument(
"--%s" % name, action=AddTestModuleAction, nargs=0,
dest="module_tests", default=[],
help=module.__doc__.strip().rstrip('.'))
def command(opts, parser, extra_arg_groups=None):
import sys
from rez.vendor.unittest2.main import main
os.environ["__REZ_SELFTEST_RUNNING"] = "1"
if opts.only_shell:
os.environ["__REZ_SELFTEST_SHELL"] = opts.only_shell
if not opts.module_tests and not opts.tests:
module_tests = all_module_tests
else:
module_tests = opts.module_tests
module_tests = [("rez.tests.test_%s" % x) for x in sorted(module_tests)]
tests = module_tests + opts.tests
argv = [sys.argv[0]] + tests
main(module=None, argv=argv, verbosity=opts.verbose)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| cwmartin/rez | src/rez/cli/selftest.py | Python | lgpl-3.0 | 3,095 | 0.000323 |
from Website.site_base import BaseHandler
import tornado.web
import tornado
import SQL.table_simulation as SQLsim
class RawPacketHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
if self.current_user is None:
self.redirect('login.html?next=edit')
return
if self.current_user.permission < 9000:
self.redirect('/')
return
sim = self.database.query(SQLsim.Simulation).filter( SQLsim.Simulation.status==1 ).all()
print('Sim: ' + str(sim))
self.render('raw_packet.html', simulation=sim)
self.database.commit() #Important otherwise transactions are reused and cached data is used not (possible) new
def post(self):
print(str(self.request))
print('Message: ' + str( self.get_argument('message', '')))
print('Client: ' + str( self.get_argument('client', '')))
print('header: ' + str( self.get_argument('header', '')))
self.redirect('/raw') | matyro/Cor-RC | Website/site_rawPacket.py | Python | mit | 1,137 | 0.024626 |
# -*- coding: utf-8 -*-
import os
from collections import defaultdict
from random import choice
world = defaultdict(int)
possiblepoints = [(x, y) for x in range(-15, 16)
for y in range(-15, 16)
if 10 <= abs(x + y * 1j) <= 15]
for i in range(100):
world[choice(possiblepoints)] += 1
for x in range(-15, 16):
print(''.join(str(min([9, world[(x, y)]])) if world[(x, y)] else ' '
for y in range(-15, 16)))
for i in range(1000):
world[choice(possiblepoints)] += 1
for x in range(-15, 16):
print(''.join(str(min([9, world[(x, y)]])) if world[(x, y)] else ' '
for y in range(-15, 16)))
os.system("pause")
| NicovincX2/Python-3.5 | Théorie des nombres/Nombre/Nombre aléatoire/random_points_on_a_circle.py | Python | gpl-3.0 | 691 | 0 |
# This file is part of MyPaint.
# Copyright (C) 2014 by Andrew Chadwick <[email protected]>
# Copyright (C) 2009 by Ilya Portnov <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Layers panel"""
## Imports
from __future__ import division, print_function
from gettext import gettext as _
import os.path
from logging import getLogger
logger = getLogger(__name__)
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
import lib.layer
import lib.xml
import widgets
from widgets import inline_toolbar
from workspace import SizedVBoxToolWidget
import layers
## Module constants
#: UI XML for the current layer's class (framework: ``layerswindow.xml``)
LAYER_CLASS_UI = [
(lib.layer.SurfaceBackedLayer, """
<popup name='LayersWindowPopup'>
<placeholder name="BasicLayerActions">
<menuitem action='CopyLayer'/>
</placeholder>
</popup>
"""),
(lib.layer.PaintingLayer, """
<popup name='LayersWindowPopup'>
<placeholder name="BasicLayerActions">
<menuitem action='PasteLayer'/>
<menuitem action='ClearLayer'/>
</placeholder>
<placeholder name='AdvancedLayerActions'>
<menuitem action='TrimLayer'/>
</placeholder>
</popup>
"""),
(lib.layer.ExternallyEditable, """
<popup name='LayersWindowPopup'>
<placeholder name='BasicLayerActions'>
<separator/>
<menuitem action='BeginExternalLayerEdit'/>
<menuitem action='CommitExternalLayerEdit'/>
<separator/>
</placeholder>
</popup>
"""),
]
## Class definitions
class LayersTool (SizedVBoxToolWidget):
"""Panel for arranging layers within a tree structure"""
## Class properties
tool_widget_icon_name = "mypaint-layers-symbolic"
tool_widget_title = _("Layers")
tool_widget_description = _("Arrange layers and assign effects")
LAYER_MODE_TOOLTIP_MARKUP_TEMPLATE = "<b>{name}</b>\n{description}"
#TRANSLATORS: tooltip for the opacity slider (text)
OPACITY_SCALE_TOOLTIP_TEXT_TEMPLATE = _("Layer opacity: %d%%")
__gtype_name__ = 'MyPaintLayersTool'
STATUSBAR_CONTEXT = 'layerstool-dnd'
#TRANSLATORS: status bar messages for drag, without/with modifiers
STATUSBAR_DRAG_MSG = _("Move layer in stack...")
STATUSBAR_DRAG_INTO_MSG = _("Move layer in stack (dropping into a "
"regular layer will create a new group)")
## Construction
def __init__(self):
GObject.GObject.__init__(self)
from application import get_app
app = get_app()
self.app = app
self.set_spacing(widgets.SPACING_CRAMPED)
self.set_border_width(widgets.SPACING_TIGHT)
# GtkTreeView init
docmodel = app.doc.model
view = layers.RootStackTreeView(docmodel)
self._treemodel = view.get_model()
self._treeview = view
# Motion and modifier keys during drag
view.current_layer_rename_requested += self._rename_current_layer_cb
view.current_layer_changed += self._blink_current_layer_cb
view.current_layer_menu_requested += self._popup_menu_cb
view.drag_began += self._view_drag_began_cb
view.drag_ended += self._view_drag_ended_cb
statusbar_cid = app.statusbar.get_context_id(self.STATUSBAR_CONTEXT)
self._drag_statusbar_context_id = statusbar_cid
# View scrolls
view_scroll = Gtk.ScrolledWindow()
view_scroll.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
scroll_pol = Gtk.PolicyType.AUTOMATIC
view_scroll.set_policy(scroll_pol, scroll_pol)
view_scroll.add(view)
view_scroll.set_size_request(-1, 100)
view_scroll.set_hexpand(True)
view_scroll.set_vexpand(True)
# Context menu
ui_dir = os.path.dirname(os.path.abspath(__file__))
ui_path = os.path.join(ui_dir, "layerswindow.xml")
self.app.ui_manager.add_ui_from_file(ui_path)
menu = self.app.ui_manager.get_widget("/LayersWindowPopup")
menu.set_title(_("Layer"))
self.connect("popup-menu", self._popup_menu_cb)
menu.attach_to_widget(self, None)
self._menu = menu
self._layer_specific_ui_mergeids = []
self._layer_specific_ui_class = None
# Main layout grid
grid = Gtk.Grid()
grid.set_row_spacing(widgets.SPACING_TIGHT)
grid.set_column_spacing(widgets.SPACING)
# Mode dropdown
row = 0
label = Gtk.Label(label=_('Mode:'))
label.set_tooltip_text(
_("Blending mode: how the current layer combines with the "
"layers underneath it."))
label.set_alignment(0, 0.5)
label.set_hexpand(False)
grid.attach(label, 0, row, 1, 1)
store = Gtk.ListStore(int, str, bool)
modes = lib.layer.STACK_MODES + lib.layer.STANDARD_MODES
for mode in modes:
label, desc = lib.layer.MODE_STRINGS.get(mode)
store.append([mode, label, True])
combo = Gtk.ComboBox()
combo.set_model(store)
combo.set_hexpand(True)
cell = Gtk.CellRendererText()
combo.pack_start(cell, True)
combo.add_attribute(cell, "text", 1)
combo.add_attribute(cell, "sensitive", 2)
self._layer_mode_combo = combo
grid.attach(combo, 1, row, 5, 1)
# Opacity slider
row += 1
opacity_lbl = Gtk.Label(label=_('Opacity:'))
opacity_lbl.set_tooltip_text(
_("Layer opacity: how much of the current layer to use. "
"Smaller values make it more transparent."))
opacity_lbl.set_alignment(0, 0.5)
opacity_lbl.set_hexpand(False)
adj = Gtk.Adjustment(lower=0, upper=100,
step_incr=1, page_incr=10)
self._opacity_scale = Gtk.HScale.new(adj)
self._opacity_scale.set_draw_value(False)
self._opacity_scale.set_hexpand(True)
grid.attach(opacity_lbl, 0, row, 1, 1)
grid.attach(self._opacity_scale, 1, row, 5, 1)
# Layer list and controls
row += 1
layersbox = Gtk.VBox()
style = layersbox.get_style_context()
style.add_class(Gtk.STYLE_CLASS_LINKED)
style = view_scroll.get_style_context()
style.set_junction_sides(Gtk.JunctionSides.BOTTOM)
list_tools = inline_toolbar(
self.app,
[
("NewLayerGroupAbove", "mypaint-layer-group-new-symbolic"),
("NewPaintingLayerAbove", "mypaint-add-symbolic"),
("RemoveLayer", "mypaint-remove-symbolic"),
("RaiseLayerInStack", "mypaint-up-symbolic"),
("LowerLayerInStack", "mypaint-down-symbolic"),
("DuplicateLayer", None),
("MergeLayerDown", None),
]
)
style = list_tools.get_style_context()
style.set_junction_sides(Gtk.JunctionSides.TOP)
layersbox.pack_start(view_scroll, True, True, 0)
layersbox.pack_start(list_tools, False, False, 0)
layersbox.set_hexpand(True)
layersbox.set_vexpand(True)
grid.attach(layersbox, 0, row, 6, 1)
# Background layer controls
row += 1
show_bg_btn = Gtk.CheckButton()
change_bg_act = self.app.find_action("BackgroundWindow")
change_bg_btn = widgets.borderless_button(action=change_bg_act)
show_bg_act = self.app.find_action("ShowBackgroundToggle")
show_bg_btn.set_related_action(show_bg_act)
grid.attach(show_bg_btn, 0, row, 5, 1)
grid.attach(change_bg_btn, 5, row, 1, 1)
# Pack
self.pack_start(grid, False, True, 0)
# Updates from the real layers tree (TODO: move to lib/layers.py)
self._processing_model_updates = False
self._opacity_scale.connect('value-changed',
self._opacity_scale_changed_cb)
self._layer_mode_combo.connect('changed',
self._layer_mode_combo_changed_cb)
rootstack = docmodel.layer_stack
rootstack.layer_properties_changed += self._layer_propchange_cb
rootstack.current_path_updated += self._current_path_updated_cb
# Initial update
self.connect("show", self._show_cb)
def _show_cb(self, event):
self._processing_model_updates = True
self._update_all()
self._processing_model_updates = False
## Updates from the model
def _current_path_updated_cb(self, rootstack, layerpath):
"""Respond to the current layer changing in the doc-model"""
self._processing_model_updates = True
self._update_all()
self._processing_model_updates = False
def _layer_propchange_cb(self, rootstack, path, layer, changed):
if self._processing_model_updates:
logger.debug("Property change skipped: already processing "
"an update from the document model")
if layer is not rootstack.current:
return
self._processing_model_updates = True
if "mode" in changed:
self._update_layer_mode_combo()
if "opacity" in changed or "mode" in changed:
self._update_opacity_scale()
self._processing_model_updates = False
## Model update processing
def _update_all(self):
assert self._processing_model_updates
self._update_context_menu()
self._update_layer_mode_combo()
self._update_opacity_scale()
def _update_layer_mode_combo(self):
"""Updates the layer mode combo's value from the model"""
assert self._processing_model_updates
combo = self._layer_mode_combo
rootstack = self.app.doc.model.layer_stack
current = rootstack.current
if current is rootstack or not current:
combo.set_sensitive(False)
return
elif not combo.get_sensitive():
combo.set_sensitive(True)
active_iter = None
current_mode = current.mode
for row in combo.get_model():
mode = row[0]
if mode == current_mode:
active_iter = row.iter
row[2] = (mode in current.PERMITTED_MODES)
combo.set_active_iter(active_iter)
label, desc = lib.layer.MODE_STRINGS.get(current_mode)
template = self.LAYER_MODE_TOOLTIP_MARKUP_TEMPLATE
tooltip = template.format(
name = lib.xml.escape(label),
description = lib.xml.escape(desc),
)
combo.set_tooltip_markup(tooltip)
def _update_opacity_scale(self):
"""Updates the opacity scale from the model"""
assert self._processing_model_updates
rootstack = self.app.doc.model.layer_stack
layer = rootstack.current
scale = self._opacity_scale
opacity_is_adjustable = not (
layer is None
or layer is rootstack
or layer.mode == lib.layer.PASS_THROUGH_MODE
)
scale.set_sensitive(opacity_is_adjustable)
if not opacity_is_adjustable:
return
percentage = layer.opacity * 100
scale.set_value(percentage)
template = self.OPACITY_SCALE_TOOLTIP_TEXT_TEMPLATE
tooltip = template % (percentage,)
scale.set_tooltip_text(tooltip)
def _update_context_menu(self):
assert self._processing_model_updates
layer = self.app.doc.model.layer_stack.current
layer_class = layer.__class__
if layer_class is self._layer_specific_ui_class:
return
ui_manager = self.app.ui_manager
for old_mergeid in self._layer_specific_ui_mergeids:
ui_manager.remove_ui(old_mergeid)
self._layer_specific_ui_mergeids = []
new_ui_matches = []
for lclass, lui in LAYER_CLASS_UI:
if isinstance(layer, lclass):
new_ui_matches.append(lui)
for new_ui in new_ui_matches:
new_mergeid = ui_manager.add_ui_from_string(new_ui)
self._layer_specific_ui_mergeids.append(new_mergeid)
self._layer_specific_ui_class = layer_class
## Updates from the user
def _rename_current_layer_cb(self, view):
rename_action = self.app.find_action("RenameLayer")
rename_action.activate()
def _blink_current_layer_cb(self, view):
self.app.doc.layerblink_state.activate()
def _view_drag_began_cb(self, view):
self._treeview_in_drag = True
statusbar = self.app.statusbar
statusbar_cid = self._drag_statusbar_context_id
statusbar.remove_all(statusbar_cid)
statusbar.push(statusbar_cid, self.STATUSBAR_DRAG_MSG)
def _view_drag_ended_cb(self, view):
self._treeview_in_drag = False
statusbar = self.app.statusbar
statusbar_cid = self._drag_statusbar_context_id
statusbar.remove_all(statusbar_cid)
def _opacity_scale_changed_cb(self, *ignore):
if self._processing_model_updates:
return
opacity = self._opacity_scale.get_value() / 100.0
docmodel = self.app.doc.model
docmodel.set_current_layer_opacity(opacity)
self._treeview.scroll_to_current_layer()
def _layer_mode_combo_changed_cb(self, *ignored):
"""Propagate the user's choice of layer mode to the model"""
if self._processing_model_updates:
return
docmodel = self.app.doc.model
combo = self._layer_mode_combo
model = combo.get_model()
mode = model.get_value(combo.get_active_iter(), 0)
if docmodel.layer_stack.current.mode == mode:
return
label, desc = lib.layer.MODE_STRINGS.get(mode)
docmodel.set_current_layer_mode(mode)
## Utility methods
def _popup_context_menu(self, event=None):
"""Display the popup context menu"""
if event is None:
time = Gtk.get_current_event_time()
button = 0
else:
time = event.time
button = event.button
self._menu.popup(None, None, None, None, button, time)
def _popup_menu_cb(self, widget, event=None):
"""Handler for "popup-menu" GtkEvents, and the view's @event"""
self._popup_context_menu(event=event)
return True
| achadwick/mypaint | gui/layerswindow.py | Python | gpl-2.0 | 14,745 | 0.001695 |
import sys
file_name = sys.argv[1]
with open(file_name, "r") as f:
num = int(f.readline())
    # all cases are read inside the with block so the file handle stays open
    for i in range(num):
        first_row = int(f.readline()) - 1
        first_board = list()
        for x in range(4):
            raw_line = f.readline()
            line = [int(x) for x in raw_line.split(" ")]
            first_board.append(line)
        second_row = int(f.readline()) - 1
        second_board = list()
        for x in range(4):
            raw_line = f.readline()
            line = [int(x) for x in raw_line.split(" ")]
            second_board.append(line)
        common_values = [x for x in first_board[first_row] if x in second_board[second_row]]
        if not common_values:
            case_string = "Volunteer cheated!"
        elif len(common_values) > 1:
            case_string = "Bad magician!"
        else:
            case_string = str(common_values[0])
        print("Case #" + str(i + 1) + ": " + case_string)
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from channel_facebook.channel import FacebookChannel
from channel_facebook.models import FacebookAccount
from core import models
class FacebookBaseTestCase(TestCase):
fixtures = ['core/fixtures/initial_data.json','channel_facebook/fixtures/initial_data.json']
class MockResponse:
def __init__(self, json_data, status_code, ok):
self.json_data = json_data
self.status_code = status_code
self.ok = ok
def json(self):
return self.json_data
def setUp(self):
self.time = timezone.now()
self.user = self.create_user()
self.facebook_account = self.create_facebook_account(self.user)
self.channel = FacebookChannel()
self.channel_name = models.Channel.objects.get(name="Facebook").name
self.channel_id = models.Channel.objects.get(name="Facebook").id
self.client = Client()
self.conditions = {'hashtag': '#me'}
self.fields = 'message,actions,full_picture,picture,from,created_time,link,permalink_url,type,description,source,object_id'
self.webhook_data = {
"time": self.time,
"id": "101915710270588",
"changed_fields": ["statuses"],
"uid": "101915710270588"
}
def create_user(self):
user = User.objects.create_user('Superuser',
'[email protected]',
'Password')
user.save()
return user
def create_facebook_account(self, user):
facebook_account = FacebookAccount(user=user,
username='101915710270588',
access_token='test token',
last_post_time=timezone.now()
)
facebook_account.save()
return facebook_account
| daisychainme/daisychain | daisychain/channel_facebook/tests/test_base.py | Python | mit | 2,078 | 0.001444 |
from .parameters import ParameterBasedType
from .log import LoggerFactory
from .packer import packer
from .misc.six import add_metaclass
TYPES_DESCRIPTIONS = {'generic': 'Generic module',
                      'functions_export': 'Such modules give functions that are useful to evaluation rules',
                      'connector': 'Such modules will export data to external tools',
                      'listener': 'Such modules will listen to external queries',
                      'handler': 'Such modules will add new handlers'}
MODULE_STATE_COLORS = {'STARTED': 'green', 'DISABLED': 'grey', 'ERROR': 'red'}
MODULE_STATES = ['STARTED', 'DISABLED', 'ERROR']
class ModulesMetaClass(type):
__inheritors__ = set()
def __new__(meta, name, bases, dct):
klass = type.__new__(meta, name, bases, dct)
# This class need to implement a real role to be load
if klass.implement:
# When creating the class, we need to look at the module where it is. It will be create like this (in modulemanager)
# module___global___windows___collector_iis ==> level=global pack_name=windows, collector_name=collector_iis
from_module = dct['__module__']
elts = from_module.split('___')
# Let the klass know it
klass.pack_level = elts[1]
klass.pack_name = elts[2]
meta.__inheritors__.add(klass)
return klass
@add_metaclass(ModulesMetaClass)
class Module(ParameterBasedType):
implement = ''
module_type = 'generic'
@classmethod
def get_sub_class(cls):
return cls.__inheritors__
def __init__(self):
ParameterBasedType.__init__(self)
self.daemon = None
# Global logger for this part
self.logger = LoggerFactory.create_logger('module.%s' % self.__class__.pack_name)
if hasattr(self, 'pack_level') and hasattr(self, 'pack_name'):
self.pack_directory = packer.get_pack_directory(self.pack_level, self.pack_name)
else:
self.pack_directory = ''
def get_info(self):
return {'configuration': self.get_config(), 'state': 'DISABLED', 'log': ''}
def prepare(self):
return
def launch(self):
return
def export_http(self):
return
# Call when the daemon go down.
# WARNING: maybe the daemon thread is still alive, beware
# of the paralel data access
def stopping_agent(self):
pass
class FunctionsExportModule(Module):
module_type = 'functions_export'
class ConnectorModule(Module):
module_type = 'connector'
class ListenerModule(Module):
module_type = 'listener'
class HandlerModule(Module):
module_type = 'handler'
def __init__(self):
super(HandlerModule, self).__init__()
from .handlermgr import handlermgr
implement = self.implement
if not implement:
self.logger.error('Unknown implement type for module, cannot load it.')
return
handlermgr.register_handler_module(implement, self)
| naparuba/kunai | opsbro/module.py | Python | mit | 3,141 | 0.013372 |
from django.test import TestCase
class AnimalTestCase(TestCase):
def setUp(self):
print 2
def test_animals_can_speak(self):
"""Animals that can speak are correctly identified"""
print 3
| inmagik/contento | contento/tests/__init__.py | Python | mit | 220 | 0.004545 |
"""
An H2OConnection represents the latest active handle to a cloud. No more than a single
H2OConnection object will be active at any one time.
"""
from __future__ import print_function
from __future__ import absolute_import
import requests
import math
import tempfile
import os
import re
import sys
import time
import subprocess
import atexit
import warnings
import site
from .display import H2ODisplay
from .h2o_logging import _is_logging, _log_rest
from .two_dim_table import H2OTwoDimTable
from .utils.shared_utils import quote
from six import iteritems, PY3
from string import ascii_lowercase, digits
from random import choice
warnings.simplefilter('always', UserWarning)
try:
warnings.simplefilter('ignore', requests.packages.urllib3.exceptions.InsecureRequestWarning)
except:
pass
__H2OCONN__ = None # the single active connection to H2O cloud
__H2O_REST_API_VERSION__ = 3 # const for the version of the rest api
class H2OConnection(object):
"""
H2OConnection is a class that represents a connection to the H2O cluster.
It is specified by an IP address and a port number.
Objects of type H2OConnection are not instantiated directly!
This class contains static methods for performing the common REST methods
GET, POST, and DELETE.
"""
__ENCODING__ = "utf-8"
__ENCODING_ERROR__ = "replace"
def __init__(self, ip, port, start_h2o, enable_assertions, license, nthreads, max_mem_size, min_mem_size, ice_root,
strict_version_check, proxy, https, insecure, username, password, max_mem_size_GB, min_mem_size_GB, proxies, size):
"""
Instantiate the package handle to the H2O cluster.
:param ip: An IP address, default is "localhost"
:param port: A port, default is 54321
:param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyways if _connect fails.
:param enable_assertions: If start_h2o, pass `-ea` as a VM option.
:param license: If not None, is a path to a license file.
:param nthreads: Number of threads in the thread pool. This relates very closely to the number of CPUs used.
-1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly. This value is only used when Python starts H2O.
:param max_mem_size: Maximum heap size (jvm option Xmx) in gigabytes.
:param min_mem_size: Minimum heap size (jvm option Xms) in gigabytes.
:param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
:param strict_version_check: Setting this to False is unsupported and should only be done when advised by technical support.
:param proxy: A dictionary with keys 'ftp', 'http', 'https' and values that correspond to a proxy path.
:param https: Set this to True to use https instead of http.
:param insecure: Set this to True to disable SSL certificate checking.
:param username: Username to login with.
:param password: Password to login with.
:param max_mem_size_GB: DEPRECATED. Use max_mem_size.
:param min_mem_size_GB: DEPRECATED. Use min_mem_size.
:param proxies: DEPRECATED. Use proxy.
:param size: DEPRECATED.
:return: None
"""
port = as_int(port)
    if not (isinstance(port, int) and 0 <= port <= sys.maxsize): raise ValueError("Port out of range, " + str(port))
if https != insecure: raise ValueError("`https` and `insecure` must both be True to enable HTTPS")
#Deprecated params
if max_mem_size_GB is not None:
warnings.warn("`max_mem_size_GB` is deprecated. Use `max_mem_size` instead.", category=DeprecationWarning)
max_mem_size = max_mem_size_GB
if min_mem_size_GB is not None:
warnings.warn("`min_mem_size_GB` is deprecated. Use `min_mem_size` instead.", category=DeprecationWarning)
min_mem_size = min_mem_size_GB
if proxies is not None:
warnings.warn("`proxies` is deprecated. Use `proxy` instead.", category=DeprecationWarning)
proxy = proxies
if size is not None:
warnings.warn("`size` is deprecated.", category=DeprecationWarning)
global __H2OCONN__
self._cld = None
self._ip = ip
self._port = port
self._proxy = proxy
self._https = https
self._insecure = insecure
self._username = username
self._password = password
self._session_id = None
self._rest_version = __H2O_REST_API_VERSION__
self._child = getattr(__H2OCONN__, "_child") if hasattr(__H2OCONN__, "_child") else None
__H2OCONN__ = self
#Give user warning if proxy environment variable is found. PUBDEV-2504
for name, value in os.environ.items():
if name.lower()[-6:] == '_proxy' and value:
warnings.warn("Proxy environment variable `" + name + "` with value `" + value + "` found. This may interfere with your H2O Connection.")
jarpaths = H2OConnection.jar_paths()
if os.path.exists(jarpaths[0]): jar_path = jarpaths[0]
elif os.path.exists(jarpaths[1]): jar_path = jarpaths[1]
elif os.path.exists(jarpaths[2]): jar_path = jarpaths[2]
elif os.path.exists(jarpaths[3]): jar_path = jarpaths[3]
elif os.path.exists(jarpaths[4]): jar_path = jarpaths[4]
else: jar_path = jarpaths[5]
try:
cld = self._connect()
except:
# try to start local jar or re-raise previous exception
if not start_h2o: raise ValueError("Cannot connect to H2O server. Please check that H2O is running at {}".format(H2OConnection.make_url("")))
print()
print()
print("No instance found at ip and port: " + ip + ":" + str(port) + ". Trying to start local jar...")
print()
print()
path_to_jar = os.path.exists(jar_path)
if path_to_jar:
if not ice_root:
ice_root = tempfile.mkdtemp()
cld = self._start_local_h2o_jar(max_mem_size, min_mem_size, enable_assertions, license, ice_root, jar_path, nthreads)
else:
print("No jar file found. Could not start local instance.")
print("Jar Paths searched: ")
for jp in jarpaths:
print("\t" + jp)
print()
raise
__H2OCONN__._cld = cld
if strict_version_check and os.environ.get('H2O_DISABLE_STRICT_VERSION_CHECK') is None:
ver_h2o = cld['version']
from .__init__ import __version__
ver_pkg = "UNKNOWN" if __version__ == "SUBST_PROJECT_VERSION" else __version__
if ver_h2o != ver_pkg:
try:
branch_name_h2o = cld['branch_name']
except KeyError:
branch_name_h2o = None
else:
branch_name_h2o = cld['branch_name']
try:
build_number_h2o = cld['build_number']
except KeyError:
build_number_h2o = None
else:
build_number_h2o = cld['build_number']
if build_number_h2o is None:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == 'unknown':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == '99999':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, str(ver_pkg)))
else:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, str(ver_pkg),branch_name_h2o, build_number_h2o))
self._session_id = H2OConnection.get_json(url_suffix="InitID")["session_key"]
H2OConnection._cluster_info()
@staticmethod
def default():
H2OConnection.__ENCODING__ = "utf-8"
H2OConnection.__ENCODING_ERROR__ = "replace"
@staticmethod
def jar_paths():
sys_prefix1 = sys_prefix2 = sys.prefix
if sys_prefix1.startswith('/Library'): sys_prefix2 = '/System'+sys_prefix1
elif sys_prefix1.startswith('/System'): sys_prefix2 = sys_prefix1.split('/System')[1]
return [os.path.join(sys_prefix1, "h2o_jar", "h2o.jar"),
os.path.join(os.path.sep,"usr","local","h2o_jar","h2o.jar"),
os.path.join(sys_prefix1, "local", "h2o_jar", "h2o.jar"),
os.path.join(site.USER_BASE, "h2o_jar", "h2o.jar"),
os.path.join(sys_prefix2, "h2o_jar", "h2o.jar"),
os.path.join(sys_prefix2, "h2o_jar", "h2o.jar"),
]
@staticmethod
def _cluster_info():
global __H2OCONN__
cld = __H2OCONN__._cld
ncpus = sum([n['num_cpus'] for n in cld['nodes']])
allowed_cpus = sum([n['cpus_allowed'] for n in cld['nodes']])
mfree = sum([n['free_mem'] for n in cld['nodes']])
cluster_health = all([n['healthy'] for n in cld['nodes']])
ip = "127.0.0.1" if __H2OCONN__._ip=="localhost" else __H2OCONN__._ip
cluster_info = [
["H2O cluster uptime: ", get_human_readable_time(cld["cloud_uptime_millis"])],
["H2O cluster version: ", cld["version"]],
["H2O cluster name: ", cld["cloud_name"]],
["H2O cluster total nodes: ", cld["cloud_size"]],
["H2O cluster total free memory: ", get_human_readable_size(mfree)],
["H2O cluster total cores: ", str(ncpus)],
["H2O cluster allowed cores: ", str(allowed_cpus)],
["H2O cluster healthy: ", str(cluster_health)],
["H2O Connection ip: ", ip],
["H2O Connection port: ", __H2OCONN__._port],
["H2O Connection proxy: ", __H2OCONN__._proxy],
["Python Version: ", sys.version.split()[0]],
]
__H2OCONN__._cld = H2OConnection.get_json(url_suffix="Cloud") # update the cached version of cld
H2ODisplay(cluster_info)
def _connect(self, size=1, max_retries=5, print_dots=False):
"""
Does not actually "connect", instead simply tests that the cluster can be reached,
is of a certain size, and is taking basic status commands.
:param size: The number of H2O instances in the cloud.
:return: The JSON response from a "stable" cluster.
"""
retries = 0
while True:
retries += 1
if print_dots:
self._print_dots(retries)
try:
cld = H2OConnection.get_json(url_suffix="Cloud")
if not cld['cloud_healthy']:
raise ValueError("Cluster reports unhealthy status", cld)
if cld['cloud_size'] >= size and cld['consensus']:
if print_dots: print(" Connection successful!")
return cld
except EnvironmentError:
pass
# Cloud too small or voting in progress; sleep; try again
time.sleep(0.1)
if retries > max_retries:
raise EnvironmentError("Max retries exceeded. Could not establish link to the H2O cloud @ " + str(self._ip) + ":" + str(self._port))
def _print_dots(self, retries):
sys.stdout.write("\rStarting H2O JVM and connecting: {}".format("." * retries))
sys.stdout.flush()
def _start_local_h2o_jar(self, mmax, mmin, ea, license, ice, jar_path, nthreads):
command = H2OConnection._check_java()
if license:
if not os.path.exists(license):
raise ValueError("License file not found (" + license + ")")
if not ice:
raise ValueError("`ice_root` must be specified")
stdout = open(H2OConnection._tmp_file("stdout"), 'w')
stderr = open(H2OConnection._tmp_file("stderr"), 'w')
print("Using ice_root: " + ice)
print()
jver = subprocess.check_output([command, "-version"], stderr=subprocess.STDOUT)
if PY3: jver = str(jver, H2OConnection.__ENCODING__)
print()
print("Java Version: " + jver)
print()
if "GNU libgcj" in jver:
raise ValueError("Sorry, GNU Java is not supported for H2O.\n"+
"Please download the latest Java SE JDK 7 from the following URL:\n"+
"http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html")
if "Client VM" in jver:
print("WARNING: ")
print("You have a 32-bit version of Java. H2O works best with 64-bit Java.")
print("Please download the latest Java SE JDK 7 from the following URL:")
print("http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html")
print()
vm_opts = []
if mmin: vm_opts += ["-Xms{}g".format(mmin)]
if mmax: vm_opts += ["-Xmx{}g".format(mmax)]
if ea: vm_opts += ["-ea"]
h2o_opts = ["-verbose:gc",
"-XX:+PrintGCDetails",
"-XX:+PrintGCTimeStamps",
"-jar", jar_path,
"-name", "H2O_started_from_python_"
+ re.sub("[^A-Za-z0-9]", "_",
(os.getenv("USERNAME") if sys.platform == "win32" else os.getenv("USER")) or "unknownUser")
+ "_" + "".join([choice(ascii_lowercase) for _ in range(3)] + [choice(digits) for _ in range(3)]),
"-ip", "127.0.0.1",
"-port", "54321",
"-ice_root", ice,
]
if nthreads > 0: h2o_opts += ["-nthreads", str(nthreads)]
if license: h2o_opts += ["-license", license]
cmd = [command] + vm_opts + h2o_opts
cwd = os.path.abspath(os.getcwd())
if sys.platform == "win32":
self._child = subprocess.Popen(args=cmd,stdout=stdout,stderr=stderr,cwd=cwd,creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self._child = subprocess.Popen(args=cmd, stdout=stdout, stderr=stderr, cwd=cwd, preexec_fn=os.setsid)
cld = self._connect(1, 30, True)
return cld
@staticmethod
def _check_java():
# *WARNING* some over-engineering follows... :{
# is java in PATH?
if H2OConnection._pwhich("java"):
return H2OConnection._pwhich("java")
# check if JAVA_HOME is set (for windoz)
if os.getenv("JAVA_HOME"):
return os.path.join(os.getenv("JAVA_HOME"), "bin", "java.exe")
# check /Program Files/ and /Program Files (x86)/ if os is windoz
if sys.platform == "win32":
program_folder = os.path.join("C:", "{}", "Java")
program_folders = [program_folder.format("Program Files"),
program_folder.format("Program Files (x86)")]
# check both possible program files...
for folder in program_folders:
# hunt down the jdk directory
possible_jdk_dir = [d for d in folder if 'jdk' in d]
# if got a non-empty list of jdk directory candidates
if len(possible_jdk_dir) != 0:
# loop over and check if the java.exe exists
for jdk in possible_jdk_dir:
path = os.path.join(folder, jdk, "bin", "java.exe")
if os.path.exists(path):
return path
# check for JRE and warn
for folder in program_folders:
path = os.path.join(folder, "jre7", "bin", "java.exe")
if os.path.exists(path):
raise ValueError("Found JRE at " + path + "; but H2O requires the JDK to run.")
else:
raise ValueError("Cannot find Java. Please install the latest JDK from\n"
+"http://www.oracle.com/technetwork/java/javase/downloads/index.html" )
@staticmethod
def _pwhich(e):
"""
POSIX style which
"""
ok = os.X_OK
if e:
if os.access(e, ok):
return e
for path in os.getenv('PATH').split(os.pathsep):
full_path = os.path.join(path, e)
if os.access(full_path, ok):
return full_path
return None
@staticmethod
def _tmp_file(type):
usr = re.sub("[^A-Za-z0-9]", "_", (os.getenv("USERNAME") if sys.platform == "win32" else os.getenv("USER")) or "unknownUser")
if type == "stdout":
path = os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.out".format(usr))
print("JVM stdout: " + path)
return path
if type == "stderr":
path = os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.err".format(usr))
print("JVM stderr: " + path)
return path
if type == "pid":
return os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.pid".format(usr))
raise ValueError("Unkown type in H2OConnection._tmp_file call: " + type)
@staticmethod
def _shutdown(conn, prompt):
"""
Shut down the specified instance. All data will be lost.
This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O
instance.
:param conn: An H2OConnection object containing the IP address and port of the server running H2O.
:param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
:return: None
"""
global __H2OCONN__
if conn is None: raise ValueError("There is no H2O instance running.")
try:
if not conn.cluster_is_up(conn): raise ValueError("There is no H2O instance running at ip: {0} and port: "
"{1}".format(conn.ip(), conn.port()))
except:
#H2O is already shutdown on the java side
ip = conn.ip()
port = conn.port()
__H2OCONN__= None
raise ValueError("The H2O instance running at {0}:{1} has already been shutdown.".format(ip, port))
if not isinstance(prompt, bool): raise ValueError("`prompt` must be TRUE or FALSE")
if prompt:
question = "Are you sure you want to shutdown the H2O instance running at {0}:{1} (Y/N)? ".format(conn.ip(), conn.port())
response = input(question) if PY3 else raw_input(question)
else: response = "Y"
if response == "Y" or response == "y":
conn.post(url_suffix="Shutdown")
__H2OCONN__ = None #so that the "Did you run `h2o.init()`" ValueError is triggered
@staticmethod
def rest_version(): return __H2OCONN__._rest_version
@staticmethod
def session_id(): return __H2OCONN__._session_id
@staticmethod
def port(): return __H2OCONN__._port
@staticmethod
def ip(): return __H2OCONN__._ip
@staticmethod
def https(): return __H2OCONN__._https
@staticmethod
def username(): return __H2OCONN__._username
@staticmethod
def password(): return __H2OCONN__._password
@staticmethod
def insecure(): return __H2OCONN__._insecure
@staticmethod
def current_connection(): return __H2OCONN__
@staticmethod
def check_conn():
if not __H2OCONN__:
raise EnvironmentError("No active connection to an H2O cluster. Try calling `h2o.init()`")
return __H2OCONN__
@staticmethod
def cluster_is_up(conn):
"""
Determine if an H2O cluster is up or not
:param conn: An H2OConnection object containing the IP address and port of the server running H2O.
:return: TRUE if the cluster is up; FALSE otherwise
"""
if not isinstance(conn, H2OConnection): raise ValueError("`conn` must be an H2OConnection object")
rv = conn.current_connection()._attempt_rest(url=("https" if conn.https() else "http") +"://{0}:{1}/".format(conn.ip(), conn.port()), method="GET",
post_body="", file_upload_info="")
if rv.status_code == 401: warnings.warn("401 Unauthorized Access. Did you forget to provide a username and password?")
return rv.status_code == 200 or rv.status_code == 301
"""
Below is the REST implementation layer:
_attempt_rest -- GET, POST, DELETE
_do_raw_rest
get
post
get_json
post_json
All methods are static and rely on an active __H2OCONN__ object.
"""
@staticmethod
def make_url(url_suffix, _rest_version=None):
scheme = "https" if H2OConnection.https() else "http"
_rest_version = _rest_version or H2OConnection.rest_version()
return "{}://{}:{}/{}/{}".format(scheme,H2OConnection.ip(),H2OConnection.port(),_rest_version,url_suffix)
@staticmethod
def get(url_suffix, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._do_raw_rest(url_suffix, "GET", None, **kwargs)
@staticmethod
def post(url_suffix, file_upload_info=None, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._do_raw_rest(url_suffix, "POST", file_upload_info, **kwargs)
@staticmethod
def delete(url_suffix, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._do_raw_rest(url_suffix, "DELETE", None, **kwargs)
@staticmethod
def get_json(url_suffix, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._rest_json(url_suffix, "GET", None, **kwargs)
@staticmethod
def post_json(url_suffix, file_upload_info=None, **kwargs):
if __H2OCONN__ is None:
raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
return __H2OCONN__._rest_json(url_suffix, "POST", file_upload_info, **kwargs)
def _rest_json(self, url_suffix, method, file_upload_info, **kwargs):
raw_txt = self._do_raw_rest(url_suffix, method, file_upload_info, **kwargs)
return self._process_tables(raw_txt.json())
# Massage arguments into place, call _attempt_rest
def _do_raw_rest(self, url_suffix, method, file_upload_info, **kwargs):
if not url_suffix:
raise ValueError("No url suffix supplied.")
# allow override of REST version, currently used for Rapids which is /99
if '_rest_version' in kwargs:
_rest_version = kwargs['_rest_version']
del kwargs['_rest_version']
else:
_rest_version = self._rest_version
url = H2OConnection.make_url(url_suffix,_rest_version)
query_string = ""
for k,v in iteritems(kwargs):
if v is None: continue #don't send args set to None so backend defaults take precedence
if isinstance(v, list):
x = '['
for l in v:
if isinstance(l,list):
x += '['
x += ','.join([str(e) if PY3 else str(e).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__) for e in l])
x += ']'
else:
x += str(l) if PY3 else str(l).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__)
x += ','
x = x[:-1]
x += ']'
else:
x = str(v) if PY3 else str(v).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__)
query_string += k+"="+quote(x)+"&"
query_string = query_string[:-1] # Remove trailing extra &
post_body = ""
if not file_upload_info:
if method == "POST":
post_body = query_string
elif query_string != '':
url = "{}?{}".format(url, query_string)
else:
if not method == "POST":
raise ValueError("Received file upload info and expected method to be POST. Got: " + str(method))
if query_string != '':
url = "{}?{}".format(url, query_string)
if _is_logging():
_log_rest("------------------------------------------------------------\n")
_log_rest("\n")
_log_rest("Time: {0}\n".format(time.strftime('Y-%m-%d %H:%M:%OS3')))
_log_rest("\n")
_log_rest("{0} {1}\n".format(method, url))
_log_rest("postBody: {0}\n".format(post_body))
global _rest_ctr; _rest_ctr = _rest_ctr+1
begin_time_seconds = time.time()
http_result = self._attempt_rest(url, method, post_body, file_upload_info)
end_time_seconds = time.time()
elapsed_time_seconds = end_time_seconds - begin_time_seconds
elapsed_time_millis = elapsed_time_seconds * 1000
if not http_result.ok:
detailed_error_msgs = []
try:
result = http_result.json()
if 'messages' in result.keys():
detailed_error_msgs = '\n'.join([m['message'] for m in result['messages'] if m['message_type'] in ['ERRR']])
elif 'exception_msg' in result.keys():
detailed_error_msgs = result['exception_msg']
except ValueError:
pass
raise EnvironmentError(("h2o-py got an unexpected HTTP status code:\n {} {} (method = {}; url = {}). \n"+ \
"detailed error messages: {}")
.format(http_result.status_code,http_result.reason,method,url,detailed_error_msgs))
if _is_logging():
_log_rest("\n")
_log_rest("httpStatusCode: {0}\n".format(http_result.status_code))
_log_rest("httpStatusMessage: {0}\n".format(http_result.reason))
_log_rest("millis: {0}\n".format(elapsed_time_millis))
_log_rest("\n")
_log_rest("{0}\n".format(http_result.json()))
_log_rest("\n")
return http_result
# Low level request call
def _attempt_rest(self, url, method, post_body, file_upload_info):
auth = (self._username, self._password)
verify = not self._insecure
headers = {'User-Agent': 'H2O Python client/'+sys.version.replace('\n','')}
try:
if method == "GET":
return requests.get(url, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
elif file_upload_info:
files = {file_upload_info["file"] : open(file_upload_info["file"], "rb")}
return requests.post(url, files=files, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
elif method == "POST":
headers["Content-Type"] = "application/x-www-form-urlencoded"
return requests.post(url, data=post_body, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
elif method == "DELETE":
return requests.delete(url, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
else:
raise ValueError("Unknown HTTP method " + method)
except requests.ConnectionError as e:
raise EnvironmentError("h2o-py encountered an unexpected HTTP error:\n {}".format(e))
# TODO:
# @staticmethod
# def _process_matrices(x=None):
# if x:
# if isinstance(x, "dict"):
#
# return x
@staticmethod
def _process_tables(x=None):
if x:
if isinstance(x, dict):
has_meta = "__meta" in x
has_schema_type = has_meta and "schema_type" in x["__meta"]
have_table = has_schema_type and x["__meta"]["schema_type"] == "TwoDimTable"
if have_table:
col_formats = [c["format"] for c in x["columns"]]
table_header = x["name"]
table_descr = x["description"]
col_types = [c["type"] for c in x["columns"]]
col_headers = [c["name"] for c in x["columns"]]
row_headers = ["" for i in range(len(col_headers))]
cell_values = x["data"]
tbl = H2OTwoDimTable(row_header=row_headers, col_header=col_headers,
col_types=col_types, table_header=table_header,
raw_cell_values=cell_values,
col_formats=col_formats,table_description=table_descr)
x = tbl
else:
for k in x:
x[k] = H2OConnection._process_tables(x[k])
if isinstance(x, list):
for it in range(len(x)):
x[it] = H2OConnection._process_tables(x[it])
return x
global _rest_ctr
_rest_ctr = 0
@staticmethod
def rest_ctr(): global _rest_ctr; return _rest_ctr
# On exit, close the session to allow H2O to cleanup any temps
def end_session():
try:
H2OConnection.delete(url_suffix="InitID")
print("Sucessfully closed the H2O Session.")
except:
pass
def get_human_readable_size(num):
exp_str = [(0, 'B'), (10, 'KB'), (20, 'MB'), (30, 'GB'), (40, 'TB'), (50, 'PB'), ]
i = 0
rounded_val = 0
while i + 1 < len(exp_str) and num >= (2 ** exp_str[i + 1][0]):
i += 1
rounded_val = round(float(num) / 2 ** exp_str[i][0], 2)
return '%s %s' % (rounded_val, exp_str[i][1])
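# Illustrative examples (added): get_human_readable_size(1536) -> '1.5 KB',
# get_human_readable_size(3 * 2**30) -> '3.0 GB'.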
def get_human_readable_time(epochTimeMillis):
days = epochTimeMillis/(24*60*60*1000.0)
hours = (days-math.floor(days))*24
minutes = (hours-math.floor(hours))*60
seconds = (minutes-math.floor(minutes))*60
milliseconds = (seconds-math.floor(seconds))*1000
duration_vec = [int(math.floor(t)) for t in [days,hours,minutes,seconds,milliseconds]]
names_duration_vec = ["days","hours","minutes","seconds","milliseconds"]
duration_dict = dict(zip(names_duration_vec, duration_vec))
readable_time = ""
for name in names_duration_vec:
if duration_dict[name] > 0:
readable_time += str(duration_dict[name]) + " " + name + " "
return readable_time
def is_int(possible_int):
try:
int(possible_int)
return True
except ValueError:
return False
def as_int(the_int):
if not is_int(the_int):
raise ValueError("Not a valid int value: " + str(the_int))
return int(the_int)
def _kill_jvm_fork():
global __H2OCONN__
if __H2OCONN__ is not None:
if __H2OCONN__._child:
__H2OCONN__._child.kill()
print("Successfully stopped H2O JVM started by the h2o python module.")
atexit.register(_kill_jvm_fork)
atexit.register(end_session)
| YzPaul3/h2o-3 | h2o-py/h2o/connection.py | Python | apache-2.0 | 29,700 | 0.015354 |
#!/usr/bin/env python3
def main():
# import python's standard math module and numpy
import math, numpy, sys
# import Controller and other blocks from modules
from pyctrl.rc import Controller
from pyctrl.block import Interp, Logger, Constant
from pyctrl.block.system import System, Differentiator, Feedback
from pyctrl.system.tf import PID
# initialize controller
Ts = 0.01
bbb = Controller(period = Ts)
# add encoder as source
bbb.add_source('encoder1',
('pyctrl.rc.encoder', 'Encoder'),
['encoder'],
kwargs = {'encoder': 3,
'ratio': 60 * 35.557})
# add motor as sink
bbb.add_sink('motor1',
('pyctrl.rc.motor', 'Motor'),
['pwm'],
kwargs = {'motor': 3},
enable = True)
# add motor speed signal
bbb.add_signal('speed')
# add motor speed filter
bbb.add_filter('speed',
Differentiator(),
['clock','encoder'],
['speed'])
# calculate PI controller gains
tau = 1/55 # time constant (s)
g = 0.092 # gain (cycles/sec duty)
Kp = 1/g
Ki = Kp/tau
print('Controller gains: Kp = {}, Ki = {}'.format(Kp, Ki))
# build controller block
pid = System(model = PID(Kp = Kp, Ki = Ki, period = Ts))
# add motor speed signal
bbb.add_signal('speed_reference')
bbb.add_filter('PIcontrol',
Feedback(block = pid),
['speed','speed_reference'],
['pwm'])
# build interpolated input signal
ts = [0, 1, 2, 3, 4, 5, 5, 6]
us = [0, 0, 8, 8, -4, -4, 0, 0]
# add filter to interpolate data
bbb.add_filter('input',
Interp(xp = us, fp = ts),
['clock'],
['speed_reference'])
# add logger
bbb.add_sink('logger',
Logger(),
['clock','pwm','encoder','speed','speed_reference'])
# Add a timer to stop the controller
bbb.add_timer('stop',
Constant(value = 0),
None, ['is_running'],
period = 6, repeat = False)
# print controller info
print(bbb.info('all'))
try:
# run the controller
print('> Run the controller.')
# set speed_reference
#bbb.set_signal('speed_reference', 5)
# reset clock
bbb.set_source('clock', reset = True)
with bbb:
# wait for the controller to finish on its own
bbb.join()
print('> Done with the controller.')
except KeyboardInterrupt:
pass
finally:
pass
# read logger
data = bbb.get_sink('logger', 'log')
try:
# import matplotlib
import matplotlib.pyplot as plt
except:
print('! Could not load matplotlib, skipping plots')
sys.exit(0)
print('> Will plot')
try:
# start plot
plt.figure()
except:
print('! Could not plot graphics')
print('> Make sure you have a connection to a windows manager')
sys.exit(0)
# plot pwm
plt.subplot(2,1,1)
plt.plot(data['clock'], data['pwm'], 'b')
plt.ylabel('pwm (%)')
plt.ylim((-120,120))
plt.xlim(0,6)
plt.grid()
# plot encoder
plt.subplot(2,1,2)
plt.plot(data['clock'], data['encoder'],'b')
plt.ylabel('position (cycles)')
plt.ylim((0,25))
plt.xlim(0,6)
plt.grid()
# start plot
plt.figure()
# plot pwm
ax1 = plt.gca()
ax1.plot(data['clock'], data['pwm'],'g', label='pwm')
ax1.set_ylabel('pwm (%)')
ax1.set_ylim((-60,120))
ax1.grid()
plt.legend(loc = 2)
# plot velocity
ax2 = plt.twinx()
ax2.plot(data['clock'], data['speed'],'b', label='speed')
ax2.plot(data['clock'], data['speed_reference'], 'r', label='reference')
ax2.set_ylabel('speed (Hz)')
ax2.set_ylim((-6,12))
ax2.set_xlim(0,6)
ax2.grid()
plt.legend(loc = 1)
# show plots
plt.show()
if __name__ == "__main__":
main()
| mcdeoliveira/ctrl | examples/rc_motor_control.py | Python | apache-2.0 | 4,210 | 0.024703 |
#!/usr/bin/env python3
# pylint: disable=C0103, C0325, C0301
"""
Zipped Agoda Hotel Data File Parser
-----------------------------------
This utility unzips and parses the Agoda hotel data file, in-memory,
and makes the data available
"""
import csv
import zipfile
import io
import sys
class AgodaParser(object):
"""Class to manage parsing and searching of parsed data"""
def __init__(self, zipdatafile):
"""Read and parse Agoda hotel data from a zip file"""
if not zipfile.is_zipfile(zipdatafile):
print("ERROR: '{0}' is not a valid zip file".format(zipdatafile))
sys.exit(1)
zipfh = zipfile.ZipFile(zipdatafile, mode='r')
datafile = zipfh.infolist()[0]
with zipfh.open(datafile, mode='rU') as datafh:
datafh.read(3) # strips the BOM
csvReader = csv.DictReader(io.TextIOWrapper(datafh), delimiter=',', quotechar='"')
self.result = []
for row in csvReader:
                if not isinstance(row['rates_from'], float):
try:
rates_from = float(row['rates_from'])
except ValueError:
#print("ERROR: Unable to convert '{0}' to float for '{1}'".format(row['rates_from'], row['hotel_name']))
#print("DEBUG: '{0}'".format(row))
rates_from = 'Rates Not Available'
else:
rates_from = row['rates_from']
row['rates_from'] = rates_from
self.result.append(row)
zipfh.close()
def get_all(self):
"""Return the full list of hotels as a list of dictionaries"""
return self.result
def find(self, hotel_id=None):
"""Locate a specific hotel by id"""
        if hotel_id is None:
raise ValueError("Missing a hotel id")
hotel_id = str(hotel_id)
return next((item for item in self.result if item["hotel_id"] == hotel_id), None)
def find_url(self, url=None):
"""Locate a specific hotel by url snippet"""
        if url is None:
raise ValueError("Missing a hotel url")
return next((item for item in self.result if item["url"] in url), None)
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser(description='Parse zipped Agoda hotel data file')
argparser.add_argument("zipped_datafile", help="Agoda hotel datafile, in .zip format")
args = argparser.parse_args()
zipdatafile = args.zipped_datafile
parsed = AgodaParser(zipdatafile)
for entryrow in parsed.get_all():
if 'Rates Not Available' == entryrow['rates_from']:
print("{0} - '{1}': No rates available".format(entryrow['hotel_id'], entryrow['hotel_name']))
else:
print("{0} - '{1}' from '{2}' '{3}'".format(entryrow['hotel_id'], entryrow['hotel_name'], entryrow['rates_currency'], entryrow['rates_from']))
| egeland/agodaparser | agodaparser.py | Python | gpl-3.0 | 2,962 | 0.005402 |
import os
import osiris
import globalvars
class OmlRotator(osiris.OMLHtmlWrapper):
def __init__(self, tag):
osiris.OMLHtmlWrapper.__init__(self, tag, "div", False, "", "", "")
def processHtml(self, item, context):
extensionID = globalvars.extension.getID().getString()
context.page.addJavascript("/htdocs/js/oml/rotator.js")
item.setParam("id","rotator_" + osiris.UniqueID.generate().getString())
script = "<script type=\"text/javascript\">Rotator.init('" + item.getParam("id") + "');</script>";
return osiris.OMLHtmlWrapper.processHtml(self, item, context) + script;
| OsirisSPS/osiris-sps | client/data/extensions/148B613D055759C619D5F4EFD9FDB978387E97CB/scripts/oml/rotator.py | Python | gpl-3.0 | 610 | 0.04918 |
# SharePlum
# This library simplifies the code necessary
# to automate interactions with a SharePoint
# server using python
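# A minimal usage sketch (illustrative only: the URL, auth object and list
# name are made-up placeholders, and exact parameters may differ between
# SharePlum versions):
#
#     from shareplum import Site
#     site = Site('https://example.com/sites/MySite', auth=my_auth)
#     items = site.List('My List').GetListItems()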
from .office365 import Office365 # noqa: F401
from .site import Site # noqa: F401
from .version import __version__ # noqa: F401
__all__ = ["site", "office365"]
__title__ = "SharePlum SharePoint Library"
__author__ = "Jason Rollins"
| jasonrollins/shareplum | shareplum/__init__.py | Python | mit | 360 | 0 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mock import Mock
from gerrit import GerritClient
from test import CrBuildTestCase
SHORT_CHANGE_ID = 'I7c1811882cf59c1dc55018926edb6d35295c53b8'
CHANGE_ID = 'project~master~%s' % SHORT_CHANGE_ID
REVISION = '404d1697dca23824bc1130061a5bd2be4e073922'
class GerritClientTestCase(CrBuildTestCase):
def test_get_change(self):
req_path = 'changes/%s?o=ALL_REVISIONS' % CHANGE_ID
    change_response = {
'id': CHANGE_ID,
'project': 'project',
'branch': 'master',
'hashtags': [],
'change_id': SHORT_CHANGE_ID,
'subject': 'My change',
'status': 'NEW',
'created': '2014-10-17 18:24:39.193000000',
'updated': '2014-10-17 20:44:48.338000000',
'mergeable': True,
'insertions': 10,
'deletions': 11,
'_sortkey': '0030833c0002bff9',
'_number': 180217,
'owner': {
'name': 'John Doe'
},
'current_revision': REVISION,
'revisions': {
REVISION: {
'_number': 1,
'fetch': {
'http': {
'url': 'https://chromium.googlesource.com/html-office',
'ref': 'refs/changes/80/123/1'
}
}
}
}
}
client = GerritClient('chromium-review.googlesource.com')
    client._fetch = Mock(return_value=change_response)
change = client.get_change(CHANGE_ID)
client._fetch.assert_called_once_with(req_path)
self.assertIsNotNone(change)
self.assertEqual(change.change_id, SHORT_CHANGE_ID)
self.assertEqual(change.branch, 'master')
self.assertEqual(change.project, 'project')
self.assertEqual(change.owner.name, 'John Doe')
self.assertEqual(change.current_revision, REVISION)
# smoke test for branch coverage
change = client.get_change(CHANGE_ID, include_all_revisions=False,
include_owner_details=True)
def test_get_nonexistent_change(self):
client = GerritClient('chromium-review.googlesource.com')
change = client.get_change(CHANGE_ID)
self.assertIsNone(change)
def test_set_review(self):
req_path = 'changes/%s/revisions/%s/review' % (CHANGE_ID, REVISION)
labels = {'Verified': 1 }
client = GerritClient('chromium-review.googlesource.com')
client._fetch = Mock(return_value={'labels': labels})
client.set_review(CHANGE_ID, REVISION, message='Hi!', labels=labels)
client._fetch.assert_called_with(req_path, method='POST', body={
'message': 'Hi!',
'labels': labels,
})
# Test with "notify" parameter.
client.set_review(CHANGE_ID, REVISION, message='Hi!', labels=labels,
notify='all')
client._fetch.assert_called_with(req_path, method='POST', body={
'message': 'Hi!',
'labels': labels,
'notify': 'ALL',
})
with self.assertRaises(AssertionError):
client.set_review(CHANGE_ID, REVISION, notify='Argh!')
| nicko96/Chrome-Infra | appengine/crbuild/gerrit/test/client_test.py | Python | bsd-3-clause | 3,159 | 0.001266 |
"""Tool specific version checking to identify out of date dependencies.
This provides infrastructure to check version strings against installed
tools, enabling re-installation if a version doesn't match. This is a
lightweight way to avoid out of date dependencies.
"""
from __future__ import print_function
from distutils.version import LooseVersion
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
def _parse_from_stdoutflag(out, flag, stdout_index=-1):
"""Extract version information from a flag in verbose stdout.
flag -- text information to identify the line we should split for a version
stdout_index -- Position of the version information in the split line. Defaults
to the last item.
"""
for line in out.split("\n") + out.stderr.split("\n"):
if line.find(flag) >= 0:
parts = line.split()
return parts[stdout_index].strip()
print("Did not find version information with flag %s from: \n %s" % (flag, out))
return ""
def _clean_version(x):
if x.startswith("upstream/"):
x = x.replace("upstream/", "")
if x.startswith("("):
x = x[1:].strip()
if x.endswith(")"):
x = x[:-1].strip()
if x.startswith("v"):
x = x[1:].strip()
return x
def up_to_date(env, cmd, version, args=None, stdout_flag=None,
stdout_index=-1):
iversion = get_installed_version(env, cmd, version, args, stdout_flag,
stdout_index)
if not iversion:
return False
else:
return LooseVersion(iversion) >= LooseVersion(version)
def is_version(env, cmd, version, args=None, stdout_flag=None,
stdout_index=-1):
iversion = get_installed_version(env, cmd, version, args, stdout_flag,
stdout_index)
if not iversion:
return False
else:
return LooseVersion(iversion) == LooseVersion(version)
def get_installed_version(env, cmd, version, args=None, stdout_flag=None,
stdout_index=-1):
"""Check if the given command is up to date with the provided version.
"""
if shared._executable_not_on_path(cmd):
return False
if args:
cmd = cmd + " " + " ".join(args)
with quiet():
path_safe = ("export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:{s}/lib/pkgconfig && "
"export PATH=$PATH:{s}/bin && "
"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{s}/lib && ".format(s=env.system_install))
out = env.safe_run_output(path_safe + cmd)
if stdout_flag:
iversion = _parse_from_stdoutflag(out, stdout_flag, stdout_index)
else:
iversion = out.strip()
iversion = _clean_version(iversion)
if " not found in the pkg-config search path" in iversion:
return False
return iversion
| chapmanb/cloudbiolinux | cloudbio/custom/versioncheck.py | Python | mit | 2,864 | 0.003142 |
from .stats import * # noqa
| sendgrid/sendgrid-python | sendgrid/helpers/stats/__init__.py | Python | mit | 29 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Pupil.birthday'
db.add_column('gsaudit_pupil', 'birthday',
self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2012, 6, 17, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Pupil.birthday'
db.delete_column('gsaudit_pupil', 'birthday')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gsaudit.audit': {
'Meta': {'object_name': 'Audit'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.TeachingAssignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gsaudit.auditskill': {
'Meta': {'object_name': 'AuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'gsaudit.grade': {
'Meta': {'object_name': 'Grade'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"})
},
'gsaudit.gradeparticipant': {
'Meta': {'object_name': 'GradeParticipant'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"})
},
'gsaudit.pupil': {
'Meta': {'ordering': "('first_name', 'last_name')", 'object_name': 'Pupil'},
'birthday': ('django.db.models.fields.DateField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {})
},
'gsaudit.pupilauditskill': {
'Meta': {'object_name': 'PupilAuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'diagnosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gsaudit.pupiltainfo': {
'Meta': {'unique_together': "(('pupil', 'teaching_assignment'),)", 'object_name': 'PupilTAInfo'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'teaching_assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.TeachingAssignment']"}),
'written_exam_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'gsaudit.school': {
'Meta': {'object_name': 'School'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'gsaudit.skill': {
'Meta': {'object_name': 'Skill'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['gsaudit.Skill']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'gsaudit.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'gsaudit.teacher': {
'Meta': {'object_name': 'Teacher', '_ormbases': ['auth.User']},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'gsaudit.teachingassignment': {
'Meta': {'object_name': 'TeachingAssignment'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Subject']"}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Teacher']"})
}
}
complete_apps = ['gsaudit']
| schnapptack/gskompetenzen | features/gsaudit/migrations/0011_auto__add_field_pupil_birthday.py | Python | agpl-3.0 | 12,951 | 0.006486 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Qiang Li
# Email: [email protected]
# Time: 10:27, 03/30/2017
import sys
import codecs
import argparse
import random
from io import open
argparse.open = open
reload(sys)
sys.setdefaultencoding('utf8')
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Text Chunking')
parser.add_argument(
'--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH', help='Input text (default: standard input).')
parser.add_argument(
'--outword', '-w', type=argparse.FileType('w'), required=True,
metavar='PATH', help='Output word file')
parser.add_argument(
'--outlabel', '-l', type=argparse.FileType('w'), required=True,
metavar='PATH', help='Output label file')
return parser
def pos_postprocess(ifobj, owfobj, olfobj, ologfobj):
line_word = ''
line_label = ''
total_words = 0
reserved_words = 0
remove_words = 0
for line in ifobj:
line = line.strip()
if line == '':
line_word = line_word.strip()
line_label = line_label.strip()
owfobj.write('{0}\n'.format(line_word))
olfobj.write('{0}\n'.format(line_label))
line_word = ''
line_label = ''
else:
words = line.split('\t')
total_words += 1
if words[0] == '':
words[0] = 'NA'
if words[3] == '':
words[3] = 'O'
if "NP" in words[3]:
words[0] = '#'
words[3] = '#'
remove_words += 1
line_word += ' '+words[0]
line_label += ' '+words[3]
ologfobj.write('total word:{0}\n'.format(total_words))
ologfobj.write('remove word:{0}\n'.format(remove_words))
reserve_words = total_words - remove_words
ologfobj.write('reserve word:{0}\n'.format(reserve_words))
reserve_rate = float(reserve_words) / float(total_words)
print reserve_rate
ologfobj.write('reserve rate:{0}\n'.format(reserve_rate))
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
# read/write files as UTF-8
if args.input.name != '<stdin>':
args.input = codecs.open(args.input.name, encoding='utf-8')
args.outword = codecs.open(args.outword.name, 'w', encoding='utf-8')
args.outlabel = codecs.open(args.outlabel.name, 'w', encoding='utf-8')
args.outlog = codecs.open(args.outword.name+".log", 'w', encoding='utf-8')
pos_postprocess(args.input, args.outword, args.outlabel, args.outlog)
| liqiangnlp/LiNMT | scripts/hint.aware/chunk/eng/LiNMT-postprocess-text-chunking-rmNP.py | Python | mit | 2,875 | 0.017043 |
import math,sys
from math import pi
def ieee754 (a):
rep = 0
#sign bit
if (a<0):
rep = 1<<31
a = math.fabs(a)
if (a >= 1):
#exponent
exp = int(math.log(a,2))
rep = rep|((exp+127)<<23)
#mantissa
temp = a / pow(2,exp) - 1
i = 22
while i>=0:
temp = temp * 2
if temp > 1:
rep = rep | (1<<i)
temp = temp - 1
i-=1
return rep
elif ((a<1) and (a!=0)):
#exponent
exp = 0
temp = a
while temp<1 :
temp = temp*2
exp +=1
rep = rep |((127 - exp)<<23)
#mantissa
temp = temp - 1
i = 22
while i>=0:
temp = temp * 2
if temp > 1:
rep = rep | (1<<i)
temp = temp - 1
i-=1
return rep
else :
return 0
def ieee754tofloat (a):
ex = (a & 0x7F800000)>>23
ex = ex - 127
i = 1
p = 22
num = 1.0
#print('%d \n' % (ex))
while (p != -1) :
i = 1<<p
dig = (a & i)>>p
#print dig
num += (dig * pow(2,p-23))
p -= 1
num = num * pow(2,ex)
i = 1<<31
sign = a & i
if (sign) :
num = num * -1
print num
return num
#def generate_testbench(value):
def convert2hex_of_xilinx(hex_number,num_of_bits):
hex_number = hex_number.split('x')
hex_number = hex_number[1]
hex_number = str(hex_number)
hex_number = str(num_of_bits)+"'h"+ hex_number +';'
return hex_number
if __name__ == "__main__":
time =0;
i = 0;
j = 0;
for time in range(0,100):
i = i+1
j = j+1
if j == 255:
j = 0;
else:
j = j;
if i ==2:
i = 0
else:
i=i
InsTagIn = j
InsTagIn = hex(InsTagIn)
InsTagIn = InsTagIn.split('x')
InsTagIn = InsTagIn[1]
instagin = str("\tInsTagIn = ")
InsTagIn = instagin + "8'h"+str(InsTagIn) + ";"
Opcode = i
Opcode = hex(Opcode)
Opcode = Opcode.split('x')
Opcode = Opcode[1]
opcode = str("\tOpcode = ")
Opcode = opcode +"4'h"+ str(Opcode) +";"
delay = 20
delay = str(delay)
delay = '#' + delay
x = str(" x_processor= ")
x = delay +x
y = str("\ty_processor= ")
z = str("\tz_processor= ")
#z = delay+z
'''x_processor = 0.01*time
x_processor = float(x_processor)
x_processor = ieee754(x_processor)
x_processor = hex(x_processor)
x_processor = x_processor.split('x')
x_processor = x_processor[1]
x_processor = str(x_processor)
y_processor = 0.5 + 0.01*time
y_processor = float(y_processor)
y_processor = ieee754(y_processor)
y_processor = hex(y_processor)
y_processor = y_processor.split('x')
y_processor = y_processor[1]
y_processor = str(y_processor)'''
x_processor = str(00000000);
y_processor = str(00000000);
z_processor = time*pi/180
z_float1 = float(z_processor)
z_processor = ieee754(z_float1)
z_processor = hex(z_processor)
z_processor = z_processor.split('x')
z_processor = z_processor[1]
z_processor = str(z_processor)
x = x+"32'h"+x_processor +";"
y = y+"32'h"+y_processor +";"
z = z+"32'h"+z_processor +";"
print x
print y
print z
print Opcode
print InsTagIn
'''if i ==0:
sine = math.sin(z_float1)
sine = ieee754(sine)
sine = hex(sine)
sine = convert2hex_of_xilinx(sine,32)
cosine = math.cos(z_float1)
cosine = ieee754(cosine)
cosine = hex(cosine)
cosine = convert2hex_of_xilinx(cosine,32)
print "\t" +"x_out ="+ str(cosine)
print "\t" +"y_out ="+ str(sine)
elif i==1:
sineh = math.sinh(z_float1)
sineh = ieee754(sineh)
sineh = hex(sineh)
sineh = convert2hex_of_xilinx(sineh,32)
cosineh = math.cosh(z_float1)
cosineh = ieee754(cosineh)
cosineh = hex(cosineh)
cosineh = convert2hex_of_xilinx(cosineh,32)
print "\t" +"x_out = "+ str(cosineh)
print "\t" +"y_out = "+ str(sineh)'''
| ankitshah009/High-Radix-Adaptive-CORDIC | Testbench Generation code/Testbench Files/test_sin,sinh.py | Python | apache-2.0 | 3,601 | 0.064149 |
import sys
def addAbilities(core, actor, player):
actor.addAbility("sm_inside_information")
return
def removeAbilities(core, actor, player):
actor.removeAbility("sm_inside_information")
return
| agry/NGECore2 | scripts/expertise/expertise_sm_path_inside_information_1.py | Python | lgpl-3.0 | 199 | 0.030151 |
import collections
import copy
import functools
import itertools
import json
import time
import warnings
from sentinels import NOTHING
from .filtering import filter_applies, iter_key_candidates
from . import ObjectId, OperationFailure, DuplicateKeyError
from .helpers import basestring, xrange, print_deprecation_warning
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
# Optional requirements for providing Map-Reduce functionality
import execjs
except ImportError:
execjs = None
try:
from bson import (json_util, SON)
except ImportError:
    json_util = SON = None
from six import (
string_types,
text_type,
iteritems,
itervalues,
iterkeys)
from mongomock import helpers
class Collection(object):
def __init__(self, db, name):
super(Collection, self).__init__()
self.name = name
self.full_name = "{0}.{1}".format(db.name, name)
self._Collection__database = db
self._documents = OrderedDict()
self._uniques = []
def __repr__(self):
return "Collection({0}, '{1}')".format(self._Collection__database, self.name)
def __getitem__(self, name):
return self._Collection__database[self.name + '.' + name]
def __getattr__(self, name):
return self.__getitem__(name)
def insert(self, data, manipulate=True,
safe=None, check_keys=True, continue_on_error=False, **kwargs):
if isinstance(data, list):
return [self._insert(element) for element in data]
return self._insert(data)
def _insert(self, data):
if not all(isinstance(k, string_types) for k in data):
raise ValueError("Document keys must be strings")
if '_id' not in data:
data['_id'] = ObjectId()
object_id = data['_id']
if object_id in self._documents:
raise DuplicateKeyError("Duplicate Key Error", 11000)
for unique in self._uniques:
find_kwargs = {}
for key, direction in unique:
if key in data:
find_kwargs[key] = data[key]
answer = self.find(spec=find_kwargs)
if answer.count() > 0:
raise DuplicateKeyError("Duplicate Key Error", 11000)
self._documents[object_id] = self._internalize_dict(data)
return object_id
def _internalize_dict(self, d):
return dict((k, copy.deepcopy(v)) for k, v in iteritems(d))
def _has_key(self, doc, key):
return key in doc
def update(self, spec, document, upsert = False, manipulate = False,
safe = False, multi = False, _check_keys = False, **kwargs):
"""Updates document(s) in the collection."""
found = False
updated_existing = False
num_updated = 0
for existing_document in itertools.chain(self._iter_documents(spec), [None]):
# we need was_insert for the setOnInsert update operation
was_insert = False
# the sentinel document means we should do an upsert
if existing_document is None:
if not upsert:
continue
existing_document = self._documents[self._insert(self._discard_operators(spec))]
was_insert = True
else:
updated_existing = True
num_updated += 1
first = True
found = True
subdocument = None
for k, v in iteritems(document):
if k == '$set':
positional = False
for key in iterkeys(v):
if '$' in key:
positional = True
break
if positional:
subdocument = self._update_document_fields_positional(existing_document,v, spec, _set_updater, subdocument)
continue
self._update_document_fields(existing_document, v, _set_updater)
elif k == '$setOnInsert':
if not was_insert:
continue
positional = any('$' in key for key in iterkeys(v))
if positional:
# we use _set_updater
subdocument = self._update_document_fields_positional(existing_document,v, spec, _set_updater, subdocument)
else:
self._update_document_fields(existing_document, v, _set_updater)
elif k == '$unset':
for field, value in iteritems(v):
if self._has_key(existing_document, field):
del existing_document[field]
elif k == '$inc':
positional = False
for key in iterkeys(v):
if '$' in key:
positional = True
break
if positional:
subdocument = self._update_document_fields_positional(existing_document, v, spec, _inc_updater, subdocument)
continue
self._update_document_fields(existing_document, v, _inc_updater)
elif k == '$addToSet':
for field, value in iteritems(v):
container = existing_document.setdefault(field, [])
if value not in container:
container.append(value)
elif k == '$pull':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field in existing_document:
arr = existing_document[field]
if isinstance(value, dict):
existing_document[field] = [obj for obj in arr if not filter_applies(value, obj)]
else:
existing_document[field] = [obj for obj in arr if not value == obj]
continue
# nested fields includes a positional element
# need to find that element
if '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(existing_document, spec, nested_field_list)
# value should be a dictionary since we're pulling
pull_results = []
# and the last subdoc should be an array
for obj in subdocument[nested_field_list[-1]]:
if isinstance(obj, dict):
for pull_key, pull_value in iteritems(value):
if obj[pull_key] != pull_value:
pull_results.append(obj)
continue
if obj != value:
pull_results.append(obj)
# cannot write to doc directly as it doesn't save to existing_document
subdocument[nested_field_list[-1]] = pull_results
elif k == '$push':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list
# append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document[field] += list(value['$each'])
continue
existing_document[field].append(value)
continue
# nested fields includes a positional element
# need to find that element
elif '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(existing_document, spec, nested_field_list)
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
if isinstance(value, dict):
# check to see if we have the format
# { '$each': [] }
if '$each' in value:
push_results += list(value['$each'])
else:
push_results.append(value)
else:
push_results.append(value)
# cannot write to doc directly as it doesn't save to existing_document
subdocument[nested_field_list[-1]] = push_results
# push to array in a nested attribute
else:
# create nested attributes if they do not exist
subdocument = existing_document
for field in nested_field_list[:-1]:
if field not in subdocument:
subdocument[field] = {}
subdocument = subdocument[field]
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
push_results.append(value)
subdocument[nested_field_list[-1]] = push_results
else:
if first:
# replace entire document
for key in document.keys():
if key.startswith('$'):
# can't mix modifiers with non-modifiers in update
raise ValueError('field names cannot start with $ [{}]'.format(k))
_id = spec.get('_id', existing_document.get('_id', None))
existing_document.clear()
if _id:
existing_document['_id'] = _id
existing_document.update(self._internalize_dict(document))
if existing_document['_id'] != _id:
# id changed, fix index
del self._documents[_id]
self.insert(existing_document)
break
else:
# can't mix modifiers with non-modifiers in update
raise ValueError('Invalid modifier specified: {}'.format(k))
first = False
if not multi:
break
return {
text_type("connectionId"): self._Collection__database.connection._id,
text_type("err"): None,
text_type("ok"): 1.0,
text_type("n"): num_updated,
text_type("updatedExisting"): updated_existing,
}
def _get_subdocument(self, existing_document, spec, nested_field_list):
"""
This method retrieves the subdocument of the existing_document.nested_field_list. It uses the spec to filter
through the items. It will continue to grab nested documents until it can go no further. It will then return the
subdocument that was last saved. '$' is the positional operator, so we use the $elemMatch in the spec to find
the right subdocument in the array.
"""
# current document in view
doc = existing_document
# previous document in view
subdocument = existing_document
# current spec in view
subspec = spec
# walk down the dictionary
for subfield in nested_field_list:
if subfield == '$':
# positional element should have the equivalent elemMatch in the query
subspec = subspec['$elemMatch']
for item in doc:
# iterate through
if filter_applies(subspec, item):
# found the matching item
# save the parent
subdocument = doc
# save the item
doc = item
break
continue
subdocument = doc
doc = doc[subfield]
if not subfield in subspec:
break
subspec = subspec[subfield]
return subdocument
def _discard_operators(self, doc):
# TODO: this looks a little too naive...
return dict((k, v) for k, v in iteritems(doc) if not k.startswith("$"))
def find(self, spec = None, fields = None, filter = None, sort = None, timeout = True, limit = 0, snapshot = False, as_class = None, skip = 0, slave_okay=False):
if filter is not None:
print_deprecation_warning('filter', 'spec')
if spec is None:
spec = filter
if as_class is None:
as_class = dict
return Cursor(self, functools.partial(self._get_dataset, spec, sort, fields, as_class, skip), limit=limit)
def _get_dataset(self, spec, sort, fields, as_class, skip):
dataset = (self._copy_only_fields(document, fields, as_class) for document in self._iter_documents(spec))
if sort:
for sortKey, sortDirection in reversed(sort):
dataset = iter(sorted(dataset, key = lambda x: _resolve_key(sortKey, x), reverse = sortDirection < 0))
for i in xrange(skip):
try:
unused = next(dataset)
except StopIteration:
pass
return dataset
def _copy_field(self, obj, container):
if isinstance(obj, list):
new = []
for item in obj:
new.append(self._copy_field(item, container))
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
new[key] = self._copy_field(value, container)
return new
else:
return copy.copy(obj)
def _copy_only_fields(self, doc, fields, container):
"""Copy only the specified fields."""
if fields is None:
return self._copy_field(doc, container)
else:
if not fields:
fields = {"_id": 1}
if not isinstance(fields, dict):
fields = helpers._fields_list_to_dict(fields)
#we can pass in something like {"_id":0, "field":1}, so pull the id value out and hang on to it until later
id_value = fields.pop('_id', 1)
#other than the _id field, all fields must be either includes or excludes, this can evaluate to 0
if len(set(list(fields.values()))) > 1:
raise ValueError('You cannot currently mix including and excluding fields.')
            #if we have no values passed in, make a doc_copy based on the id_value
if len(list(fields.values())) == 0:
if id_value == 1:
doc_copy = container()
else:
doc_copy = self._copy_field(doc, container)
#if 1 was passed in as the field values, include those fields
elif list(fields.values())[0] == 1:
doc_copy = container()
for key in fields:
if key in doc:
doc_copy[key] = doc[key]
#otherwise, exclude the fields passed in
else:
doc_copy = self._copy_field(doc, container)
for key in fields:
if key in doc_copy:
del doc_copy[key]
#set the _id value if we requested it, otherwise remove it
if id_value == 0:
if '_id' in doc_copy:
del doc_copy['_id']
else:
if '_id' in doc:
doc_copy['_id'] = doc['_id']
fields['_id'] = id_value #put _id back in fields
return doc_copy
def _update_document_fields(self, doc, fields, updater):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
self._update_document_single_field(doc, k, v, updater)
def _update_document_fields_positional(self, doc, fields, spec, updater, subdocument=None):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
if '$' in k:
field_name_parts = k.split('.')
if not subdocument:
current_doc = doc
subspec = spec
for part in field_name_parts[:-1]:
if part == '$':
subspec = subspec.get('$elemMatch', subspec)
for item in current_doc:
if filter_applies(subspec, item):
current_doc = item
break
continue
new_spec = {}
for el in subspec:
if el.startswith(part):
if len(el.split(".")) > 1:
new_spec[".".join(el.split(".")[1:])] = subspec[el]
else:
new_spec = subspec[el]
subspec = new_spec
current_doc = current_doc[part]
subdocument = current_doc
updater(subdocument, field_name_parts[-1], v)
continue
# otherwise, we handle it the standard way
self._update_document_single_field(doc, k, v, updater)
return subdocument
def _update_document_single_field(self, doc, field_name, field_value, updater):
field_name_parts = field_name.split(".")
for part in field_name_parts[:-1]:
if not isinstance(doc, dict) and not isinstance(doc, list):
return # mongodb skips such cases
if isinstance(doc, list):
try:
if part == '$':
doc = doc[0]
else:
doc = doc[int(part)]
continue
except ValueError:
pass
doc = doc.setdefault(part, {})
updater(doc, field_name_parts[-1], field_value)
def _iter_documents(self, filter = None):
return (document for document in itervalues(self._documents) if filter_applies(filter, document))
def find_one(self, spec_or_id=None, *args, **kwargs):
# Allow calling find_one with a non-dict argument that gets used as
# the id for the query.
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, collections.Mapping):
spec_or_id = {'_id':spec_or_id}
try:
return next(self.find(spec_or_id, *args, **kwargs))
except StopIteration:
return None
def find_and_modify(self, query = {}, update = None, upsert = False, sort = None, **kwargs):
remove = kwargs.get("remove", False)
if kwargs.get("new", False) and remove:
raise OperationFailure("remove and returnNew can't co-exist") # message from mongodb
if remove and update is not None:
raise ValueError("Can't do both update and remove")
old = self.find_one(query, sort=sort)
if not old:
if upsert:
old = {'_id':self.insert(query)}
else:
return None
if remove:
self.remove({"_id": old["_id"]})
else:
self.update({'_id':old['_id']}, update)
if kwargs.get('new', False):
return self.find_one({'_id':old['_id']})
return old
def save(self, to_save, manipulate = True, safe = False, **kwargs):
if not isinstance(to_save, dict):
raise TypeError("cannot save object of type %s" % type(to_save))
if "_id" not in to_save:
return self.insert(to_save)
else:
self.update({"_id": to_save["_id"]}, to_save, True,
manipulate, safe, _check_keys = True, **kwargs)
return to_save.get("_id", None)
def remove(self, spec_or_id = None, search_filter = None):
"""Remove objects matching spec_or_id from the collection."""
if search_filter is not None:
print_deprecation_warning('search_filter', 'spec_or_id')
if spec_or_id is None:
spec_or_id = search_filter if search_filter else {}
if not isinstance(spec_or_id, dict):
spec_or_id = {'_id': spec_or_id}
to_delete = list(self.find(spec = spec_or_id))
for doc in to_delete:
doc_id = doc['_id']
del self._documents[doc_id]
return {
"connectionId": self._Collection__database.connection._id,
"n": len(to_delete),
"ok": 1.0,
"err": None,
}
def count(self):
return len(self._documents)
def drop(self):
del self._documents
self._documents = {}
def ensure_index(self, key_or_list, cache_for = 300, **kwargs):
if 'unique' in kwargs and kwargs['unique']:
self._uniques.append(helpers._index_list(key_or_list))
def drop_index(self, index_or_name):
pass
def index_information(self):
return {}
def map_reduce(self, map_func, reduce_func, out, full_response=False, query=None, limit=0):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to run Map-Reduce. "
"Use 'pip install pyexecjs pymongo' to support Map-Reduce mock."
)
if limit == 0:
limit = None
start_time = time.clock()
out_collection = None
reduced_rows = None
full_dict = {'counts': {'input': 0,
'reduce':0,
'emit':0,
'output':0},
'timeMillis': 0,
'ok': 1.0,
'result': None}
map_ctx = execjs.compile("""
function doMap(fnc, docList) {
var mappedDict = {};
function emit(key, val) {
if (key['$oid']) {
mapped_key = '$oid' + key['$oid'];
}
else {
mapped_key = key;
}
if(!mappedDict[mapped_key]) {
mappedDict[mapped_key] = [];
}
mappedDict[mapped_key].push(val);
}
mapper = eval('('+fnc+')');
var mappedList = new Array();
for(var i=0; i<docList.length; i++) {
var thisDoc = eval('('+docList[i]+')');
var mappedVal = (mapper).call(thisDoc);
}
return mappedDict;
}
""")
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
var reducedList = new Array();
reducer = eval('('+fnc+')');
for(var key in docList) {
var reducedVal = {'_id': key,
'value': reducer(key, docList[key])};
reducedList.push(reducedVal);
}
return reducedList;
}
""")
doc_list = [json.dumps(doc, default=json_util.default) for doc in self.find(query)]
mapped_rows = map_ctx.call('doMap', map_func, doc_list)
reduced_rows = reduce_ctx.call('doReduce', reduce_func, mapped_rows)[:limit]
for reduced_row in reduced_rows:
if reduced_row['_id'].startswith('$oid'):
reduced_row['_id'] = ObjectId(reduced_row['_id'][4:])
reduced_rows = sorted(reduced_rows, key=lambda x: x['_id'])
if full_response:
full_dict['counts']['input'] = len(doc_list)
for key in mapped_rows.keys():
emit_count = len(mapped_rows[key])
full_dict['counts']['emit'] += emit_count
if emit_count > 1:
full_dict['counts']['reduce'] += 1
full_dict['counts']['output'] = len(reduced_rows)
if isinstance(out, (str, bytes)):
out_collection = getattr(self._Collection__database, out)
out_collection.drop()
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = out
elif isinstance(out, SON) and out.get('replace') and out.get('db'):
# Must be of the format SON([('replace','results'),('db','outdb')])
out_db = getattr(self._Collection__database._Database__connection, out['db'])
out_collection = getattr(out_db, out['replace'])
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = {'db': out['db'], 'collection': out['replace']}
elif isinstance(out, dict) and out.get('inline'):
ret_val = reduced_rows
full_dict['result'] = reduced_rows
else:
raise TypeError("'out' must be an instance of string, dict or bson.SON")
full_dict['timeMillis'] = int(round((time.clock() - start_time) * 1000))
if full_response:
ret_val = full_dict
return ret_val
def inline_map_reduce(self, map_func, reduce_func, full_response=False, query=None, limit=0):
return self.map_reduce(map_func, reduce_func, {'inline':1}, full_response, query, limit)
def distinct(self, key):
return self.find().distinct(key)
def group(self, key, condition, initial, reduce, finalize=None):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to use group. "
"Use 'pip install pyexecjs pymongo' to support group mock."
)
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
reducer = eval('('+fnc+')');
for(var i=0, l=docList.length; i<l; i++) {
try {
reducedVal = reducer(docList[i-1], docList[i]);
}
catch (err) {
continue;
}
}
return docList[docList.length - 1];
}
""")
ret_array = []
doc_list_copy = []
ret_array_copy = []
reduced_val = {}
doc_list = [doc for doc in self.find(condition)]
for doc in doc_list:
doc_copy = copy.deepcopy(doc)
for k in doc:
if isinstance(doc[k], ObjectId):
doc_copy[k] = str(doc[k])
if k not in key and k not in reduce:
del doc_copy[k]
for initial_key in initial:
if initial_key in doc.keys():
pass
else:
doc_copy[initial_key] = initial[initial_key]
doc_list_copy.append(doc_copy)
doc_list = doc_list_copy
for k in key:
doc_list = sorted(doc_list, key=lambda x: _resolve_key(k, x))
for k in key:
if not isinstance(k, basestring):
raise TypeError("Keys must be a list of key names, "
"each an instance of %s" % (basestring.__name__,))
for k2, group in itertools.groupby(doc_list, lambda item: item[k]):
group_list = ([x for x in group])
reduced_val = reduce_ctx.call('doReduce', reduce, group_list)
ret_array.append(reduced_val)
for doc in ret_array:
doc_copy = copy.deepcopy(doc)
for k in doc:
if k not in key and k not in initial.keys():
del doc_copy[k]
ret_array_copy.append(doc_copy)
ret_array = ret_array_copy
return ret_array
def aggregate(self, pipeline, **kwargs):
pipeline_operators = ['$project','$match','$redact','$limit','$skip','$unwind','$group','$sort','$geoNear','$out']
group_operators = ['$addToSet', '$first','$last','$max','$min','$avg','$push','$sum']
boolean_operators = ['$and','$or', '$not']
set_operators = ['$setEquals', '$setIntersection', '$setDifference', '$setUnion', '$setIsSubset', '$anyElementTrue', '$allElementsTrue']
        comparison_operators = ['$cmp','$eq','$gt','$gte','$lt','$lte','$ne']
        arithmetic_operators = ['$add','$divide','$mod','$multiply','$subtract']
string_operators = ['$concat','$strcasecmp','$substr','$toLower','$toUpper']
text_search_operators = ['$meta']
array_operators = ['$size']
projection_operators = ['$map', '$let', '$literal']
date_operators = ['$dayOfYear','$dayOfMonth','$dayOfWeek','$year','$month','$week','$hour','$minute','$second','$millisecond']
conditional_operators = ['$cond', '$ifNull']
out_collection = [doc for doc in self.find()]
grouped_collection = []
for expression in pipeline:
for k, v in iteritems(expression):
if k == '$match':
out_collection = [doc for doc in out_collection if filter_applies(v, doc)]
elif k == '$group':
group_func_keys = expression['$group']['_id'][1:]
for group_key in reversed(group_func_keys):
out_collection = sorted(out_collection, key=lambda x: _resolve_key(group_key, x))
for field, value in iteritems(v):
if field != '_id':
for func, key in iteritems(value):
if func == "$sum" or "$avg":
for group_key in group_func_keys:
for ret_value, group in itertools.groupby(out_collection, lambda item: item[group_key]):
doc_dict = {}
group_list = ([x for x in group])
doc_dict['_id'] = ret_value
current_val = 0
if func == "$sum":
for doc in group_list:
current_val = sum([current_val, doc[field]])
doc_dict[field] = current_val
                                            else:
                                                for doc in group_list:
                                                    current_val = sum([current_val, doc[field]])
                                                doc_dict[field] = current_val / len(group_list)
grouped_collection.append(doc_dict)
else:
if func in group_operators:
raise NotImplementedError(
"Although %s is a valid group operator for the aggregation pipeline, "
"%s is currently not implemented in Mongomock."
)
else:
raise NotImplementedError(
"%s is not a valid group operator for the aggregation pipeline. "
"See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
"for a complete list of valid operators."
)
out_collection = grouped_collection
elif k == '$sort':
sort_array = []
for x, y in v.items():
sort_array.append({x:y})
for sort_pair in reversed(sort_array):
for sortKey, sortDirection in sort_pair.items():
out_collection = sorted(out_collection, key = lambda x: _resolve_key(sortKey, x), reverse = sortDirection < 0)
elif k == '$skip':
out_collection = out_collection[v:]
elif k == '$limit':
out_collection = out_collection[:v]
elif k == '$unwind':
                    if not isinstance(v, basestring) or not v.startswith('$'):
                        raise ValueError("$unwind failed: exception: field path references must be prefixed with a '$' ('%s')" % str(v))
if len(v.split('.')) > 1:
raise NotImplementedError('Mongmock does not currently support nested field paths in the $unwind implementation. ("%s"'%v)
unwound_collection = []
for doc in out_collection:
array_value = doc.get(v[1:])
if array_value in (None, []):
continue
elif not isinstance(array_value, list):
raise TypeError('$unwind must specify an array field, field: "%s", value found: %s'%(str(v),str(array_value)))
for field_item in array_value:
unwound_collection.append(copy.deepcopy(doc))
unwound_collection[-1][v[1:]] = field_item
out_collection = unwound_collection
else:
if k in pipeline_operators:
raise NotImplementedError(
"Although %s is a valid operator for the aggregation pipeline, "
"%s is currently not implemented in Mongomock."
)
else:
raise NotImplementedError(
"%s is not a valid operator for the aggregation pipeline. "
"See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
"for a complete list of valid operators."
)
return {'ok':1.0, 'result':out_collection}
def _resolve_key(key, doc):
return next(iter(iter_key_candidates(key, doc)), NOTHING)
class Cursor(object):
def __init__(self, collection, dataset_factory, limit=0):
super(Cursor, self).__init__()
self.collection = collection
self._factory = dataset_factory
self._dataset = self._factory()
self._limit = limit if limit != 0 else None #pymongo limit defaults to 0, returning everything
self._skip = None
def __iter__(self):
return self
def clone(self):
return Cursor(self.collection, self._factory, self._limit)
def __next__(self):
if self._skip:
for i in range(self._skip):
next(self._dataset)
self._skip = None
if self._limit is not None and self._limit <= 0:
raise StopIteration()
if self._limit is not None:
self._limit -= 1
return next(self._dataset)
next = __next__
def sort(self, key_or_list, direction = None):
if direction is None:
direction = 1
if isinstance(key_or_list, (tuple, list)):
for sortKey, sortDirection in reversed(key_or_list):
self._dataset = iter(sorted(self._dataset, key = lambda x: _resolve_key(sortKey, x), reverse = sortDirection < 0))
else:
self._dataset = iter(sorted(self._dataset, key = lambda x: _resolve_key(key_or_list, x), reverse = direction < 0))
return self
def count(self, with_limit_and_skip=False):
arr = [x for x in self._dataset]
count = len(arr)
if with_limit_and_skip:
if self._skip:
count -= self._skip
if self._limit and count > self._limit:
count = self._limit
self._dataset = iter(arr)
return count
def skip(self, count):
self._skip = count
return self
def limit(self, count):
self._limit = count
return self
def batch_size(self, count):
return self
def close(self):
pass
def distinct(self, key):
if not isinstance(key, basestring):
raise TypeError('cursor.distinct key must be a string')
unique = set()
for x in iter(self._dataset):
value = _resolve_key(key, x)
if value == NOTHING: continue
unique.update(value if isinstance(value, (tuple, list)) else [value])
return list(unique)
def __getitem__(self, index):
arr = [x for x in self._dataset]
count = len(arr)
self._dataset = iter(arr)
return arr[index]
def _set_updater(doc, field_name, value):
if isinstance(value, (tuple, list)):
value = copy.deepcopy(value)
if isinstance(doc, dict):
doc[field_name] = value
def _inc_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = doc.get(field_name, 0) + value
def _sum_updater(doc, field_name, current, result):
if isinstance(doc, dict):
result = current + doc.get[field_name, 0]
return result
| chartbeat-labs/mongomock | mongomock/collection.py | Python | bsd-3-clause | 39,269 | 0.006239 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__all__ = ['ObjectiveFunction']
import numpy as np
from datapipe.denoising.tailcut import Tailcut
from datapipe.benchmark import assess
def norm_angle_diff(angle_in_degrees):
"""Normalize the difference of 2 angles in degree.
This function is used to normalize the "delta psi" angle.
"""
return np.abs(np.mod(angle_in_degrees + 90, 180) - 90.)
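# For example, norm_angle_diff(5.) == 5., norm_angle_diff(175.) == 5. and
# norm_angle_diff(185.) == 5.; every difference is folded into the [0, 90] range.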
# OPTIMIZER ##################################################################
class ObjectiveFunction:
def __init__(self, input_files, max_num_img=None, aggregation_method="mean"):
self.call_number = 0
# Init the wavelet class
self.cleaning_algorithm = Tailcut()
# Make the image list
self.input_files = input_files
self.max_num_img = max_num_img
self.aggregation_method = aggregation_method # "mean" or "median"
print("aggregation method:", self.aggregation_method)
# PRE PROCESSING FILTERING ############################################
# TODO...
def __call__(self, threshold_list):
self.call_number += 1
aggregated_score = float('inf')
try:
high_threshold = float(threshold_list[0])
low_threshold = float(threshold_list[1])
if low_threshold > high_threshold:
# To avoid useless computation, reject solutions where low threshold is greater than high threshold
# (these solutions have the same result than the solution `low_threshold == high_threshold`)
return float('nan')
#low_threshold = min(low_threshold, high_threshold) # low threshold should not be greater than high threshold
algo_params_var = {
"high_threshold": high_threshold,
"low_threshold": low_threshold
}
benchmark_method = "delta_psi" # TODO
label = "TC_{}".format(self.call_number)
self.cleaning_algorithm.label = label
output_file_path = "score_tailcut_optim_{}.json".format(self.call_number)
algo_params = {
"kill_isolated_pixels": True,
"verbose": False,
}
algo_params.update(algo_params_var)
# TODO: randomly make a subset fo self.input_files
input_files = self.input_files
output_dict = self.cleaning_algorithm.run(algo_params,
input_file_or_dir_path_list=input_files,
benchmark_method=benchmark_method,
output_file_path=output_file_path,
max_num_img=self.max_num_img)
score_list = []
# Read and compute results from output_dict
for image_dict in output_dict["io"]:
# POST PROCESSING FILTERING #######################################
# >>>TODO<<<: Filter images: decide wether the image should be used or not ? (contained vs not contained)
# TODO: filter these images *before* cleaning them to avoid waste of computation...
                # >>>TODO<<<: Filter images by energy range: decide whether the image should be used or not
# TODO: filter these images *before* cleaning them to avoid waste of computation...
###################################################################
# GET THE CLEANED IMAGE SCORE
if ("img_ref_hillas_2_psi" in image_dict) and ("img_cleaned_hillas_2_psi" in image_dict):
output_image_parameter_psi_rad = image_dict["img_ref_hillas_2_psi"]
reference_image_parameter_psi_rad = image_dict["img_cleaned_hillas_2_psi"]
delta_psi_rad = reference_image_parameter_psi_rad - output_image_parameter_psi_rad
normalized_delta_psi_deg = norm_angle_diff(np.degrees(delta_psi_rad))
#if image_dict["score_name"][0] != "delta_psi":
# raise Exception("Cannot get the score")
#normalized_delta_psi_deg = image_dict["score"][0]
score_list.append(normalized_delta_psi_deg)
else:
# The cleaning algorithm failed to clean this image
# TODO: add a penalty
score_list.append(90.) # the worst score
            # Aggregate the per-image scores (mean or median)
            if self.aggregation_method == "mean":
                aggregated_score = float(np.mean(score_list))
            elif self.aggregation_method == "median":
                aggregated_score = float(np.median(score_list))
            else:
                raise ValueError("Unknown value for aggregation_method: {}".format(self.aggregation_method))
# TODO: save results in a JSON file (?)
print(algo_params_var, aggregated_score, self.aggregation_method)
except Exception as e:
print(e)
return float(aggregated_score)
if __name__ == "__main__":
# Test...
#func = ObjectiveFunction(input_files=["./MISC/testset/gamma/digicam/"])
func = ObjectiveFunction(input_files=["/dev/shm/.jd/digicam/gamma/"])
threshold_list = [10, 5]
score = func(threshold_list)
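    print("aggregated score:", score)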
| jdhp-sap/data-pipeline-standalone-scripts | datapipe/optimization/objectivefunc/tailcut_delta_psi.py | Python | mit | 6,676 | 0.004046 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackedUser'
db.create_table('sentry_trackeduser', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'])),
('ident', self.gf('django.db.models.fields.CharField')(max_length=200)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True)),
('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
))
db.send_create_signal('sentry', ['TrackedUser'])
# Adding unique constraint on 'TrackedUser', fields ['project', 'ident']
db.create_unique('sentry_trackeduser', ['project_id', 'ident'])
def backwards(self, orm):
# Removing unique constraint on 'TrackedUser', fields ['project', 'ident']
db.delete_unique('sentry_trackeduser', ['project_id', 'ident'])
# Deleting model 'TrackedUser'
db.delete_table('sentry_trackeduser')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'ident', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| JackDanger/sentry | src/sentry/south_migrations/0077_auto__add_trackeduser__add_unique_trackeduser_project_ident.py | Python | bsd-3-clause | 22,623 | 0.008222 |
# docstrings not needed here (the type handler interfaces are fully
# documented in base.py) pylint: disable-msg=C0111
from .base import ReturnValue, Parameter, \
ReverseWrapperBase, ForwardWrapperBase
class PyObjectParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['PyObject*']
def __init__(self, ctype, name, transfer_ownership, is_const=False):
"""
:param ctype: C type, normally 'PyObject*'
:param name: parameter name
:param transfer_ownership: this parameter transfer the ownership of
the pointed-to object to the called
function
"""
super(PyObjectParam, self).__init__(
ctype, name, direction=Parameter.DIRECTION_IN, is_const=is_const)
self.transfer_ownership = transfer_ownership
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
if self.transfer_ownership:
wrapper.build_params.add_parameter('N', [self.value])
else:
wrapper.build_params.add_parameter('O', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable(self.ctype_no_const, self.name)
wrapper.parse_params.add_parameter('O', ['&'+name], self.name)
wrapper.call_params.append(name)
if self.transfer_ownership:
wrapper.before_call.write_code("Py_INCREF((PyObject*) %s);" % name)
class PyObjectReturnValue(ReturnValue):
CTYPES = ['PyObject*']
def __init__(self, ctype, caller_owns_return, is_const=False):
"""
        :param ctype: C type, normally 'PyObject*'
:param caller_owns_return: if true, ownership of the object pointer
is transferred to the caller
"""
super(PyObjectReturnValue, self).__init__(ctype, is_const)
self.caller_owns_return = caller_owns_return
def get_c_error_return(self):
return "return NULL;"
def convert_python_to_c(self, wrapper):
wrapper.parse_params.add_parameter("O", ["&"+self.value], prepend=True)
if self.caller_owns_return:
wrapper.after_call.write_code("Py_INCREF((PyObject*) %s);" % self.value)
def convert_c_to_python(self, wrapper):
wrapper.build_params.add_parameter(
(self.caller_owns_return and "N" or "O"),
[self.value], prepend=True)
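# Illustrative usage sketch (not part of this module, assumes the usual
# pybindgen front-end helpers): these handlers are picked automatically when a
# binding declares 'PyObject*' types, e.g.
#
#     from pybindgen import Module, retval, param
#     mod = Module('example')
#     mod.add_function('identity',
#                      retval('PyObject*', caller_owns_return=True),
#                      [param('PyObject*', 'obj', transfer_ownership=False)])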
| cawka/pybindgen | pybindgen/typehandlers/pyobjecttype.py | Python | lgpl-2.1 | 2,516 | 0.001192 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
from io import BytesIO
import random
import re
from unittest import skipIf
import warnings
from django.conf import settings
from django.core import mail
from django.db import (transaction, connections, DEFAULT_DB_ALIAS,
IntegrityError)
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import CommonMiddleware, BrokenLinkEmailsMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.middleware.gzip import GZipMiddleware
from django.middleware.transaction import TransactionMiddleware
from django.test import TransactionTestCase, TestCase, RequestFactory, override_settings
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils import six
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_str
from django.utils.six.moves import xrange
from .models import Band
class CommonMiddlewareTest(TestCase):
urls = 'middleware.urls'
def _get_request(self, path):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = "/%s" % path
return request
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
Tests that URLs with slashes go unmolested.
"""
request = self._get_request('slash/')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Tests that matches to explicit slashless URLs go unmolested.
"""
request = self._get_request('noslash')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
Tests that APPEND_SLASH doesn't redirect to unknown resources.
"""
request = self._get_request('unknown')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
"""
request = self._get_request('slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://testserver/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self._get_request('slash')
request.method = 'POST'
with six.assertRaisesRegex(self, RuntimeError, 'end in a slash'):
CommonMiddleware().process_request(request)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Tests disabling append slash functionality.
"""
request = self._get_request('slash')
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
Tests that URLs which require quoting are redirected to their slash
version ok.
"""
request = self._get_request('needsquoting#')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://testserver/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self._get_request('path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self._get_request('slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self._get_request('slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom urlconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
Tests that URLs with slashes go unmolested.
"""
request = self._get_request('customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Tests that matches to explicit slashless URLs go unmolested.
"""
request = self._get_request('customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
Tests that APPEND_SLASH doesn't redirect to unknown resources.
"""
request = self._get_request('customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
"""
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertFalse(r is None,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
request.method = 'POST'
with six.assertRaisesRegex(self, RuntimeError, 'end in a slash'):
CommonMiddleware().process_request(request)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Tests disabling append slash functionality.
"""
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
Tests that URLs which require quoting are redirected to their slash
version ok.
"""
request = self._get_request('customurlconf/needsquoting#')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertFalse(r is None,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://testserver/customurlconf/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www_custom_urlconf(self):
request = self._get_request('customurlconf/path/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/customurlconf/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
request = self._get_request('customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
request = self._get_request('customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/customurlconf/slash/')
# Legacy tests for the 404 error reporting via email (to be removed in 1.8)
@override_settings(IGNORABLE_404_URLS=(re.compile(r'foo'),),
SEND_BROKEN_LINK_EMAILS=True,
MANAGERS=('[email protected]',))
def test_404_error_reporting(self):
request = self._get_request('regular_url/that/does/not/exist')
request.META['HTTP_REFERER'] = '/another/url/'
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInDjango18Warning)
response = self.client.get(request.path)
CommonMiddleware().process_response(request, response)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
@override_settings(IGNORABLE_404_URLS=(re.compile(r'foo'),),
SEND_BROKEN_LINK_EMAILS=True,
MANAGERS=('[email protected]',))
def test_404_error_reporting_no_referer(self):
request = self._get_request('regular_url/that/does/not/exist')
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInDjango18Warning)
response = self.client.get(request.path)
CommonMiddleware().process_response(request, response)
self.assertEqual(len(mail.outbox), 0)
@override_settings(IGNORABLE_404_URLS=(re.compile(r'foo'),),
SEND_BROKEN_LINK_EMAILS=True,
MANAGERS=('[email protected]',))
def test_404_error_reporting_ignored_url(self):
request = self._get_request('foo_url/that/does/not/exist/either')
request.META['HTTP_REFERER'] = '/another/url/'
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInDjango18Warning)
response = self.client.get(request.path)
CommonMiddleware().process_response(request, response)
self.assertEqual(len(mail.outbox), 0)
# Other tests
def test_non_ascii_query_string_does_not_crash(self):
"""Regression test for #15152"""
request = self._get_request('slash')
request.META['QUERY_STRING'] = force_str('drink=café')
response = CommonMiddleware().process_request(request)
self.assertEqual(response.status_code, 301)
@override_settings(
IGNORABLE_404_URLS=(re.compile(r'foo'),),
MANAGERS=('[email protected]',),
)
class BrokenLinkEmailsMiddlewareTest(TestCase):
def setUp(self):
self.req = HttpRequest()
self.req.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
self.req.path = self.req.path_info = 'regular_url/that/does/not/exist'
self.resp = self.client.get(self.req.path)
def test_404_error_reporting(self):
self.req.META['HTTP_REFERER'] = '/another/url/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
self.req.path = self.req.path_info = 'foo_url/that/does/not/exist'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@skipIf(six.PY3, "HTTP_REFERER is str type on Python 3")
def test_404_error_nonascii_referrer(self):
        # Such referer strings should not happen, but if one does show up,
        # let's not crash
self.req.META['HTTP_REFERER'] = b'http://testserver/c/\xd0\xbb\xd0\xb8/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (re.compile(r'Spider.*'),
re.compile(r'Robot.*'))
def is_ignorable_request(self, request, uri, domain, referer):
'''Check user-agent in addition to normal checks.'''
if super(SubclassedMiddleware, self).is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META['HTTP_USER_AGENT']
return any(pattern.search(user_agent) for pattern in
self.ignored_user_agent_patterns)
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = 'Spider machine 3.4'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
self.req.META['HTTP_USER_AGENT'] = 'My user agent'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
class ConditionalGetMiddlewareTest(TestCase):
urls = 'middleware.cond_get_urls'
def setUp(self):
self.req = HttpRequest()
self.req.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
self.req.path = self.req.path_info = "/"
self.resp = self.client.get(self.req.path)
# Tests for the Date header
def test_date_header_added(self):
self.assertFalse('Date' in self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertTrue('Date' in self.resp)
# Tests for the Content-Length header
def test_content_length_header_added(self):
content_length = len(self.resp.content)
self.assertFalse('Content-Length' in self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertTrue('Content-Length' in self.resp)
self.assertEqual(int(self.resp['Content-Length']), content_length)
def test_content_length_header_not_added(self):
resp = StreamingHttpResponse('content')
self.assertFalse('Content-Length' in resp)
resp = ConditionalGetMiddleware().process_response(self.req, resp)
self.assertFalse('Content-Length' in resp)
def test_content_length_header_not_changed(self):
bad_content_length = len(self.resp.content) + 10
self.resp['Content-Length'] = bad_content_length
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(int(self.resp['Content-Length']), bad_content_length)
# Tests for the ETag header
def test_if_none_match_and_no_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
@override_settings(USE_ETAGS=True)
def test_etag(self):
req = HttpRequest()
res = HttpResponse('content')
self.assertTrue(
CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
res['ETag'] = 'tomatoes'
self.assertEqual(
CommonMiddleware().process_response(req, res).get('ETag'),
'tomatoes')
@override_settings(USE_ETAGS=True)
def test_no_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
self.assertFalse(
CommonMiddleware().process_response(req, res).has_header('ETag'))
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
class XFrameOptionsMiddlewareTest(TestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to SAMEORIGIN to
have the middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='sameorigin'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to DENY to
have the middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
with override_settings(X_FRAME_OPTIONS='deny'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
Tests that if the X_FRAME_OPTIONS setting is not set then it defaults
to SAMEORIGIN.
"""
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
Tests that if the X-Frame-Options header is already set then the
middleware does not attempt to override it.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
Tests that if the response has a xframe_options_exempt attribute set
to False then it still sets the header, but if it's set to True then
it does not.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r.get('X-Frame-Options', None), None)
def test_is_extendable(self):
"""
Tests that the XFrameOptionsMiddleware method that determines the
X-Frame-Options header value can be overridden based on something in
the request or response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, 'sameorigin', False):
return 'SAMEORIGIN'
if getattr(response, 'sameorigin', False):
return 'SAMEORIGIN'
return 'DENY'
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(request,
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
class GZipMiddlewareTest(TestCase):
"""
Tests the GZip middleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b'a' * 500
uncompressible_string = b''.join(six.int2byte(random.randint(0, 255)) for _ in xrange(500))
sequence = [b'a' * 500, b'b' * 200, b'a' * 300]
def setUp(self):
self.req = HttpRequest()
self.req.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
self.req.path = self.req.path_info = "/"
self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp = StreamingHttpResponse(self.sequence)
self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
@staticmethod
def decompress(gzipped_string):
return gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)).read()
def test_compress_response(self):
"""
Tests that compression is performed on responses with compressible content.
"""
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertEqual(r.get('Content-Length'), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Tests that compression is performed on responses with streaming content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp)
self.assertEqual(self.decompress(b''.join(r)), b''.join(self.sequence))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_non_200_response(self):
"""
Tests that compression is performed on responses with a status other than 200.
See #10762.
"""
self.resp.status_code = 404
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
def test_no_compress_short_response(self):
"""
Tests that compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.short_string)
self.assertEqual(r.get('Content-Encoding'), None)
def test_no_compress_compressed_response(self):
"""
Tests that compression isn't performed on responses that are already compressed.
"""
self.resp['Content-Encoding'] = 'deflate'
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'deflate')
def test_no_compress_ie_js_requests(self):
"""
Tests that compression isn't performed on JavaScript requests from Internet Explorer.
"""
self.req.META['HTTP_USER_AGENT'] = 'Mozilla/4.0 (compatible; MSIE 5.00; Windows 98)'
self.resp['Content-Type'] = 'application/javascript; charset=UTF-8'
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), None)
def test_no_compress_uncompressible_response(self):
"""
Tests that compression isn't performed on responses with uncompressible content.
"""
self.resp.content = self.uncompressible_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.uncompressible_string)
self.assertEqual(r.get('Content-Encoding'), None)
@override_settings(USE_ETAGS=True)
class ETagGZipMiddlewareTest(TestCase):
"""
Tests if the ETag middleware behaves correctly with GZip middleware.
"""
compressible_string = b'a' * 500
def setUp(self):
self.rf = RequestFactory()
def test_compress_response(self):
"""
Tests that ETag is changed after gzip compression is performed.
"""
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate')
response = GZipMiddleware().process_response(request,
CommonMiddleware().process_response(request,
HttpResponse(self.compressible_string)))
gzip_etag = response.get('ETag')
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='')
response = GZipMiddleware().process_response(request,
CommonMiddleware().process_response(request,
HttpResponse(self.compressible_string)))
nogzip_etag = response.get('ETag')
self.assertNotEqual(gzip_etag, nogzip_etag)
class TransactionMiddlewareTest(IgnoreDeprecationWarningsMixin, TransactionTestCase):
"""
Test the transaction middleware.
"""
available_apps = ['middleware']
def setUp(self):
super(TransactionMiddlewareTest, self).setUp()
self.request = HttpRequest()
self.request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
self.request.path = self.request.path_info = "/"
self.response = HttpResponse()
self.response.status_code = 200
def tearDown(self):
transaction.abort()
super(TransactionMiddlewareTest, self).tearDown()
def test_request(self):
TransactionMiddleware().process_request(self.request)
self.assertFalse(transaction.get_autocommit())
def test_managed_response(self):
transaction.enter_transaction_management()
Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty())
TransactionMiddleware().process_response(self.request, self.response)
self.assertFalse(transaction.is_dirty())
self.assertEqual(Band.objects.count(), 1)
def test_exception(self):
transaction.enter_transaction_management()
Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty())
TransactionMiddleware().process_exception(self.request, None)
self.assertFalse(transaction.is_dirty())
self.assertEqual(Band.objects.count(), 0)
def test_failing_commit(self):
# It is possible that connection.commit() fails. Check that
# TransactionMiddleware handles such cases correctly.
try:
def raise_exception():
raise IntegrityError()
connections[DEFAULT_DB_ALIAS].commit = raise_exception
transaction.enter_transaction_management()
Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty())
with self.assertRaises(IntegrityError):
TransactionMiddleware().process_response(self.request, None)
self.assertFalse(transaction.is_dirty())
self.assertEqual(Band.objects.count(), 0)
self.assertFalse(transaction.is_managed())
finally:
del connections[DEFAULT_DB_ALIAS].commit
| oscaro/django | tests/middleware/tests.py | Python | bsd-3-clause | 32,964 | 0.001335 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usermodule', '0002_auto_20151108_2019'),
]
operations = [
migrations.CreateModel(
name='Period',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=10)),
('start_date', models.DateField()),
('end_date', models.DateField()),
('professor', models.ForeignKey(to='usermodule.Professor')),
],
),
]
| Sezzh/tifis_platform | tifis_platform/groupmodule/migrations/0001_initial.py | Python | gpl-2.0 | 711 | 0.001406 |
# -*- coding: utf-8 -*-
import datetime
from django.contrib import admin
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from newsletter.models import Newsletter, NewsletterSubscription
from newsletter.views import send_mass_email
def send_emails(newsletter, emails):
    # In DEBUG mode, send only to the site admins and do not mark the
    # newsletter as sent.
    if settings.DEBUG:
        emails = [d[1] for d in settings.ADMINS]
    send_mass_email(settings.EMAIL_FROM, None, emails, newsletter.title, newsletter.txt, newsletter.html)
    if not settings.DEBUG:
        newsletter.datetime_sent = datetime.datetime.now()
        newsletter.sent_to = ';'.join(emails)
        newsletter.save()
def send_newsletter(modeladmin, request, queryset):
for q in queryset:
newsletter_subscriptions = NewsletterSubscription.objects.filter(subscribed=True)
emails = [ns.email for ns in newsletter_subscriptions]
send_emails(q, emails)
send_newsletter.short_description = _(u"Send newsletter")
class NewsletterAdmin(admin.ModelAdmin):
list_display = ['title', 'txt', 'html', 'datetime_sent']
ordering = ['-datetime_sent']
actions = [send_newsletter]
admin.site.register([Newsletter], NewsletterAdmin)
admin.site.register([NewsletterSubscription])
| rokj/django_newsletter | newsletter/admin.py | Python | mit | 1,249 | 0.008006 |
from Database.Controllers.Curso import Curso
class Periodo(object):
def __init__(self,dados=None):
if dados is not None:
			self.id = dados['id']
			self.id_curso = dados['id_curso']
			self.periodo = dados['periodo']
			self.creditos = dados['creditos']
def getId(self):
return self.id
def setId_curso(self,id_curso):
self.id_curso = id_curso
	def getId_curso(self):
		return self.id_curso
def getCurso(self):
return (Curso().pegarCurso('where id = %s', (self.id_curso,))).getNome()
def setPeriodo(self,periodo):
self.periodo = periodo
def getPeriodo(self):
return self.periodo
def setCreditos(self,creditos):
self.creditos = creditos
def getCreditos(self):
return self.creditos
| AEDA-Solutions/matweb | backend/Database/Models/Periodo.py | Python | mit | 742 | 0.074124 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'metuly.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'', include('meddit.urls')),
)
| kamyarg/enfame | metuly/urls.py | Python | gpl-2.0 | 489 | 0.010225 |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask, jsonify, redirect, render_template, request, url_for
from itertools import repeat, izip, imap
from jinja2 import Markup
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeException
from kazoo.security import make_acl, make_digest_acl_credential
from raven.contrib.flask import Sentry
from werkzeug.contrib.fixers import ProxyFix
import json
from jones import Jones, Env
import zkutil
import jonesconfig
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(jonesconfig)
app.config.from_envvar('JONES_SETTINGS', silent=True)
if 'SENTRY_DSN' in app.config:
sentry = Sentry(app)
jones_credential = make_digest_acl_credential(
'Jones', app.config['ZK_DIGEST_PASSWORD']
)
_zk = None
def get_zk():
global _zk
if _zk is None:
_zk = KazooClient(
app.config['ZK_CONNECTION_STRING'],
default_acl=(
# grants read permissions to anyone.
make_acl('world', 'anyone', read=True),
# grants all permissions to the creator of the node.
make_acl('auth', '', all=True)
)
)
_zk.start()
_zk.add_auth('digest', jones_credential)
_zk.DataWatch('/services', func=ensure_root)
return _zk
def ensure_root(data, stat):
if not data:
get_zk().ensure_path('/services')
def request_wants(t):
types = ['text/plain', 'text/html', 'application/json']
assert t in types
best = request.accept_mimetypes \
.best_match(types)
return best == t
@app.template_filter()
def as_json(d, indent=None):
return Markup(json.dumps(d, indent=indent))
@app.context_processor
def inject_services():
return dict(services=[child for child in get_zk().get_children('/services') if
Jones(child, get_zk()).exists()])
@app.route('/')
def index():
return render_template('index.j2')
def service_create(env, jones):
jones.create_config(env, {})
if request_wants('application/json') or request_wants('text/plain'):
r = jsonify(service=jones.service)
r.status_code = 201
return r
else:
if env.is_root:
env = None
return redirect(url_for(
'services', service=jones.service, env=env))
def service_update(env, jones):
jones.set_config(
env,
json.loads(request.form['data']),
int(request.form['version'])
)
return env
def service_delete(env, jones):
if env.is_root:
# deleting whole service
jones.delete_all()
#return redirect(url_for('index'))
else:
jones.delete_config(env, -1)
return env, 200
def service_get(env, jones):
if not jones.exists():
return redirect(url_for('index'))
children = jones.get_child_envs(Env.Root)
is_leaf = lambda child: len(child) and not any(
c.find(child + '/') >= 0 for c in children)
try:
version, config = jones.get_config_by_env(env)
except NoNodeException:
return redirect(url_for('services', service=jones.service))
childs = imap(dict, izip(
izip(repeat('env'), imap(Env, children)),
izip(repeat('is_leaf'), imap(is_leaf, children))))
vals = {
"env": env,
"version": version,
"children": list(childs),
"config": config,
"view": jones.get_view_by_env(env),
"service": jones.service,
"associations": jones.get_associations(env)
}
if request_wants('application/json'):
return jsonify(vals)
else:
return render_template('service.j2', **vals)
SERVICE = {
'get': service_get,
'put': service_update,
'post': service_create,
'delete': service_delete
}
ALL_METHODS = ['GET', 'PUT', 'POST', 'DELETE']
@app.route('/service/<string:service>/', defaults={'env': None},
methods=ALL_METHODS)
@app.route('/service/<string:service>/<path:env>/', methods=ALL_METHODS)
def services(service, env):
jones = Jones(service, get_zk())
environment = Env(env)
return SERVICE[request.method.lower()](environment, jones)
@app.route('/service/<string:service>/association/<string:assoc>',
methods=['GET', 'PUT', 'DELETE'])
def association(service, assoc):
jones = Jones(service, get_zk())
if request.method == 'GET':
if request_wants('application/json'):
return jsonify(jones.get_config(assoc))
if request.method == 'PUT':
jones.assoc_host(assoc, Env(request.form['env']))
return service, 201
elif request.method == 'DELETE':
jones.delete_association(assoc)
return service, 200
@app.route('/export')
def export():
return zkutil.export_tree(get_zk(), '/')
if __name__ == '__main__':
app.run()
| mwhooker/jones | jones/web.py | Python | apache-2.0 | 5,362 | 0.000559 |
'''
Created on 12.03.2016
@author: michael
'''
import unittest
from unittest.mock import MagicMock
from alexandriabase.daos import CreatorDao
from alexandriabase.domain import Creator
from alexandriabase.services import CreatorService
class CreatorServiceTest(unittest.TestCase):
def testFindVisible(self):
dao = MagicMock(spec=CreatorDao)
dao.find_all_visible.return_value = [Creator(34), Creator(35)]
service = CreatorService(dao)
result = service.find_all_active_creators()
self.assertEqual(35, result[1].id)
dao.find_all_visible.assert_called_once_with()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | archivsozialebewegungen/AlexandriaBase | tests/servicestests/test_creator_service.py | Python | gpl-3.0 | 732 | 0.008197 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from datetime import timedelta
import pyarrow as pa
try:
import pyarrow.parquet as pq
import pyarrow.parquet_encryption as pe
except ImportError:
pq = None
pe = None
else:
from pyarrow.tests.parquet.encryption import (
InMemoryKmsClient, verify_file_encrypted)
PARQUET_NAME = 'encrypted_table.in_mem.parquet'
FOOTER_KEY = b"0123456789112345"
FOOTER_KEY_NAME = "footer_key"
COL_KEY = b"1234567890123450"
COL_KEY_NAME = "col_key"
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet_encryption'
pytestmark = pytest.mark.parquet_encryption
@pytest.fixture(scope='module')
def data_table():
data_table = pa.Table.from_pydict({
'a': pa.array([1, 2, 3]),
'b': pa.array(['a', 'b', 'c']),
'c': pa.array(['x', 'y', 'z'])
})
return data_table
@pytest.fixture(scope='module')
def basic_encryption_config():
basic_encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
})
return basic_encryption_config
def test_encrypted_parquet_write_read(tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted, and then read it."""
path = tempdir / PARQUET_NAME
# Encrypt the footer with the footer key,
# encrypt column `a` and column `b` with another key,
# keep `c` plaintext
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
},
encryption_algorithm="AES_GCM_V1",
cache_lifetime=timedelta(minutes=5.0),
data_key_length_bits=256)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
verify_file_encrypted(path)
# Read with decryption properties
decryption_config = pe.DecryptionConfiguration(
cache_lifetime=timedelta(minutes=5.0))
result_table = read_encrypted_parquet(
path, decryption_config, kms_connection_config, crypto_factory)
assert data_table.equals(result_table)
def write_encrypted_parquet(path, table, encryption_config,
kms_connection_config, crypto_factory):
file_encryption_properties = crypto_factory.file_encryption_properties(
kms_connection_config, encryption_config)
assert(file_encryption_properties is not None)
with pq.ParquetWriter(
path, table.schema,
encryption_properties=file_encryption_properties) as writer:
writer.write_table(table)
def read_encrypted_parquet(path, decryption_config,
kms_connection_config, crypto_factory):
file_decryption_properties = crypto_factory.file_decryption_properties(
kms_connection_config, decryption_config)
assert(file_decryption_properties is not None)
meta = pq.read_metadata(
path, decryption_properties=file_decryption_properties)
assert(meta.num_columns == 3)
schema = pq.read_schema(
path, decryption_properties=file_decryption_properties)
assert(len(schema.names) == 3)
result = pq.ParquetFile(
path, decryption_properties=file_decryption_properties)
return result.read(use_threads=False)
def test_encrypted_parquet_write_read_wrong_key(tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
and then read it using wrong keys."""
path = tempdir / PARQUET_NAME
# Encrypt the footer with the footer key,
# encrypt column `a` and column `b` with another key,
# keep `c` plaintext
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
},
encryption_algorithm="AES_GCM_V1",
cache_lifetime=timedelta(minutes=5.0),
data_key_length_bits=256)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
verify_file_encrypted(path)
# Read with decryption properties
wrong_kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
# Wrong keys - mixup in names
FOOTER_KEY_NAME: COL_KEY.decode("UTF-8"),
COL_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
}
)
decryption_config = pe.DecryptionConfiguration(
cache_lifetime=timedelta(minutes=5.0))
with pytest.raises(ValueError, match=r"Incorrect master key used"):
read_encrypted_parquet(
path, decryption_config, wrong_kms_connection_config,
crypto_factory)
def test_encrypted_parquet_read_no_decryption_config(tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
but then try to read it without decryption properties."""
test_encrypted_parquet_write_read(tempdir, data_table)
# Read without decryption properties
with pytest.raises(IOError, match=r"no decryption"):
pq.ParquetFile(tempdir / PARQUET_NAME).read()
def test_encrypted_parquet_read_metadata_no_decryption_config(
tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
but then try to read its metadata without decryption properties."""
test_encrypted_parquet_write_read(tempdir, data_table)
# Read metadata without decryption properties
with pytest.raises(IOError, match=r"no decryption"):
pq.read_metadata(tempdir / PARQUET_NAME)
def test_encrypted_parquet_read_schema_no_decryption_config(
tempdir, data_table):
"""Write an encrypted parquet, verify it's encrypted,
but then try to read its schema without decryption properties."""
test_encrypted_parquet_write_read(tempdir, data_table)
with pytest.raises(IOError, match=r"no decryption"):
pq.read_schema(tempdir / PARQUET_NAME)
def test_encrypted_parquet_write_no_col_key(tempdir, data_table):
"""Write an encrypted parquet, but give only footer key,
without column key."""
path = tempdir / 'encrypted_table_no_col_key.in_mem.parquet'
# Encrypt the footer with the footer key
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
with pytest.raises(OSError,
match="Either column_keys or uniform_encryption "
"must be set"):
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
def test_encrypted_parquet_write_kms_error(tempdir, data_table,
basic_encryption_config):
"""Write an encrypted parquet, but raise KeyError in KmsClient."""
path = tempdir / 'encrypted_table_kms_error.in_mem.parquet'
encryption_config = basic_encryption_config
# Empty master_keys_map
kms_connection_config = pe.KmsConnectionConfig()
def kms_factory(kms_connection_configuration):
# Empty master keys map will cause KeyError to be raised
# on wrap/unwrap calls
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
with pytest.raises(KeyError, match="footer_key"):
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
def test_encrypted_parquet_write_kms_specific_error(tempdir, data_table,
basic_encryption_config):
"""Write an encrypted parquet, but raise KeyError in KmsClient."""
path = tempdir / 'encrypted_table_kms_error.in_mem.parquet'
encryption_config = basic_encryption_config
# Empty master_keys_map
kms_connection_config = pe.KmsConnectionConfig()
class ThrowingKmsClient(pe.KmsClient):
"""A KmsClient implementation that throws exception in
wrap/unwrap calls
"""
def __init__(self, config):
"""Create an InMemoryKmsClient instance."""
pe.KmsClient.__init__(self)
self.config = config
def wrap_key(self, key_bytes, master_key_identifier):
raise ValueError("Cannot Wrap Key")
def unwrap_key(self, wrapped_key, master_key_identifier):
raise ValueError("Cannot Unwrap Key")
def kms_factory(kms_connection_configuration):
# Exception thrown in wrap/unwrap calls
return ThrowingKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
with pytest.raises(ValueError, match="Cannot Wrap Key"):
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
def test_encrypted_parquet_write_kms_factory_error(tempdir, data_table,
basic_encryption_config):
"""Write an encrypted parquet, but raise ValueError in kms_factory."""
path = tempdir / 'encrypted_table_kms_factory_error.in_mem.parquet'
encryption_config = basic_encryption_config
# Empty master_keys_map
kms_connection_config = pe.KmsConnectionConfig()
def kms_factory(kms_connection_configuration):
raise ValueError('Cannot create KmsClient')
crypto_factory = pe.CryptoFactory(kms_factory)
with pytest.raises(ValueError,
match="Cannot create KmsClient"):
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
def test_encrypted_parquet_write_kms_factory_type_error(
tempdir, data_table, basic_encryption_config):
"""Write an encrypted parquet, but use wrong KMS client type
that doesn't implement KmsClient."""
path = tempdir / 'encrypted_table_kms_factory_error.in_mem.parquet'
encryption_config = basic_encryption_config
# Empty master_keys_map
kms_connection_config = pe.KmsConnectionConfig()
class WrongTypeKmsClient():
"""This is not an implementation of KmsClient.
"""
def __init__(self, config):
self.master_keys_map = config.custom_kms_conf
def wrap_key(self, key_bytes, master_key_identifier):
return None
def unwrap_key(self, wrapped_key, master_key_identifier):
return None
def kms_factory(kms_connection_configuration):
return WrongTypeKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
with pytest.raises(TypeError):
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
def test_encrypted_parquet_encryption_configuration():
def validate_encryption_configuration(encryption_config):
assert(FOOTER_KEY_NAME == encryption_config.footer_key)
assert(["a", "b"] == encryption_config.column_keys[COL_KEY_NAME])
assert("AES_GCM_CTR_V1" == encryption_config.encryption_algorithm)
assert(encryption_config.plaintext_footer)
assert(not encryption_config.double_wrapping)
assert(timedelta(minutes=10.0) == encryption_config.cache_lifetime)
assert(not encryption_config.internal_key_material)
assert(192 == encryption_config.data_key_length_bits)
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={COL_KEY_NAME: ["a", "b"], },
encryption_algorithm="AES_GCM_CTR_V1",
plaintext_footer=True,
double_wrapping=False,
cache_lifetime=timedelta(minutes=10.0),
internal_key_material=False,
data_key_length_bits=192,
)
validate_encryption_configuration(encryption_config)
encryption_config_1 = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME)
encryption_config_1.column_keys = {COL_KEY_NAME: ["a", "b"], }
encryption_config_1.encryption_algorithm = "AES_GCM_CTR_V1"
encryption_config_1.plaintext_footer = True
encryption_config_1.double_wrapping = False
encryption_config_1.cache_lifetime = timedelta(minutes=10.0)
encryption_config_1.internal_key_material = False
encryption_config_1.data_key_length_bits = 192
validate_encryption_configuration(encryption_config_1)
def test_encrypted_parquet_decryption_configuration():
decryption_config = pe.DecryptionConfiguration(
cache_lifetime=timedelta(minutes=10.0))
assert(timedelta(minutes=10.0) == decryption_config.cache_lifetime)
decryption_config_1 = pe.DecryptionConfiguration()
decryption_config_1.cache_lifetime = timedelta(minutes=10.0)
assert(timedelta(minutes=10.0) == decryption_config_1.cache_lifetime)
def test_encrypted_parquet_kms_configuration():
def validate_kms_connection_config(kms_connection_config):
assert("Instance1" == kms_connection_config.kms_instance_id)
assert("URL1" == kms_connection_config.kms_instance_url)
assert("MyToken" == kms_connection_config.key_access_token)
assert({"key1": "key_material_1", "key2": "key_material_2"} ==
kms_connection_config.custom_kms_conf)
kms_connection_config = pe.KmsConnectionConfig(
kms_instance_id="Instance1",
kms_instance_url="URL1",
key_access_token="MyToken",
custom_kms_conf={
"key1": "key_material_1",
"key2": "key_material_2",
})
validate_kms_connection_config(kms_connection_config)
kms_connection_config_1 = pe.KmsConnectionConfig()
kms_connection_config_1.kms_instance_id = "Instance1"
kms_connection_config_1.kms_instance_url = "URL1"
kms_connection_config_1.key_access_token = "MyToken"
kms_connection_config_1.custom_kms_conf = {
"key1": "key_material_1",
"key2": "key_material_2",
}
validate_kms_connection_config(kms_connection_config_1)
@pytest.mark.xfail(reason="Plaintext footer - reading plaintext column subset"
" reads encrypted columns too")
def test_encrypted_parquet_write_read_plain_footer_single_wrapping(
tempdir, data_table):
"""Write an encrypted parquet, with plaintext footer
and with single wrapping,
verify it's encrypted, and then read plaintext columns."""
path = tempdir / PARQUET_NAME
# Encrypt the footer with the footer key,
# encrypt column `a` and column `b` with another key,
# keep `c` plaintext
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={
COL_KEY_NAME: ["a", "b"],
},
plaintext_footer=True,
double_wrapping=False)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
# # Read without decryption properties only the plaintext column
# result = pq.ParquetFile(path)
# result_table = result.read(columns='c', use_threads=False)
# assert table.num_rows == result_table.num_rows
@pytest.mark.xfail(reason="External key material not supported yet")
def test_encrypted_parquet_write_external(tempdir, data_table):
"""Write an encrypted parquet, with external key
material.
Currently it's not implemented, so should throw
an exception"""
path = tempdir / PARQUET_NAME
# Encrypt the file with the footer key
encryption_config = pe.EncryptionConfiguration(
footer_key=FOOTER_KEY_NAME,
column_keys={},
internal_key_material=False)
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8")}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
@pytest.mark.skip(reason="ARROW-14114: Multithreaded read sometimes fails"
"decryption finalization or with Segmentation fault")
def test_encrypted_parquet_loop(tempdir, data_table, basic_encryption_config):
"""Write an encrypted parquet, verify it's encrypted,
and then read it multithreaded in a loop."""
path = tempdir / PARQUET_NAME
# Encrypt the footer with the footer key,
# encrypt column `a` and column `b` with another key,
# keep `c` plaintext
encryption_config = basic_encryption_config
kms_connection_config = pe.KmsConnectionConfig(
custom_kms_conf={
FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
COL_KEY_NAME: COL_KEY.decode("UTF-8"),
}
)
def kms_factory(kms_connection_configuration):
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory)
verify_file_encrypted(path)
decryption_config = pe.DecryptionConfiguration(
cache_lifetime=timedelta(minutes=5.0))
for i in range(50):
# Read with decryption properties
file_decryption_properties = crypto_factory.file_decryption_properties(
kms_connection_config, decryption_config)
assert(file_decryption_properties is not None)
result = pq.ParquetFile(
path, decryption_properties=file_decryption_properties)
result_table = result.read(use_threads=True)
assert data_table.equals(result_table)
| kou/arrow | python/pyarrow/tests/parquet/test_encryption.py | Python | apache-2.0 | 20,329 | 0 |
'''
Created on Apr 25, 2017
@author: kashefy
'''
import numpy as np
import h5py
from nideep.iow.file_system_utils import gen_paths, filter_is_h5
def id_loc_to_loc(fpath_src, key_dst, key_src='label_id_loc', has_void_bin=True):
with h5py.File(fpath_src, 'r+') as h:
if has_void_bin:
l = np.sum(h[key_src][...,:-1], axis=1)
else:
l = np.sum(h[key_src], axis=1)
l = np.expand_dims(l, 1)
h[key_dst] = l
def walk_id_loc_to_loc(dir_src, key_dst):
def runner(fpath):
if filter_is_h5(fpath):
id_loc_to_loc(fpath, key_dst)
return True # otherwise gen_paths won't append to list
flist = gen_paths(dir_src, func_filter=runner)
return flist
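# Illustrative usage (the directory and key below are hypothetical): walk a tree of
# HDF5 files and add a collapsed localisation label to each file found:
#   walk_id_loc_to_loc('/data/twoears/train', key_dst='label_loc')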
if __name__ == '__main__':
pass | kashefy/caffe_sandbox | nideep/datasets/twoears/label_utils.py | Python | bsd-2-clause | 798 | 0.017544 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.predictor import predictor
from tensorflow.python.framework import ops
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
class ContribEstimatorPredictor(predictor.Predictor):
"""A `Predictor constructed from a `tf.contrib.learn.Estimator`."""
def __init__(self,
estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None,
config=None):
"""Initialize a `ContribEstimatorPredictor`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
config: `ConfigProto` proto used to configure the session.
"""
self._graph = graph or ops.Graph()
with self._graph.as_default():
input_fn_ops = prediction_input_fn()
# pylint: disable=protected-access
model_fn_ops = estimator._get_predict_ops(input_fn_ops.features)
# pylint: enable=protected-access
checkpoint_path = saver.latest_checkpoint(estimator.model_dir)
self._session = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
config=config,
checkpoint_filename_with_path=checkpoint_path))
input_alternative_key = (
input_alternative_key or
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY)
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_fn_ops)
self._feed_tensors = input_alternatives[input_alternative_key]
(output_alternatives,
output_alternative_key) = saved_model_export_utils.get_output_alternatives(
model_fn_ops, output_alternative_key)
_, fetch_tensors = output_alternatives[output_alternative_key]
self._fetch_tensors = fetch_tensors
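# Hypothetical usage sketch; the estimator, input_fn and key names below are
# placeholders and not part of this module:
#   predictor = ContribEstimatorPredictor(
#       estimator=my_estimator,              # a tf.contrib.learn.Estimator
#       prediction_input_fn=my_input_fn,     # no-arg callable returning an InputFnOps
#       output_alternative_key='scores')     # required for multi-headed models
#   outputs = predictor({'inputs': batch})   # dict of feed values -> dict of fetches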
| drpngx/tensorflow | tensorflow/contrib/predictor/contrib_estimator_predictor.py | Python | apache-2.0 | 3,274 | 0.003054 |
#!/usr/bin/env python
from translate.convert import xliff2po
from translate.misc import wStringIO
from translate.storage.test_base import headerless_len, first_translatable
class TestXLIFF2PO:
xliffskeleton = '''<?xml version="1.0" ?>
<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
<file original="filename.po" source-language="en-US" datatype="po">
<body>
%s
</body>
</file>
</xliff>'''
def xliff2po(self, xliffsource):
"""helper that converts xliff source to po source without requiring files"""
inputfile = wStringIO.StringIO(xliffsource)
convertor = xliff2po.xliff2po()
outputpo = convertor.convertstore(inputfile)
print "The generated po:"
print type(outputpo)
print str(outputpo)
return outputpo
def test_minimal(self):
minixlf = self.xliffskeleton % '''<trans-unit>
<source>red</source>
<target>rooi</target>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert headerless_len(pofile.units) == 1
assert pofile.translate("red") == "rooi"
assert pofile.translate("bla") is None
def test_basic(self):
headertext = '''Project-Id-Version: program 2.1-branch
Report-Msgid-Bugs-To:
POT-Creation-Date: 2006-01-09 07:15+0100
PO-Revision-Date: 2004-03-30 17:02+0200
Last-Translator: Zuza Software Foundation <[email protected]>
Language-Team: Afrikaans <[email protected]>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit'''
minixlf = (self.xliffskeleton % '''<trans-unit id="1" restype="x-gettext-domain-header" approved="no" xml:space="preserve">
<source>%s</source>
<target>%s</target>
<note from="po-translator">Zulu translation of program ABC</note>
</trans-unit>
<trans-unit>
<source>gras</source>
<target>utshani</target>
</trans-unit>''') % (headertext, headertext)
print minixlf
pofile = self.xliff2po(minixlf)
assert pofile.translate("gras") == "utshani"
assert pofile.translate("bla") is None
potext = str(pofile)
assert potext.index('# Zulu translation of program ABC') == 0
assert potext.index('msgid "gras"\n')
assert potext.index('msgstr "utshani"\n')
assert potext.index('MIME-Version: 1.0\\n')
def test_translatorcomments(self):
"""Tests translator comments"""
minixlf = self.xliffskeleton % '''<trans-unit>
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-trancomment">Couldn't do
it</context>
</context-group>
<note from="po-translator">Couldn't do
it</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("translator") == "Couldn't do it"
potext = str(pofile)
assert potext.index("# Couldn't do it\n") >= 0
minixlf = self.xliffskeleton % '''<trans-unit xml:space="preserve">
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-trancomment">Couldn't do
it</context>
</context-group>
<note from="po-translator">Couldn't do
it</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("translator") == "Couldn't do\nit"
potext = str(pofile)
assert potext.index("# Couldn't do\n# it\n") >= 0
def test_autocomment(self):
"""Tests automatic comments"""
minixlf = self.xliffskeleton % '''<trans-unit>
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-autocomment">Note that this is
garbage</context>
</context-group>
<note from="developer">Note that this is
garbage</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("developer") == "Note that this is garbage"
potext = str(pofile)
assert potext.index("#. Note that this is garbage\n") >= 0
minixlf = self.xliffskeleton % '''<trans-unit xml:space="preserve">
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-entry" purpose="information">
<context context-type="x-po-autocomment">Note that this is
garbage</context>
</context-group>
<note from="developer">Note that this is
garbage</note>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
assert unit.getnotes("developer") == "Note that this is\ngarbage"
potext = str(pofile)
assert potext.index("#. Note that this is\n#. garbage\n") >= 0
def test_locations(self):
"""Tests location comments (#:)"""
minixlf = self.xliffskeleton % '''<trans-unit id="1">
<source>nonsense</source>
<target>matlhapolosa</target>
<context-group name="po-reference" purpose="location">
<context context-type="sourcefile">example.c</context>
<context context-type="linenumber">123</context>
</context-group>
<context-group name="po-reference" purpose="location">
<context context-type="sourcefile">place.py</context>
</context-group>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("bla") is None
unit = first_translatable(pofile)
locations = unit.getlocations()
assert len(locations) == 2
assert "example.c:123" in locations
assert "place.py" in locations
def test_fuzzy(self):
"""Tests fuzzyness"""
minixlf = self.xliffskeleton % '''<trans-unit approved="no">
<source>book</source>
</trans-unit>
<trans-unit id="2" approved="yes">
<source>nonsense</source>
<target>matlhapolosa</target>
</trans-unit>
<trans-unit id="2" approved="no">
<source>verb</source>
<target state="needs-review-translation">lediri</target>
</trans-unit>'''
pofile = self.xliff2po(minixlf)
assert pofile.translate("nonsense") == "matlhapolosa"
assert pofile.translate("verb") == "lediri"
assert pofile.translate("book") is None
assert pofile.translate("bla") is None
assert headerless_len(pofile.units) == 3
#TODO: decide if this one should be fuzzy:
#assert pofile.units[0].isfuzzy()
assert not pofile.units[2].isfuzzy()
assert pofile.units[3].isfuzzy()
def test_plurals(self):
"""Tests fuzzyness"""
minixlf = self.xliffskeleton % '''<group id="1" restype="x-gettext-plurals">
<trans-unit id="1[0]" xml:space="preserve">
<source>cow</source>
<target>inkomo</target>
</trans-unit>
<trans-unit id="1[1]" xml:space="preserve">
<source>cows</source>
<target>iinkomo</target>
</trans-unit>
</group>'''
pofile = self.xliff2po(minixlf)
print str(pofile)
potext = str(pofile)
assert headerless_len(pofile.units) == 1
assert potext.index('msgid_plural "cows"')
assert potext.index('msgstr[0] "inkomo"')
assert potext.index('msgstr[1] "iinkomo"')
class TestBasicXLIFF2PO(TestXLIFF2PO):
"""This tests a basic XLIFF file without xmlns attribute"""
xliffskeleton = '''<?xml version="1.0" ?>
<xliff version="1.1">
<file original="filename.po" source-language="en-US" datatype="po">
<body>
%s
</body>
</file>
</xliff>'''
| dbbhattacharya/kitsune | vendor/packages/translate-toolkit/translate/convert/test_xliff2po.py | Python | bsd-3-clause | 8,448 | 0.00071 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial no-op Yoga contract migration.
Revision ID: e25ffa003242
Revises: 27e647c0fad4
Create Date: 2022-01-21 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'e25ffa003242'
down_revision = '27e647c0fad4'
branch_labels = ('contract',)
def upgrade():
pass
| openstack/keystone | keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py | Python | apache-2.0 | 835 | 0 |
import unittest
from itertools import product
from obfusc8.circuit import *
from obfusc8.bp import *
#enable testing of 'private' module member functions, somewhat sloppy style but I prefer it to any alternative
from obfusc8.bp import _matrix2cycle
class TestBranchingProgram(unittest.TestCase):
def setUp(self):
self.inputLength = 8
inputs = [Input("x"+str(x)) for x in range(0, self.inputLength)]
# (-(x0 & x1) & (-x2 & x3)) & ((x4 & x5) & -(x6 & -x7))
self.circuit = Circuit(AndGate(AndGate(NotGate(AndGate(inputs[0], inputs[1])), AndGate(NotGate(inputs[2]), inputs[3])), AndGate(AndGate(inputs[4], inputs[5]),NotGate(AndGate(inputs[6], NotGate(inputs[7]))))))
self.bp = BranchingProgram.fromCircuit(self.circuit)
def test_estimateBPSize_for_example_circuit(self):
self.assertEqual(self.bp.length, BranchingProgram.estimateBPSize(self.circuit), 'incorrecet size calculated')
def test_equality_of_bp_to_circuit(self):
for test in list(product([0,1], repeat=self.inputLength)):
test = list(test)
circuitResult = self.circuit.evaluate(test)
bpResult = self.bp.evaluate(test)
self.assertEqual(circuitResult, bpResult, 'Wrong evaluation on input %s. Was %s instead of %s'%(test, circuitResult, bpResult))
class TestPrecalculatedMappings(unittest.TestCase):
def setUp(self):
self.mappings = precalculatedMappings()
self.id2permList = precalculatedId2PermList()
def test_precalculated_mappings(self):
for id, perm in zip(range(len(self.id2permList)), self.id2permList):
correct = dot(dot(_ni2n(), dot(perm, _normalInv())), _ni2n())
mappedResult = self.id2permList[self.mappings[0][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_ni2n(), perm), _ni2n())
mappedResult = self.id2permList[self.mappings[1][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_sec2si(), perm), _sec2si())
mappedResult = self.id2permList[self.mappings[2][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_special1(), perm), _special1())
mappedResult = self.id2permList[self.mappings[3][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_special2(), perm), _special3())
mappedResult = self.id2permList[self.mappings[4][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
correct = dot(dot(_n2secInv(), perm), _n2sec())
mappedResult = self.id2permList[self.mappings[5][id]]
self.assertTrue((correct == mappedResult).all(), 'Mapping 0 not correct on input %s. Was %s instead of %s.'%(perm, mappedResult, correct))
def _identity(): return array([[1,0,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]])
def _normal(): return array([[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1],[1,0,0,0,0]]) #(01234)
def _normalInv(): return array([[0,0,0,0,1],[1,0,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0]]) #(04321)
def _ni2n(): return array([[1,0,0,0,0],[0,0,0,0,1],[0,0,0,1,0],[0,0,1,0,0],[0,1,0,0,0]]) #(14)(23)
def _n2sec(): return array([[1,0,0,0,0],[0,0,1,0,0],[0,0,0,0,1],[0,0,0,1,0],[0,1,0,0,0]]) #(124)
def _n2secInv(): return array([[1,0,0,0,0],[0,0,0,0,1],[0,1,0,0,0],[0,0,0,1,0],[0,0,1,0,0]]) #(142)
def _sec2si(): return array([[1,0,0,0,0],[0,0,1,0,0],[0,1,0,0,0],[0,0,0,0,1],[0,0,0,1,0]]) #(12)(34)
#def _res2n(): return array([[1,0,0,0,0],[0,0,0,0,1],[0,0,0,1,0],[0,0,1,0,0],[0,1,0,0,0]]) #(14)(23)
def _special1(): return array([[1, 0, 0, 0, 0],[0, 0, 0, 1, 0],[0, 0, 0, 0, 1],[0, 1, 0, 0, 0],[0, 0, 1, 0, 0]]) #(13)(24)
def _special2(): return array([[1, 0, 0, 0, 0],[0, 1, 0, 0, 0],[0, 0, 0, 0, 1],[0, 0, 1, 0, 0],[0, 0, 0, 1, 0]]) #(243)
def _special3(): return array([[1, 0, 0, 0, 0],[0, 1, 0, 0, 0],[0, 0, 0, 1, 0],[0, 0, 0, 0, 1],[0, 0, 1, 0, 0]]) #(234)
class TestExplicitPermutations(unittest.TestCase):
def test_matrix2cycle(self):
a = array([[0,0,1,0,0],[0,1,0,0,0],[1,0,0,0,0],[0,0,0,1,0],[0,0,0,0,1]])
self.assertEqual(_matrix2cycle(a), '(02)', 'wrong on input %s'%a)
self.assertEqual('(01234)', _matrix2cycle(_normal()), 'wrong on input %s'%_normal())
self.assertEqual('e', _matrix2cycle(_identity()), 'wrong on input %s'%_identity())
self.assertEqual('(04321)', _matrix2cycle(_normalInv()), 'wrong on input %s'%_normalInv())
self.assertEqual('(14)(23)', _matrix2cycle(_ni2n()), 'wrong on input %s'%_ni2n())
self.assertEqual('(124)', _matrix2cycle(_n2sec()), 'wrong on input %s'%_n2sec())
self.assertEqual('(142)', _matrix2cycle(_n2secInv()), 'wrong on input %s'%_n2secInv())
self.assertEqual('(12)(34)', _matrix2cycle(_sec2si()), 'wrong on input %s'%_sec2si())
self.assertEqual('(13)(24)', _matrix2cycle(_special1()), 'wrong on input %s'%_special1())
self.assertEqual('(243)', _matrix2cycle(_special2()), 'wrong on input %s'%_special2())
self.assertEqual('(234)', _matrix2cycle(_special3()), 'wrong on input %s'%_special3())
if __name__ == '__main__':
unittest.main()
| tum-i22/indistinguishability-obfuscation | obfusc8/test/test_bp.py | Python | apache-2.0 | 5,366 | 0.063362 |
import mms
import unittest
from mooseutils import fuzzyAbsoluteEqual
class TestOutflow(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('advection-outflow.i', 7, y_pp=['L2u', 'L2v'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u', 'L2v'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('outflow.png')
for label,value in fig.label_to_slope.items():
if label == 'L2u':
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
else:
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class TestExtrapolation(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('advection.i', 7, y_pp=['L2u', 'L2v'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u', 'L2v'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('extrapolation.png')
for label,value in fig.label_to_slope.items():
if label == 'L2u':
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
else:
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class UpwindLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 7, "FVKernels/advection_u/limiter='upwind'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('upwind-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
class CentralDifferenceLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 7, "FVKernels/advection_u/limiter='central_difference'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('cd-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class VanLeerLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 9, "FVKernels/advection_u/limiter='vanLeer'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('vanLeer-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class MinModLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 9, "FVKernels/advection_u/limiter='min_mod'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('min-mod-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class SOULimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 9, "FVKernels/advection_u/limiter='sou'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('sou-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class QUICKLimiter(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('limited-advection.i', 15, "FVKernels/advection_u/limiter='quick'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('quick-limiter.png')
for label,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
class KTLimitedCD(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('kt-limited-advection.i', 11, "FVKernels/advection_u/limiter='central_difference'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('kt-cd-limiter.png')
for key,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2., .05))
print("%s slope, %f" % (key, value))
class KTLimitedUpwind(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('kt-limited-advection.i', 13, "FVKernels/advection_u/limiter='upwind'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('kt-upwind-limiter.png')
for key,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 1., .05))
print("%s slope, %f" % (key, value))
class KTLimitedVanLeer(unittest.TestCase):
def test(self):
df1 = mms.run_spatial('kt-limited-advection.i', 11, "FVKernels/advection_u/limiter='vanLeer'", y_pp=['L2u'])
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1,
label=['L2u'],
marker='o',
markersize=8,
num_fitted_points=3,
slope_precision=1)
fig.save('kt-van-leer-limiter.png')
for key,value in fig.label_to_slope.items():
self.assertTrue(fuzzyAbsoluteEqual(value, 2.5, .05))
print("%s slope, %f" % (key, value))
| harterj/moose | test/tests/fvkernels/mms/advective-outflow/test.py | Python | lgpl-2.1 | 7,075 | 0.005936 |
# coding=utf-8
from string import ascii_uppercase
import flask_featureflags
from app.main import main
from flask import render_template, request
from app.helpers.search_helpers import get_template_data
from app import data_api_client
import re
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
def process_prefix(prefix):
if prefix == "123": # special case
return prefix
if is_alpha(prefix):
return prefix[:1].upper()
return "A" # default
def process_page(page):
reg = "^[1-9]{1}$" # valid page
if re.search(reg, page):
return page
return "1" # default
def is_alpha(character):
reg = "^[A-Za-z]{1}$" # valid prefix
return re.search(reg, character)
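# Illustrative behaviour of the helpers above (examples only, not from the original
# source): process_prefix('b') -> 'B', process_prefix('9') -> 'A' (default),
# process_prefix('123') -> '123' (special case); process_page('3') -> '3', while
# process_page('10') -> '1' because only a single digit 1-9 is accepted.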
def parse_links(links):
pagination_links = {
"prev": None,
"next": None
}
if 'prev' in links:
pagination_links['prev'] = parse_qs(urlparse(links['prev']).query)
if 'next' in links:
pagination_links['next'] = parse_qs(urlparse(links['next']).query)
return pagination_links
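# Illustrative sketch with hypothetical data: given
#   links = {'next': 'http://host/suppliers?prefix=A&page=2'}
# parse_links returns {'prev': None, 'next': {'prefix': ['A'], 'page': ['2']}},
# since parse_qs maps each query parameter to a list of values.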
@main.route('/g-cloud/suppliers')
@flask_featureflags.is_active_feature('SUPPLIER_A_TO_Z')
def suppliers_list_by_prefix():
prefix = process_prefix(request.args.get('prefix', default='A'))
page = process_page(request.args.get('page', default="1"))
api_result = data_api_client.find_suppliers(prefix, page, 'gcloud')
suppliers = api_result["suppliers"]
links = api_result["links"]
template_data = get_template_data(main, {
'title': 'Digital Marketplace - Suppliers'
})
return render_template('suppliers_list.html',
suppliers=suppliers,
nav=ascii_uppercase,
count=len(suppliers),
prev_link=parse_links(links)['prev'],
next_link=parse_links(links)['next'],
prefix=prefix,
**template_data)
@main.route('/g-cloud/supplier/<supplier_id>')
@flask_featureflags.is_active_feature('SUPPLIER_A_TO_Z')
def suppliers_details(supplier_id):
supplier = data_api_client.get_supplier(
supplier_id=supplier_id)["suppliers"]
template_data = get_template_data(main, {
'title': 'Digital Marketplace - Suppliers'
})
first_character_of_supplier_name = supplier["name"][:1]
if is_alpha(first_character_of_supplier_name):
prefix = process_prefix(supplier["name"][:1])
else:
prefix = "123"
return render_template(
'suppliers_details.html',
supplier=supplier,
prefix=prefix,
**template_data)
| mtekel/digitalmarketplace-buyer-frontend | app/main/suppliers.py | Python | mit | 2,705 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
from sahara_dashboard.test.integration_tests.helpers import SaharaTestCase
IMAGE_NAME = helpers.gen_random_resource_name("image")
class TestSaharaImageRegistry(SaharaTestCase):
def setUp(self):
super(TestSaharaImageRegistry, self).setUp()
image_pg = self.home_pg.go_to_project_compute_imagespage()
image_pg.create_image(
IMAGE_NAME, image_file=self.CONFIG.sahara.fake_image_location)
image_pg.find_message_and_dismiss(messages.SUCCESS)
image_pg.wait_until_image_active(IMAGE_NAME)
def test_image_register_unregister(self):
"""Test the image registration in Sahara."""
image_reg_pg = \
self.home_pg.go_to_dataprocessing_clusters_imageregistrypage()
image_reg_pg.register_image(IMAGE_NAME, self.CONFIG.scenario.ssh_user,
"Test description")
image_reg_pg.wait_until_image_registered(IMAGE_NAME)
self.assertTrue(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not registered.")
self.assertTrue(
image_reg_pg.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
image_reg_pg.find_message_and_dismiss(messages.ERROR),
"Error message occurred during image creation.")
image_reg_pg.unregister_image(IMAGE_NAME)
self.assertTrue(
image_reg_pg.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
image_reg_pg.find_message_and_dismiss(messages.ERROR))
self.assertFalse(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not unregistered.")
def tearDown(self):
image_pg = self.home_pg.go_to_project_compute_imagespage()
image_pg.delete_image(IMAGE_NAME)
super(TestSaharaImageRegistry, self).tearDown()
| openstack/sahara-dashboard | sahara_dashboard/test/integration_tests/tests/test_sahara_image_registry.py | Python | apache-2.0 | 2,572 | 0 |
import os
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.site.site_header = os.environ.get('{{cookiecutter.env_prefix}}_TITLE', '{{cookiecutter.project_name}} Admin')
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^api/auth/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/token-auth/',
'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^', include('{{cookiecutter.app_name}}.urls')),
)
| westerncapelabs/django-wcl-skel | {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/urls.py | Python | bsd-3-clause | 536 | 0.001866 |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.model import fields
from trytond.pool import Pool, PoolMeta
__all__ = ['StockMove']
__metaclass__ = PoolMeta
class StockMove:
__name__ = 'stock.move'
invoice_lines = fields.Many2Many('account.invoice.line-stock.move',
'stock_move', 'invoice_line', 'Invoice Lines')
@property
def invoiced_quantity(self):
'The quantity from linked invoice lines in move unit'
pool = Pool()
Uom = pool.get('product.uom')
quantity = 0
for invoice_line in self.invoice_lines:
quantity += Uom.compute_qty(invoice_line.unit,
invoice_line.quantity, self.uom)
return quantity
@classmethod
def copy(cls, moves, default=None):
if default is None:
default = {}
else:
default = default.copy()
default.setdefault('invoice_lines', None)
return super(StockMove, cls).copy(moves, default=default)
| kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/account_invoice_stock/stock.py | Python | gpl-3.0 | 1,091 | 0.001833 |
"""Gross moist stability-related quantities."""
from aospy.constants import c_p, grav, L_v
from aospy.utils.vertcoord import to_pascal
from indiff.deriv import EtaCenDeriv, CenDeriv
import numpy as np
from .. import PLEVEL_STR
from . import horiz_divg, vert_divg
from .thermo import dse, mse, fmse
def field_vert_int_max(arr, dp):
"""Maximum magnitude of integral of a field from surface up."""
dp = to_pascal(dp)
# 2015-05-15: Problem: Sigma data indexing starts at TOA, while pressure
# data indexing starts at 1000 hPa. So for now only do for
# sigma data and flip array direction to start from sfc.
arr_dp_g = (arr*dp)[::-1] / grav
# Input array dimensions are assumed ([time dims,] level, lat, lon).
pos_max = np.amax(np.cumsum(arr_dp_g, axis=0), axis=-3)
neg_max = np.amin(np.cumsum(arr_dp_g, axis=0), axis=-3)
# Flip sign because integrating from p_sfc up, i.e. with dp negative.
return -1*np.where(pos_max > -neg_max, pos_max, neg_max)
def horiz_divg_vert_int_max(u, v, radius, dp):
"""Maximum magnitude of integral upwards of horizontal divergence."""
return field_vert_int_max(horiz_divg(u, v, radius, dp), dp)
def vert_divg_vert_int_max(omega, p, dp):
"""Maximum magnitude of integral from surface up of vertical divergence."""
return field_vert_int_max(vert_divg(omega, p, dp), dp)
def gms_like_ratio(weights, tracer, dp):
"""Compute ratio of integrals in the style of gross moist stability."""
# Integrate weights over lower tropospheric layer
dp = to_pascal(dp)
denominator = field_vert_int_max(weights, dp)
# Integrate tracer*weights over whole column and divide.
numerator = np.sum(weights*tracer*dp, axis=-3) / grav
return numerator / denominator
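# Note: in the gross-stability functions below, the "weights" are a divergence field
# and the "tracer" a static energy, so the ratio is the column integral of
# weights*tracer divided by the largest lower-tropospheric integral of the weights alone.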
def gross_moist_strat(sphum, u, v, radius, dp):
"""Gross moisture stratification, in horizontal divergence form."""
divg = horiz_divg(u, v, radius)
return L_v*gms_like_ratio(divg, sphum, dp)
def gross_dry_stab(temp, hght, u, v, radius, dp):
"""Gross dry stability, in horizontal divergence form."""
divg = horiz_divg(u, v, radius)
return -gms_like_ratio(divg, dse(temp, hght), dp)
def gross_moist_stab(temp, hght, sphum, u, v, radius, dp):
"""Gross moist stability, in horizontal divergence form."""
divg = horiz_divg(u, v, radius)
return -gms_like_ratio(divg, mse(temp, hght, sphum), dp)
def gms_up_low(temp, hght, sphum, level, lev_up=400., lev_dn=925.):
"""Gross moist stability. Upper minus lower level MSE."""
m = mse(temp, hght, sphum)
return (np.squeeze(m[np.where(level == lev_up)] -
m[np.where(level == lev_dn)])/c_p)
def gms_each_level(temp, hght, sphum, level, lev_dn=925.):
m = mse(temp, hght, sphum)
return (m - m[np.where(level == lev_dn)])/c_p
def dry_static_stab(temp, hght, level, lev_dn=925.):
"""Dry static stability, in terms of dry static energy."""
d = dse(temp, hght)
return (d - d[np.where(level == lev_dn)])/c_p
def frozen_moist_static_stab(temp, hght, sphum, q_ice, ps, bk, pk):
"""Frozen moist static stability using model-native coordinate data."""
return EtaCenDeriv(fmse(temp, hght, sphum, q_ice), pk, bk, ps, order=2,
fill_edge=True).deriv()
def moist_static_stab(temp, hght, sphum, ps, bk, pk):
"""Moist static stability using model-native coordinate data. No ice."""
return EtaCenDeriv(mse(temp, hght, sphum), pk, bk, ps, order=2,
fill_edge=True).deriv()
def frozen_moist_static_stab_p(temp, hght, sphum, q_ice):
"""Frozen moist static stability using pressure-interpolated data.
Note that the values in the stratosphere become unphysical using pressure
interpolated data, but otherwise in the troposphere they agree well with
data on model-native coordinates.
"""
p = to_pascal(temp[PLEVEL_STR])
return CenDeriv(fmse(temp, hght, sphum, q_ice), PLEVEL_STR, coord=p,
order=2, fill_edge=True).deriv()
def moist_static_stab_p(temp, hght, sphum):
"""Moist static stability using pressure-interpolated data. No ice.
Note that the values in the stratosphere become unphysical using pressure
interpolated data, but otherwise in the troposphere they agree well with
data on model-native coordinates.
"""
p = to_pascal(temp[PLEVEL_STR])
return CenDeriv(mse(temp, hght, sphum), PLEVEL_STR, coord=p,
order=2, fill_edge=True).deriv()
| spencerahill/aospy-obj-lib | aospy_user/calcs/gms.py | Python | apache-2.0 | 4,514 | 0 |
from email import message_from_file
from pkg_resources import working_set as WS
import path
from pathlib import Path
from pkg_resources import *
from pkg_resources import DistInfoDistribution, Distribution
import distutils.dist
import pkg_resources
import dpath
import sys, os, re
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
import json
import pip
import sys
import socket
import multiprocessing
from xmlrpc.client import ServerProxy
from itertools import zip_longest
from functools import reduce
LISTING = '/Users/Kristen/PycharmProjects/proj/Other/data/current_dists.txt'
BASENAMES = ('name', 'version', 'author', 'author_email', 'maintainer', 'maintainer_email', 'url', 'license', 'description', 'long_description', 'keywords', 'platforms', 'fullname', 'contact', 'contact_email', 'license', 'classifiers', 'download_url', 'provides', 'requires', 'obsoletes')
def listing():
listing = set()
with open(LISTING) as reader:
data =reader.read()
data = [item.strip().replace('-', '_') for item in data.splitlines()]
return frozenset(data)
listing =listing()
from pkg_resources import working_set
WS = working_set.by_key
ws = frozenset(name.replace('-', '_') for name in WS.keys())
def search_dist_list(word): return [dist for dist in ws if dist.__contains__(word)]
class cache(object):
def __init__(self, func=None):
self.storage = dict()
self.items = set()
if func:
self.function = func
else:
self.function = lambda x: x
self.description = ''
	def update_description(self, msg, rewrite=False):
		if not rewrite:
			# Combine the new text with the existing description before storing it.
			msg = '. ' + msg
			msg += self.description
		self.description = msg
		return self.description
def get_state(self):
class_dict = self.__dict__
if 'function' in class_dict.keys():
del class_dict['function']
return class_dict
def on_search(self, key, search_function=None):
searcher = search_function if search_function else self.function
if key in self.items:
return self.storage[key]
else:
seek = searcher(key)
if seek is not None:
self.storage[key] = seek
self.items.add(key)
self.__setattr__(key, seek)
return seek
return None
	def __getattr__(self, attr):
		# cache is not a mapping, so look unknown attributes up in the stored search results.
		try:
			return self.storage[attr]
		except KeyError:
			raise AttributeError(attr)
def __repr__(self):
if hasattr(self.function, '__name__'):
name = self.function.__name__
else:
name = "nofunc"
items = str(len(self.items))
return "Cache for function ({}) with {} items".format(name, items)
Cache =cache(search_dist_list)
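# Minimal usage sketch (the search term is illustrative only):
#   hits = Cache.on_search('test')   # runs search_dist_list('test') and memoises the result
#   hits = Cache.on_search('test')   # second call is served from Cache.storage
#   print(Cache)                     # e.g. "Cache for function (search_dist_list) with 1 items"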
class EmptyDictException(Exception):
"""Need a dictionary that isn't `None`"""
def split_keyword_dicts(d=None):
	if d is None:
		print('Need a dictionary that isnt None')
		return None
	if not isdict(d):
		return None
	for value in d.values():
		if not isdict(value):
			return None
for k, v in d.items():
if v['keywords'] is None:
v['keywords'] = ' '
v['keywords'] = v['keywords'].lower()
if v['keywords'].__contains__(', '):
v['keywords'] = ' '.join(v['keywords'].split(', '))
if v['keywords'].__contains__(','):
v['keywords'] = ' '.join(v['keywords'].split(','))
v['keywords'] = set([val.strip() for val in v['keywords'].split()])
return d
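# Illustrative usage (not part of the original module):
#
#     meta = {'demo_pkg': {'keywords': 'HTTP, web,  client'}}
#     split_keyword_dicts(meta)
#     # -> {'demo_pkg': {'keywords': {'http', 'web', 'client'}}}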
class attrdict(dict):
    """dict whose items are also reachable as attributes."""
    def __getattr__(self, attr):
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)
    def __setitem__(self, key, value):
        # delegate to dict (plain `self[key] = value` here would recurse forever)
        super().__setitem__(key, value)
        self.__setattr__(key, value)
def one_level_attrdict(d):
if isdict(d):
for key, value in d.items():
if isdict(value):
d[key] = attrdict(value)
return d
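# Illustrative usage (not part of the original module):
#
#     record = one_level_attrdict({'dist': {'name': 'demo', 'version': '0.1'}})
#     record['dist'].name       # -> 'demo'
#     record['dist'].version    # -> '0.1'
#
# Only the first level of nested dicts is converted to attrdict instances.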
def isdict(obj): return isinstance(obj, dict)
def isNone(obj): return obj is None
MetaData = distutils.dist.DistributionMetadata
msg = message_from_file  # alias: call msg(fp) on an open METADATA/PKG-INFO file to get a Message
def _read_field(msg, name):
    """Return a single header from a parsed metadata Message; 'UNKNOWN' counts as missing."""
    value = msg[name]
    if value == 'UNKNOWN':
        return None
    return value
def _read_list(msg, name):
    """Return every value of a repeated header, or None if the header is absent."""
    values = msg.get_all(name, None)
    if values == []:
        return None
    return values
ex=Path('/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/dpath-1.4.0.dist-info/METADATA')
exs = "flatdict", 'graphviz', 'genshi', 'textblob'
def get_egg(distribution):
info = distribution.egg_info
if info is not None:
finder = Path(info)
else:
finder = None
return finder
def parse_egg_info(path):
    """Collect the interesting files of a .dist-info/.egg-info directory into a dict."""
    finder = Path(path)
    file_dict = dict()
    for file in finder.iterdir():
        if file.name in ('METADATA', 'PKG-INFO'):
            file_dict['METADATA'] = file.open()
        elif file.name == 'DESCRIPTION':
            file_dict['DESCRIPTION'] = file.open()
        elif file.name == 'metadata.json':
            # keys of interest inside the JSON payload:
            #   extensions/python.details/project_urls
            #   extensions/python.details/document_names  ## in BASENAMES
            #   extensions/python.details/contacts[lst]/[email | name | role]
            try:
                file_dict['metadata_json'] = json.load(file.open())
            except (TypeError, ValueError):
                file_dict['metadata_json'] = None
        elif file.name == 'RECORD':
            # each RECORD line is "name,hash,size"; keep the split fields as a tuple
            record = set()
            for line in file.open().read().splitlines():
                record.add(tuple(line.split(',')))
            file_dict['RECORD'] = record
        else:
            file_dict[file.name] = None
    return file_dict
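# Illustrative usage (not part of the original module); the result keys depend on
# which files the dist-info directory actually contains:
#
#     info = parse_egg_info(ex)
#     # typically yields keys such as 'METADATA', 'RECORD' and 'metadata_json',
#     # with any unrecognized files mapped to None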
| KGerring/RevealMe | data/tested.py | Python | mit | 6,442 | 0.0104 |
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Prompt',
# list of one or more authors for the module
'Author': ['@FuzzyNop', '@harmj0y'],
# more verbose multi-line description of the module
            'Description': ('Launches a specified application with a prompt for credentials via osascript.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
"https://github.com/fuzzynop/FiveOnceInYourLife"
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'AppName' : {
'Description' : 'The name of the application to launch.',
'Required' : True,
'Value' : 'App Store'
},
'ListApps' : {
'Description' : 'Switch. List applications suitable for launching.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listApps = self.options['ListApps']['Value']
appName = self.options['AppName']['Value']
if listApps != "":
script = """
import os
apps = [ app.split('.app')[0] for app in os.listdir('/Applications/') if not app.split('.app')[0].startswith('.')]
choices = []
for x in xrange(len(apps)):
choices.append("[%s] %s " %(x+1, apps[x]) )
print "\\nAvailable applications:\\n"
print '\\n'.join(choices)
"""
else:
# osascript prompt for the specific application
script = """
import os
print os.popen('osascript -e \\\'tell app "%s" to activate\\\' -e \\\'tell app "%s" to display dialog "%s requires your password to continue." & return default answer "" with icon 1 with hidden answer with title "%s Alert"\\\'').read()
""" % (appName, appName, appName, appName)
return script
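        # Illustrative expansion (not part of the original module): with the default
        # AppName of 'App Store', the generated agent script runs roughly
        #
        #   osascript -e 'tell app "App Store" to activate' \
        #             -e 'tell app "App Store" to display dialog "App Store requires your password to continue." & return default answer "" with icon 1 with hidden answer with title "App Store Alert"'
        #
        # i.e. it pops a native-looking credential dialog for the chosen application.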
| Hackplayers/Empire-mod-Hpys-tests | lib/modules/python/collection/osx/prompt.py | Python | bsd-3-clause | 3,830 | 0.006005 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'InstanceApplication.network'
db.add_column('apply_instanceapplication', 'network', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ganeti.Network'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'InstanceApplication.network'
db.delete_column('apply_instanceapplication', 'network_id')
models = {
'apply.instanceapplication': {
'Meta': {'object_name': 'InstanceApplication'},
'admin_contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'admin_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'backend_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Cluster']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cookie': ('django.db.models.fields.CharField', [], {'default': "'AYkWSa4Fr2'", 'max_length': '255'}),
'disk_size': ('django.db.models.fields.IntegerField', [], {}),
'filed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hosts_mail_server': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'memory': ('django.db.models.fields.IntegerField', [], {}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Network']", 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['apply.Organization']"}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'vcpus': ('django.db.models.fields.IntegerField', [], {})
},
'apply.organization': {
'Meta': {'object_name': 'Organization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'apply.sshpublickey': {
'Meta': {'object_name': 'SshPublicKey'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'key_type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ganeti.cluster': {
'Meta': {'object_name': 'Cluster'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'fast_create': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'ganeti.network': {
'Meta': {'object_name': 'Network'},
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Cluster']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['apply']
| sunweaver/ganetimgr | apply/migrations/0005_add_application_network_field.py | Python | gpl-3.0 | 8,911 | 0.008753 |
# -*- coding: utf-8 -*
"""HydraTK installation commands
.. module:: lib.install.command
:platform: Unix
:synopsis: HydraTK installation commands
.. moduleauthor:: Petr Rašek <[email protected]>
"""
from subprocess import call, Popen, PIPE
from os import path, environ
from sys import exit
from hydratk.lib.system.utils import Utils
from hydratk.lib.console.shellexec import shell_exec
def is_install_cmd(argv):
"""Method checks if installation is requested
Args:
argv (list): command arguments
Returns:
bool: result
"""
res = False
if ('install' in argv or 'bdist_egg' in argv or 'bdist_wheel' in argv):
res = True
return res
def get_pck_manager():
"""Method returns system package managera
Args:
none
Returns:
list: list of string
"""
pck_managers = ['apt-get', 'yum', 'dnf', 'zypper', 'emerge', 'pacman', 'pkg']
pckm = []
for pck in pck_managers:
if (is_installed(pck)):
pckm.append(pck)
return pckm
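# Illustrative usage (assumption, not part of the original module): on a
# Debian-based host this typically yields ['apt-get'], which can then be fed to
# install_pck(), e.g.
#
#     pckm = get_pck_manager()
#     if pckm:
#         install_pck(pckm[0], 'libxml2-dev')   # hypothetical package name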
def is_installed(app):
"""Method checks if system application is installed
Args:
app (str): application
Returns:
bool: result
"""
cmd = ['which', app]
proc = Popen(cmd, stdout=PIPE)
out = proc.communicate()
result = True if (len(out[0]) > 0) else False
return result
def install_pck(pckm, pck):
"""Method installs system package from repository
Args:
pckm (str): package manager
pck (str): package
Returns:
none
"""
print('Installing package {0}'.format(pck))
if (pckm == 'apt-get'):
cmd = 'apt-get -y install {0}'.format(pck)
elif (pckm == 'yum'):
cmd = 'yum -y install {0}'.format(pck)
elif (pckm == 'dnf'):
cmd = 'dnf -y install {0}'.format(pck)
elif (pckm == 'zypper'):
cmd = 'zypper install -y {0}'.format(pck)
elif (pckm == 'emerge'):
cmd = 'emerge {0}'.format(pck)
elif (pckm == 'pacman'):
cmd = 'pacman -S --noconfirm {0}'.format(pck)
elif (pckm == 'pkg'):
cmd = 'pkg install -y {0}'.format(pck)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to install package {0}, hydratk installation failed.'.format(pck))
print(err)
exit(-1)
def create_dir(dst):
"""Method creates directory
Args:
dst (str): destination path
Returns:
none
"""
if (not path.exists(dst)):
print('Creating directory {0}'.format(dst))
cmd = 'mkdir -p {0}'.format(dst)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to create directory {0}'.format(dst))
print(err)
def copy_file(src, dst):
"""Method copies file
Args:
src (str): source path
dst (str): destination path
Returns:
none
"""
create_dir(dst)
print ('Copying file {0} to {1}'.format(src, dst))
cmd = 'cp {0} {1}'.format(src, dst)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to copy {0} to {1}'.format(src, dst))
print(err)
def move_file(src, dst):
"""Method moves file
Args:
src (str): source path
dst (str): destination path
Returns:
none
"""
print('Moving file {0} to {1}'.format(src, dst))
cmd = 'mv {0} {1}'.format(src, dst)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to move {0} to {1}'.format(src, dst))
print(err)
def remove(src, recursive=True):
"""Method removes file or directory
Args:
src (str): source path
recursive (bool): recursive deletion
Returns:
none
"""
print('Removing {0}'.format(src))
cmd = ('rm -fR {0}' if (recursive) else 'rm -f {0}').format(src)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to remove {0}'.format(src))
print(err)
def set_rights(path, rights, recursive=True):
"""Method sets access rights
Args:
path (str): directory/file path
rights (str): access rights
recursive (bool): set recursive rights
Returns:
none
"""
print('Setting rights {0} for {1}'.format(rights, path))
if (recursive):
cmd = 'chmod -R {0} {1}'.format(rights, path)
else:
cmd = 'chmod {0} {1}'.format(rights, path)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to set rights for {0}'.format(path))
print(err)
def install_pip(module):
"""Method installs python module via pip
Args:
module (str): python module
Returns:
none
"""
modtok = module.split('>=') if ('>=' in module) else module.split('==')
module_name = modtok[0]
module_version = modtok[1] if (len(modtok) > 1) else None
pip_path = 'pip' if ('pip' not in environ) else '$pip'
if (module_version != None and Utils.module_exists(module_name)):
if Utils.module_version_ok(module_version, Utils.module_version(module_name)):
print('Module {0} already installed with version {1}'.format(module_name,Utils.module_version(module_name)))
else:
print ('Upgrading module {0} to version {1}'.format(module_name,module_version))
cmd = '{0} install --upgrade "{1}"'.format(pip_path, module)
result, _, err = shell_exec(cmd, True)
if result != 0:
print('Failed to install {0}, hydratk installation failed.'.format(module))
print(err)
exit(-1)
else:
print ('Installing module {0}'.format(module))
cmd = '{0} install "{1}"'.format(pip_path, module)
print(cmd)
result, _, err = shell_exec(cmd, True)
if result != 0:
print('Failed to install {0}, hydratk installation failed.'.format(module))
print(err)
exit(-1)
def uninstall_pip(module):
"""Method uninstalls python module via pip
Args:
module (str): python module
Returns:
none
"""
print ('Uninstalling module {0}'.format(module))
cmd = 'pip uninstall -y {0}'.format(module)
result, _, err = shell_exec(cmd, True)
if (result != 0):
print('Failed to uninstall {0}'.format(module))
print(err)
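
# Illustrative usage (assumption, not part of the original module):
#
#     install_pip('setuptools>=40.0')   # installs, or upgrades only when the found version is too old
#     uninstall_pip('setuptools')
#
# install_pip() aborts the whole installation (exit(-1)) on failure, while
# uninstall_pip() only reports the error.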
| hydratk/hydratk | src/hydratk/lib/install/command.py | Python | bsd-3-clause | 6,457 | 0.004337 |