"""
Module contains console printing functions for RxCS. |br|
All of the console printing in RxCS should be done using functions
from this module.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
0.1 | 14-MAY-2014 : * Initial version. |br|
0.2 | 15-MAY-2014 : * Docstrings added.
0.21 | 15-MAY-2014 : * New colors ('PARAM' + 'OK') added to the dictionary
0.22 | 14-AUG-2015 : * New function (progress_doneNL) is added
0.23 | 20-AUG-2015 : * New function (newline) is added
0.24 | 30-NOV-2015 : * Progress bar is added
0.26 | 15-JAN-2016 : * Note, warning and info starts with a new line
*License*:
BSD 2-Clause
"""
from __future__ import division
import sys
import numpy as np
import time
# =====================================================================
# Print a new line
# =====================================================================
def newline():
sys.stdout.write('\n')
return
# =====================================================================
# Print signal pack header
# =====================================================================
def pack(inxPack):
"""
.. role:: bash(code)
:language: bash
Function prints the header of the signal pack processed by RxCS. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.pack(1)
gives an output:
:bash:`>>> SIGNAL PACK #1:`
Args:
inxPack (int): Index of the current signal pack
Returns:
nothing
"""
strPackNumber = '#%d' % (inxPack)
sys.stdout.write('\n')
sys.stdout.write(_colors('PROGRESS') + '>>> ' + _colors('ENDC'))
sys.stdout.write('SIGNAL PACK ')
sys.stdout.write(_colors('PROGRESS') + strPackNumber + _colors('ENDC'))
sys.stdout.write(':' + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print the sys progress sign + current stage + name of the current module
# =====================================================================
def progress(strStage, strModule):
"""
.. role:: bash(code)
:language: bash
Function prints the progress of the RxCS frames. |br|
It prints the progress sign ('>>') + the current stage (signal generation,
sampler, reconstruction, etc...) + name of the current module. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.progress('Signal generator', 'Random multitone')
gives an output:
:bash:`| >> Signal generator: Random multitone`
Args:
strStage (string): name of the stage |br|
strModule (string): name of the module
Returns:
nothing
"""
sys.stdout.write(_colors('PROGRESS') + ' >> ' + _colors('ENDC'))
sys.stdout.write(strStage + ': ' + strModule + ' \n')
sys.stdout.flush()
return
# =====================================================================
# Print the module progress sign (>) + start the timer
# =====================================================================
def module_progress(strInfo):
"""
Function prints info about a module's progress.
The info is preceded by a tabulator and a module progress sign ('>'). |br|
Additionally, the function starts a time counter. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.module_progress('The module X is starting')
gives an output:
:bash:`| > The module X is starting...`
Args:
strInfo (string): progress info to be printed
Returns:
tStart (float): time stamp of the start
"""
sys.stdout.write(_colors('PROGRESS') + '\n > ' + _colors('ENDC'))
sys.stdout.write(strInfo + '...')
sys.stdout.flush()
# Start the timer
tStart = time.time()
return tStart
# =====================================================================
# Finish the progress print + print the time of execution
# =====================================================================
def progress_done(tStart):
"""
Function adds 'done' to a console message previously printed by the
'module_progress' function. |br|
Additionally, the function prints info about the execution time of a
module, based on the time stamp of the start of the module. |br|
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.progress_done(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
strTime = ('done in %.2f seconds') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC'))
sys.stdout.flush()
return
# =====================================================================
# Finish the progress print + print the time of execution + new line
# =====================================================================
def progress_doneNL(tStart):
"""
Function adds 'done' to a console message previously printed by the
'module_progress' function. |br|
Additionally, the function prints info about the execution time of a
module, based on the time stamp of the start of the module, plus a new line. |br|
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.progress_doneNL(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
strTime = ('done in %.2f seconds') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Start a progress bar
# =====================================================================
def progress_bar_start(strInfo, iPrintIter, iMilestone, iLineBreak,
bPrintSteps=1, bIteration0=0, bPrintTime=0):
"""
Function starts a progress bar. |br|
The start is preceded by a tabulator and a module progress sign ('>>>'). |br|
Additionally, the function starts a time counter. |br|
The function takes care of the proper coloring of the console output. |br|
The function returns a progress bar dictionary. |br|
>>> console.progress_bar_start('The module X:', 10, 50, 100)
gives an output:
:bash:`| >>> The module X:`
Args:
strInfo (string): info to be printed
iPrintIter (integer): print a step after 'iPrintIter' iterations
iMilestone (integer): print X after 'iMilestone' iterations
iLineBreak (integer): break the line after 'iLineBreak' iterations
bPrintSteps (integer): 0 - do not print the number of iterations at the end
1 - print the number of iterations at the end
bIteration0 (integer): 0 - iteration #0 is not allowed
1 - iteration #0 is allowed
bPrintTime (integer): 0 - do not print time at all
1 - print time and average time for the last
iteration (excluding iteration 0)
Returns:
dBar (dictionary): data with the progress bar
"""
# Correct the input arguments
iPrintIter = int(round(iPrintIter))
iMilestone = int(round(iMilestone))
iLineBreak = int(round(iLineBreak))
# Check if the settings are correct
if iMilestone % iPrintIter != 0:
strError = '\'iMilestone\' must be a multiple of \'iPrintIter\'! (%d is not a multiple of %d)!' \
% (iMilestone, iPrintIter)
raise ValueError(strError)
if iLineBreak % iMilestone != 0:
strError = '\'iLineBreak\' must be a multiple of \'iMilestone\'! (%d is not a multiple of %d)!' \
% (iLineBreak, iMilestone)
raise ValueError(strError)
#----------------------------------
# Construct the output dictionary
dBar = dict()
dBar['bActive'] = 1
dBar['iInfoLen'] = len(strInfo) # Length of the info string
dBar['iPrintIter'] = iPrintIter
dBar['iMilestone'] = iMilestone
dBar['iLineBreak'] = iLineBreak
dBar['bPrintSteps'] = bPrintSteps
dBar['bIteration0'] = bIteration0
dBar['bPrintTime'] = bPrintTime
# Start iterations
if bIteration0 == 0:
dBar['iLastIter'] = 0
else:
dBar['iLastIter'] = -1
# Construct a new line tabulator
if bIteration0 == 0:
dBar['strNewLine'] = '\n ' + (' ' * dBar['iInfoLen'])
else:
dBar['strNewLine'] = '\n ' + (' ' * (dBar['iInfoLen'] + 1))
#----------------------------------
# Begin a progress bar
sys.stdout.write(_colors('PROGRESS') + '\n >>> ' + _colors('ENDC'))
sys.stdout.write(strInfo + ' ')
sys.stdout.flush()
# Start the timer, if needed
if bPrintTime == 1:
tStart = time.time()
dBar['tStart'] = tStart
return dBar
def progress_bar(dBar, iIter):
"""
Function prints the progress bar.
Args:
dBar (dictionary): the progress bar data, as returned by 'progress_bar_start'
iIter (integer): the current iteration (a negative value closes the bar)
Returns:
dBar (dictionary): the updated progress bar data
"""
# Is the bar still active?
if dBar['bActive'] == 0:
return dBar
# Make iterations a round integer, in any case
iIter = int(round(iIter))
# Is it the end of the story?
if iIter < 0:
dBar['bActive'] = 0
if dBar['bPrintSteps'] == 1:
strMessage = ' (%d) ' % (dBar['iLastIter'])
sys.stdout.write(strMessage)
sys.stdout.flush()
if dBar['bPrintTime'] == 1:
sys.stdout.write(dBar['strNewLine'])
tTime = time.time() - dBar['tStart'] # Measure the time
strMessage = progress_bar_time(tTime, dBar['iLastIter'])
sys.stdout.write(strMessage)
sys.stdout.flush()
return dBar
# Was this iteration already given?
if iIter <= dBar['iLastIter']:
return dBar
iPreviousLastIter = dBar['iLastIter']
dBar['iLastIter'] = iIter # Mark the current iteration as the last iteration
# Loop over all the iterations
for iIter in range(iPreviousLastIter + 1, iIter + 1):
if iIter == 0:
if dBar['bIteration0'] == 1:
sys.stdout.write(_colors('PROGRESS') + '0' + _colors('ENDC'))
return dBar
elif (iIter % dBar['iMilestone']) == 0:
sys.stdout.write(_colors('PROGRESS') + 'X' + _colors('ENDC'))
sys.stdout.flush()
elif (iIter % dBar['iPrintIter']) == 0:
sys.stdout.write('.')
sys.stdout.flush()
# Break the line, if it is needed
if (iIter % dBar['iLineBreak']) == 0:
sys.stdout.write(dBar['strNewLine'])
sys.stdout.flush()
return dBar
def progress_bar_time(tTime, iIter):
"""
Time service for the progress bar.
"""
iHour = 3600
iMin = 60
strMessage = 'Total time = %.1f [s]' % (tTime)
# Hours
if tTime >= 1 * iHour:
nHours = np.floor(tTime / iHour)
tTimeSec = tTime - nHours * iHour
if nHours == 1:
strMessage = strMessage + ' (%d [hour]' % (nHours)
else:
strMessage = strMessage + ' (%d [hours]' % (nHours)
if tTimeSec >= 1 * iMin:
nMins = np.floor(tTimeSec / iMin)
tTimeSec = tTimeSec - nMins * iMin
strMessage = strMessage + ' %d [mins]' % (nMins)
strMessage = strMessage + ' %.1f [sec])' % (tTimeSec)
# Minutes
elif tTime >= 10 * iMin:
nMins = np.floor(tTime / iMin)
tTimeSec = tTime - nMins * iMin
strMessage = strMessage + ' (%d [mins]' % (nMins)
strMessage = strMessage + ' %.1f [sec])' % (tTimeSec)
# One iteration
tTimeIter = tTime / iIter
# Microseconds
if tTimeIter < 1e-3:
strMessage = strMessage + ' (%.1f [us] p. iteration)' % (tTimeIter * 1e6)
# Milliseconds
elif tTimeIter < 1:
strMessage = strMessage + ' (%.3f [ms] p. iteration)' % (tTimeIter * 1e3)
else:
strMessage = strMessage + ' (%.3f [s] p. iteration)' % (tTimeIter)
return strMessage
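# =====================================================================
# Usage sketch (illustrative only, not part of the original module):
# a typical progress bar life cycle. With the settings below, '.' is
# printed every 10 iterations, 'X' every 50, the line breaks every 100,
# and a negative iteration index closes the bar.
# =====================================================================
#   dBar = progress_bar_start('The module X:', 10, 50, 100,
#                             bPrintSteps=1, bPrintTime=1)
#   for iIter in range(1, 201):
#       ...                           # one iteration of the actual work
#       dBar = progress_bar(dBar, iIter)
#   dBar = progress_bar(dBar, -1)     # close the bar, print the totals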
# =====================================================================
# Finish the module progress print + print the time of execution
# =====================================================================
def module_progress_done(tStart):
"""
Function adds 'done' to a console message previously printed by the
'module_progress' function. |br|
Additionally, the function prints info about the execution time of a
module, based on the time stamp of the start of the module. |br|
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.module_progress_done(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 s`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
if (tTime < 1) and (tTime >= 1e-3): # Milliseconds range
tTime = tTime * 1e3
strTime = ('done in %.2f ms') % (tTime)
elif (tTime < 1e-3) and (tTime >= 1e-6): # Microseconds range
tTime = tTime * 1e6
strTime = ('done in %.2f us') % (tTime)
else:
strTime = ('done in %.2f s') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\n\n\n')
sys.stdout.flush()
return
# =====================================================================
# Finish the module progress print + print the time of execution
# (with 1 newline instead of 3)
# =====================================================================
def module_progress_doneNoNew(tStart):
"""
Function adds 'done' to a console message previously printed by the
'module_progress' function. |br|
Additionally, the function prints info about the execution time of a
module, based on the time stamp of the start of the module. |br|
This function adds only a single new line after 'done' (instead of three).
The function takes care of the proper coloring of the console output. |br|
>>> tStart = console.module_progress('The module X is starting')
>>> time.sleep(1)
>>> console.module_progress_doneNoNew(tStart)
gives an output:
:bash:`| > The module X is starting...done in 1.00 seconds`
Args:
tStart (float): time stamp of the start
Returns:
nothing
"""
# Measure the time
tTime = time.time() - tStart
strTime = ('done in %.2f seconds') % (tTime)
sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print a warning
# =====================================================================
def warning(strWarn):
"""
Function prints a warning preceded by a proper tabulator. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.warning('Mind the gap!')
:bash:`| Mind the gap!`
Args:
strWarn (string): warning to be printed
Returns:
nothing
"""
# Add a tabulator to the warning message
strWarn = ('\n %s') % (strWarn)
# Write the warning
sys.stdout.write(_colors('WARN'))
sys.stdout.write(strWarn)
sys.stdout.write(_colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print information
# =====================================================================
def info(strInfo):
"""
Function prints info preceded by a proper tabulator. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.info('Very important info')
:bash:`| Very important info`
Args:
strInfo (string): info to be printed
Returns:
nothing
"""
# Add a tabulator to the info message
strInfo = ('\n %s') % (strInfo)
# Write the info
sys.stdout.write(_colors('INFO'))
sys.stdout.write(strInfo)
sys.stdout.write(_colors('ENDC') + '\n')
sys.stdout.flush()
return
# =====================================================================
# Print a bullet + information description + ':' + information
# =====================================================================
def bullet_info(strDesc, strInfo):
"""
Function prints info preceded by a proper tabulator, an info
bullet '*' and a description of the info. |br|
The function takes care of the proper coloring of the console output. |br|
>>> console.bullet_info('Please remember', 'mind the gap!')
gives an output
:bash:`| * Please remember: mind the gap!`
Args:
strDesc (string): description of the info |br|
strInfo (string): info to be printed
Returns:
nothing
"""
# Write the tabulator with a bullet
sys.stdout.write('\n' + _colors('BULLET') + ' * ' + _colors('ENDC'))
# Write the description
sys.stdout.write(strDesc + ': ')
# Write the info
sys.stdout.write(_colors('BULLET_INFO'))
sys.stdout.write(strInfo)
sys.stdout.write(_colors('ENDC'))
sys.stdout.write('\n')
sys.stdout.flush()
return
# =====================================================================
# Print a note (an information without coloring)
# =====================================================================
def note(strNote):
"""
Function prints a note preceded by a proper tabulator. |br|
There is no coloring of the output. |br|
>>> console.note('mind the gap!')
:bash:`| mind the gap!`
Args:
strNote (string): note to be printed
Returns:
nothing
"""
# Add a tabulator to the info message
strNote = ('\n %s') % (strNote)
# Write the info
sys.stdout.write(strNote)
sys.stdout.write('\n')
sys.stdout.flush()
return
# =====================================================================
# Print name of the parameter + the parameter
# =====================================================================
def param(strName, iVal, strForm, strUnit):
"""
Function prints a parameter and a parameter unit.
The parameter is preceded by a tabulator and a parameter name. |br|
The parameter value is recalculated to a requested order of magnitude,
or the function may decide itself about the order of magnitude. The
formatting string (3rd parameter) controls the order of magnitude of
a printed value. If it contains the '-' character, the function will
decide about an order of magnitude. If it contains a magnitude unit
symbol, the function recalculates the value to the given order of
magnitude. |br|
The formatting string (3rd parameter) must contain one or two
characters. If there are two characters, the value is printed in two
orders of magnitude; the second is in parentheses. |br|
Available symbols of orders of magnitude:
(femto): 'f' |br|
(pico): 'p' |br|
(nano): 'n' |br|
(micro): 'u' |br|
(mili): 'm' |br|
(none): ' ' |br|
(kilo): 'k' |br|
(Mega): 'M' |br|
(Giga): 'G' |br|
(Tera): 'T' |br|
(second) 's' |br|
(hour): 'h' |br|
|br|
If the first character in the formatting string is 's', then the
parameter is treated as time expressed in seconds. In this case
the second character may either be absent from the string, or be equal
to 'h'. In the latter case the time will also be expressed in hours. |br|
The last argument is a unit name which will be printed after the values
of the parameter. If the first character in the formatting string is
's', then the last argument should be empty. |br|
The function takes care of the proper coloring of the console output. |br|
Usage examples:
>>> console.param('Size of a hard drive',500*1e9,'G ','bytes')
:bash:`| Size of a hard drive: 500.000 G (500000000000) [bytes]`
>>> console.param('Dist. from Aalborg to Auckland',10889,'k ','miles')
:bash:`| Dist. from Aalborg to Auckland: 10.889 k (10889) [miles]`
>>> console.param('The number of people in DK',5627235,'k-','souls')
:bash:`| The number of people in DK: 5627.235 k (5.627 M) [souls]`
>>> console.param('>E.T.< running time',115*60,'sh','')
:bash:`| >E.T.< running time: 6900.0 [seconds] (1.92 [hours])`
>>> console.param('Honda Civic Type R 0-60',6.6,'s','')
:bash:`| Honda Civic Type R 0-60: 6.6 [seconds]`
Args:
strName (string): name of the parameter |br|
iVal (float): value |br|
strForm (string): format string |br|
strUnit (string): unit |br|
Returns:
nothing
"""
# Write the tabulator
sys.stdout.write(' ')
# Run the engine of parameter print
_param(strName, iVal, strForm, strUnit)
return
# =====================================================================
# Print a bullet + name of the parameter + the parameter
# =====================================================================
def bullet_param(strName, iVal, strForm, strUnit):
"""
Function prints a parameter preceded by a proper tabulator, a bullet
and a parameter name. |br|
The function is identical to the 'param' function above; the only
difference is the bullet added before the parameter name. Please refer
to the 'param' function for a description of the function and its input
parameters. |br|
"""
# Write the tabulator with a bullet
sys.stdout.write('\n' + _colors('BULLET') + ' * ' + _colors('ENDC'))
# Run the engine of parameter print
_param(strName, iVal, strForm, strUnit)
return
# =====================================================================
# The engine of parameter print
# =====================================================================
def _param(strName, iVal, strForm, strUnit):
"""
This is the engine of the formatted parameter printing. |br|
The input to the function is identical to that of the 'param' function above.
Please refer to the 'param' function for a description of the function and
its input parameters. |br|
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._param'
# ----------------------------------------------------------------
# Write the parameter name
sys.stdout.write(strName + ': ')
# Check the length of the format string, it should be 1 or 2
lForm = len(strForm)
if lForm < 1 or lForm > 2:
strErr = strFunc + ' : '
strErr = strErr + ('Parameter format string must be 1 or 2 characters')
raise Exception(strErr)
# ----------------------------------------------------------------
# Recalculate the unit to coefficient, if it is asked for
if strForm[0] == '-': # <- the function should recalculate the unit
(iCoef, strUnitRecalc) = _val2unit(iVal)
elif strForm[0] == 's': # <- the parameter contains seconds
_param_time_write(iVal, strForm)
return
else: # <- there is a correct unit already given
# Get the name of the magnitude unit
strUnitRecalc = strForm[0]
# Get the correct coefficient for the 2nd representation
iCoef = _unit2coef(strUnitRecalc)
# Recalculate the value of the parameter
iVal_recal = iVal / iCoef
# Create a string with value
if iVal == 0: # <- the value is zero
strVal = '0'
elif iCoef == 1: # <- there is no need to recalculate the value
# Put the number as it is, but pay attention if it is float or int
if isinstance(iVal, int):
strVal = ('%d') % (iVal_recal)
else:
strVal = ('%.3f') % (iVal_recal)
elif np.isinf(iCoef): # <- the value is infinite
strVal = ('inf')
else: # <- the value should be recalculated
strVal = ('%.3f %s') % (iVal_recal, strUnitRecalc)
# Write the value
sys.stdout.write(_colors('PARAM') + strVal + _colors('ENDC') + ' ')
# ----------------------------------------------------------------
# 2nd representation:
# If the string has 2 characters, print also the recalculated number
# (the 2nd representation)
if lForm == 2:
# Check if the user wants it to be recalculated to a given magnitude
# or the function should decide
if strForm[1] == '-': # <- the function should decide
# Get the correct coefficient and magnitude unit
(iCoef2, strUnit2Recalc) = _val2unit(iVal)
else: # <- the user gives the magnitude representation
# Get the name of the magnitude unit
strUnit2Recalc = strForm[1]
# Get the correct coefficient for the 2nd representation
iCoef2 = _unit2coef(strUnit2Recalc)
# If the magnitudes are identical, do not print the 2nd representation
if iCoef != iCoef2:
# Recalculate the value to the 2nd representation
iVal_2Rep = iVal / iCoef2
# Create the string with the 2nd representation
if iCoef2 == 1:
strVal2 = ('%d') % (iVal_2Rep)
else:
strVal2 = ('%.3f %s') % (iVal_2Rep, strUnit2Recalc)
# Print out the 2nd representation
sys.stdout.write('(')
sys.stdout.write(_colors('PARAM') + strVal2 + _colors('ENDC'))
sys.stdout.write(')' + ' ')
# ----------------------------------------------------------------
# Print the unit, if it is not empty
lUnit = len(strUnit)
if lUnit > 0:
sys.stdout.write(_colors('PARAM'))
sys.stdout.write('[' + strUnit + ']')
sys.stdout.write(_colors('ENDC'))
# ----------------------------------------------------------------
sys.stdout.write('\n')
return
# =====================================================================
# The engine of time parameter print
# =====================================================================
def _param_time_write(iVal, strForm):
"""
This is the engine of the formatted time parameter printing. |br|
Args:
iVal (float): value
strForm (string): format string
Returns:
nothing
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._param_time_write'
# ----------------------------------------------------------------
# Create a string with seconds
strSeconds = ('%.1f [seconds]') % (iVal)
# Print the seconds
sys.stdout.write(_colors('PARAM') + strSeconds + _colors('ENDC') + ' ')
# Get the length of the format string
lForm = len(strForm)
# ----------------------------------------------------------------
# Add an info about the hours, if needed
if lForm == 2:
if not (strForm[1] == 'h'):
strErr = strFunc + ' : '
strErr = strErr + ('If the first character of the parameter format ')
strErr = strErr + ('is >s<, then the second must be >h< or absent!')
raise Exception(strErr)
# Recalculate seconds to hours and create a proper string with hours
iHours = iVal / 3600
strHours = ('%.2f [hours]') % (iHours)
# Print the hours
sys.stdout.write('(')
sys.stdout.write(_colors('PARAM') + strHours + _colors('ENDC'))
sys.stdout.write(')')
# ----------------------------------------------------------------
sys.stdout.write('\n')
return
# =====================================================================
# Recalculate a unit symbol to a unit coefficient
# =====================================================================
def _unit2coef(strUnit):
"""
Function returns a unit coefficient based on a unit symbol.
Available unit names, symbols and coefficients:
(femto): 'f' = 1e-15
(pico): 'p' = 1e-12
(nano): 'n' = 1e-9
(micro): 'u' = 1e-6
(mili): 'm' = 1e-3
(none): ' ' = 1
(kilo): 'k' = 1e3
(Mega): 'M' = 1e6
(Giga): 'G' = 1e9
(Tera): 'T' = 1e12
(hour): 'h' = 3600
Args:
strUnit (string): key of the unit
Returns:
iCoef (float): unit coefficient
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._unit2coef'
# ----------------------------------------------------------------
# femto
if strUnit == 'f':
iCoef = 1e-15
# pico
elif strUnit == 'p':
iCoef = 1e-12
# nano
elif strUnit == 'n':
iCoef = 1e-9
# micro
elif strUnit == 'u':
iCoef = 1e-6
# mili
elif strUnit == 'm':
iCoef = 1e-3
# none
elif strUnit == ' ':
iCoef = 1
# kilo
elif strUnit == 'k':
iCoef = 1e3
# Mega
elif strUnit == 'M':
iCoef = 1e6
# Giga
elif strUnit == 'G':
iCoef = 1e9
# Tera
elif strUnit == 'T':
iCoef = 1e12
# hour
elif strUnit == 'h':
iCoef = 3600
# ----------------------------------------------------------------
# Unknown unit
else:
strErr = strFunc + ' : '
strErr = strErr + ('> %s < is an unknown unit symbol') % (strUnit)
raise Exception(strErr)
# ----------------------------------------------------------------
return iCoef
# =====================================================================
# Recalculate a value to a unit symbol and a unit coefficient
# =====================================================================
def _val2unit(iVal):
"""
Function returns the unit coefficient and a unit symbol.
Args:
iVal (float): value
Returns:
iCoef (float): unit coefficient
strUnit (string): unit symbol
"""
# femto
if iVal < 1e-12:
iCoef = 1e-15
strUnit = 'f'
# pico
elif iVal < 1e-9:
iCoef = 1e-12
strUnit = 'p'
# nano
elif iVal < 1e-6:
iCoef = 1e-9
strUnit = 'n'
# micro
elif iVal < 1e-3:
iCoef = 1e-6
strUnit = 'u'
# mili
elif iVal < 1:
iCoef = 1e-3
strUnit = 'm'
# none
elif iVal < 1e3:
iCoef = 1
strUnit = ' '
# kilo
elif iVal < 1e6:
iCoef = 1e3
strUnit = 'k'
# Mega
elif iVal < 1e9:
iCoef = 1e6
strUnit = 'M'
# Giga
elif iVal < 1e12:
iCoef = 1e9
strUnit = 'G'
# Infinite
elif np.isinf(iVal):
iCoef = np.inf
strUnit = ''
# Tera
else:
iCoef = 1e12
strUnit = 'T'
# ----------------------------------------------------------------
return (iCoef, strUnit)
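# Example (illustrative): _val2unit(2.5e4) returns (1e3, 'k'), so a value
# of 25000 is printed by _param as '25.000 k'.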
# =====================================================================#
# Colors dictionary
# =====================================================================
def _colors(strKey):
"""
Function gives access to the RxCS console colors dictionary. The
function returns a proper console color formatting string (ANSI colors)
based on the key given to the function. |br|
Available keys:
'PURPLE'
'BLUE'
'GREEN'
'YELLOW'
'RED'
'BLACK'
'DARK_MAGENTA'
'AQUA'
'BLUE_BG'
'DARK_BLUE'
'DARK_GREEN'
'GREY30'
'GREY70'
'PROGRESS' -> color for progress signs ('>>>', '>>', '>')
'INFO' -> color for info messages
'BULLET_INFO' -> color for bullet info messages
'BULLET' -> color for bullets ('*')
'WARN' -> color for warning messages
'PARAM' -> color for parameters printing
'OK' -> color for good messages
'ENDC' -> console formatting string which switches off
the coloring
Args:
strKey (string): key of the color
Returns:
strColor (string): console color formatting string
"""
# Define colors
dColors = {}
dColors['PURPLE'] = '\033[95m'
dColors['BLUE'] = '\033[94m'
dColors['GREEN'] = '\033[92m'
dColors['YELLOW'] = '\033[93m'
dColors['RED'] = '\033[91m'
dColors['BLACK'] = '\033[30m'
dColors['DARK_MAGENTA'] = '\033[35m'
dColors['AQUA'] = '\033[96m'
dColors['BLUE_BG'] = '\033[44m'
dColors['DARK_BLUE'] = '\033[34m'
dColors['DARK_GREEN'] = '\033[32m'
dColors['GREY30'] = '\033[30m'
dColors['GREY70'] = '\033[97m'
# Define colors for communication
dColors['PROGRESS'] = dColors['DARK_MAGENTA']
dColors['INFO'] = dColors['DARK_GREEN']
dColors['BULLET_INFO'] = dColors['AQUA']
dColors['BULLET'] = dColors['DARK_MAGENTA']
dColors['WARN'] = dColors['RED']
dColors['PARAM'] = dColors['AQUA']
dColors['OK'] = dColors['DARK_GREEN']
dColors['ENDC'] = '\033[0m'
# Return the correct color
strColor = dColors[strKey]
return strColor
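# =====================================================================
# Usage sketch (illustrative only, based on the docstring examples):
# run the module directly to see the console helpers in action
# =====================================================================
if __name__ == '__main__':
    pack(1)                                           # >>> SIGNAL PACK #1:
    progress('Signal generator', 'Random multitone')  # >> Signal generator: ...
    tStart = module_progress('The module X is starting')
    time.sleep(0.1)
    module_progress_done(tStart)                      # ...done in ~100 ms
    param('Size of a hard drive', 500*1e9, 'G ', 'bytes')
    bullet_info('Please remember', 'mind the gap!')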
Laredo International Airport is a city-owned, public-use airport that averages 158 aircraft operations a day. Under the direction of the W.D. Schock Company, the FAA Quieter Homes Program provided sound mitigation around the airport. Koch Corporation was the installer on several phases of this initiative, installing aluminum acoustical windows by Peerless Products and storm doors by Larson, Sound Control Systems, and Door Systems, Inc. Sliding glass doors by Armaclad and prime doors by PEM were also installed.
from collections import OrderedDict
from rpython.annotator import model as annmodel
from rpython.flowspace.model import Constant
from rpython.rlib import rarithmetic, objectmodel
from rpython.rtyper import raddress, rptr, extregistry, rrange
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
from rpython.rtyper import rclass
from rpython.rtyper.rmodel import Repr
from rpython.tool.pairtype import pairtype
BUILTIN_TYPER = {}
def typer_for(func):
def wrapped(rtyper_func):
BUILTIN_TYPER[func] = rtyper_func
return rtyper_func
return wrapped
class __extend__(annmodel.SomeBuiltin):
def rtyper_makerepr(self, rtyper):
if not self.is_constant():
raise TyperError("non-constant built-in function!")
return BuiltinFunctionRepr(self.const)
def rtyper_makekey(self):
const = getattr(self, 'const', None)
if extregistry.is_registered(const):
const = extregistry.lookup(const)
return self.__class__, const
class __extend__(annmodel.SomeBuiltinMethod):
def rtyper_makerepr(self, rtyper):
assert self.methodname is not None
result = BuiltinMethodRepr(rtyper, self.s_self, self.methodname)
return result
def rtyper_makekey(self):
# NOTE: we hash by id of self.s_self here. This appears to be
# necessary because it ends up in hop.args_s[0] in the method call,
# and there is no telling what information the called
# rtype_method_xxx() will read from that hop.args_s[0].
# See test_method_join in test_rbuiltin.
# There is no problem with self.s_self being garbage-collected and
# its id reused, because the BuiltinMethodRepr keeps a reference
# to it.
return (self.__class__, self.methodname, id(self.s_self))
def call_args_expand(hop):
hop = hop.copy()
from rpython.annotator.argument import ArgumentsForTranslation
arguments = ArgumentsForTranslation.fromshape(
hop.args_s[1].const, # shape
range(hop.nb_args-2))
assert arguments.w_stararg is None
keywords = arguments.keywords
# prefix keyword arguments with 'i_'
kwds_i = {}
for key in keywords:
kwds_i['i_' + key] = keywords[key]
return hop, kwds_i
class BuiltinFunctionRepr(Repr):
lowleveltype = lltype.Void
def __init__(self, builtinfunc):
self.builtinfunc = builtinfunc
def findbltintyper(self, rtyper):
"Find the function to use to specialize calls to this built-in func."
try:
return BUILTIN_TYPER[self.builtinfunc]
except (KeyError, TypeError):
pass
if extregistry.is_registered(self.builtinfunc):
entry = extregistry.lookup(self.builtinfunc)
return entry.specialize_call
raise TyperError("don't know about built-in function %r" % (
self.builtinfunc,))
def _call(self, hop2, **kwds_i):
bltintyper = self.findbltintyper(hop2.rtyper)
hop2.llops._called_exception_is_here_or_cannot_occur = False
v_result = bltintyper(hop2, **kwds_i)
if not hop2.llops._called_exception_is_here_or_cannot_occur:
raise TyperError("missing hop.exception_cannot_occur() or "
"hop.exception_is_here() in %s" % bltintyper)
return v_result
def rtype_simple_call(self, hop):
hop2 = hop.copy()
hop2.r_s_popfirstarg()
return self._call(hop2)
def rtype_call_args(self, hop):
# calling a built-in function with keyword arguments:
# mostly for rpython.objectmodel.hint()
hop, kwds_i = call_args_expand(hop)
hop2 = hop.copy()
hop2.r_s_popfirstarg()
hop2.r_s_popfirstarg()
# the RPython-level keyword args are passed with an 'i_' prefix and
# the corresponding value is an *index* in the hop2 arguments,
# to be used with hop.inputarg(arg=..)
return self._call(hop2, **kwds_i)
class BuiltinMethodRepr(Repr):
def __init__(self, rtyper, s_self, methodname):
self.s_self = s_self
self.self_repr = rtyper.getrepr(s_self)
self.methodname = methodname
# methods of a known name are implemented as just their 'self'
self.lowleveltype = self.self_repr.lowleveltype
def convert_const(self, obj):
return self.self_repr.convert_const(obj.__self__)
def rtype_simple_call(self, hop):
# methods: look up the rtype_method_xxx()
name = 'rtype_method_' + self.methodname
try:
bltintyper = getattr(self.self_repr, name)
except AttributeError:
raise TyperError("missing %s.%s" % (
self.self_repr.__class__.__name__, name))
# hack based on the fact that 'lowleveltype == self_repr.lowleveltype'
hop2 = hop.copy()
assert hop2.args_r[0] is self
if isinstance(hop2.args_v[0], Constant):
c = hop2.args_v[0].value # get object from bound method
c = c.__self__
hop2.args_v[0] = Constant(c)
hop2.args_s[0] = self.s_self
hop2.args_r[0] = self.self_repr
return bltintyper(hop2)
class __extend__(pairtype(BuiltinMethodRepr, BuiltinMethodRepr)):
def convert_from_to((r_from, r_to), v, llops):
# convert between two MethodReprs only if they are about the same
# methodname. (Useful for the case r_from.s_self == r_to.s_self but
# r_from is not r_to.) See test_rbuiltin.test_method_repr.
if r_from.methodname != r_to.methodname:
return NotImplemented
return llops.convertvar(v, r_from.self_repr, r_to.self_repr)
def parse_kwds(hop, *argspec_i_r):
lst = [i for (i, r) in argspec_i_r if i is not None]
lst.sort()
if lst != range(hop.nb_args - len(lst), hop.nb_args):
raise TyperError("keyword args are expected to be at the end of "
"the 'hop' arg list")
result = []
for i, r in argspec_i_r:
if i is not None:
if r is None:
r = hop.args_r[i]
result.append(hop.inputarg(r, arg=i))
else:
result.append(None)
del hop.args_v[hop.nb_args - len(lst):]
return result
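# Example (cf. rtype_malloc below): parse_kwds(hop, (i_flavor, lltype.Void),
# (i_zero, None)) consumes the trailing keyword arguments of the hop and
# returns one input variable (or None, if the keyword was not given) per
# (index, repr) pair, in the order of the pairs.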
# ____________________________________________________________
@typer_for(bool)
def rtype_builtin_bool(hop):
# not called any more?
assert hop.nb_args == 1
return hop.args_r[0].rtype_bool(hop)
@typer_for(int)
def rtype_builtin_int(hop):
if isinstance(hop.args_s[0], annmodel.SomeString):
assert 1 <= hop.nb_args <= 2
return hop.args_r[0].rtype_int(hop)
assert hop.nb_args == 1
return hop.args_r[0].rtype_int(hop)
@typer_for(float)
def rtype_builtin_float(hop):
assert hop.nb_args == 1
return hop.args_r[0].rtype_float(hop)
@typer_for(chr)
def rtype_builtin_chr(hop):
assert hop.nb_args == 1
return hop.args_r[0].rtype_chr(hop)
@typer_for(unichr)
def rtype_builtin_unichr(hop):
assert hop.nb_args == 1
return hop.args_r[0].rtype_unichr(hop)
@typer_for(unicode)
def rtype_builtin_unicode(hop):
return hop.args_r[0].rtype_unicode(hop)
@typer_for(bytearray)
def rtype_builtin_bytearray(hop):
return hop.args_r[0].rtype_bytearray(hop)
@typer_for(list)
def rtype_builtin_list(hop):
return hop.args_r[0].rtype_bltn_list(hop)
#def rtype_builtin_range(hop): see rrange.py
#def rtype_builtin_xrange(hop): see rrange.py
#def rtype_builtin_enumerate(hop): see rrange.py
#def rtype_r_dict(hop): see rdict.py
@typer_for(rarithmetic.intmask)
def rtype_intmask(hop):
hop.exception_cannot_occur()
vlist = hop.inputargs(lltype.Signed)
return vlist[0]
@typer_for(rarithmetic.longlongmask)
def rtype_longlongmask(hop):
hop.exception_cannot_occur()
vlist = hop.inputargs(lltype.SignedLongLong)
return vlist[0]
@typer_for(min)
def rtype_builtin_min(hop):
v1, v2 = hop.inputargs(hop.r_result, hop.r_result)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_min, v1, v2)
def ll_min(i1, i2):
if i1 < i2:
return i1
return i2
@typer_for(max)
def rtype_builtin_max(hop):
v1, v2 = hop.inputargs(hop.r_result, hop.r_result)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_max, v1, v2)
def ll_max(i1, i2):
if i1 > i2:
return i1
return i2
@typer_for(reversed)
def rtype_builtin_reversed(hop):
hop.exception_cannot_occur()
return hop.r_result.newiter(hop)
@typer_for(getattr(object.__init__, 'im_func', object.__init__))
def rtype_object__init__(hop):
hop.exception_cannot_occur()
@typer_for(getattr(EnvironmentError.__init__, 'im_func',
EnvironmentError.__init__))
def rtype_EnvironmentError__init__(hop):
hop.exception_cannot_occur()
v_self = hop.args_v[0]
r_self = hop.args_r[0]
if hop.nb_args <= 2:
v_errno = hop.inputconst(lltype.Signed, 0)
if hop.nb_args == 2:
v_strerror = hop.inputarg(rstr.string_repr, arg=1)
r_self.setfield(v_self, 'strerror', v_strerror, hop.llops)
else:
v_errno = hop.inputarg(lltype.Signed, arg=1)
v_strerror = hop.inputarg(rstr.string_repr, arg=2)
r_self.setfield(v_self, 'strerror', v_strerror, hop.llops)
if hop.nb_args >= 4:
v_filename = hop.inputarg(rstr.string_repr, arg=3)
r_self.setfield(v_self, 'filename', v_filename, hop.llops)
r_self.setfield(v_self, 'errno', v_errno, hop.llops)
try:
WindowsError
except NameError:
pass
else:
@typer_for(
getattr(WindowsError.__init__, 'im_func', WindowsError.__init__))
def rtype_WindowsError__init__(hop):
hop.exception_cannot_occur()
if hop.nb_args == 2:
raise TyperError("WindowsError() should not be called with "
"a single argument")
if hop.nb_args >= 3:
v_self = hop.args_v[0]
r_self = hop.args_r[0]
v_error = hop.inputarg(lltype.Signed, arg=1)
r_self.setfield(v_self, 'winerror', v_error, hop.llops)
@typer_for(objectmodel.hlinvoke)
def rtype_hlinvoke(hop):
_, s_repr = hop.r_s_popfirstarg()
r_callable = s_repr.const
r_func, nimplicitarg = r_callable.get_r_implfunc()
s_callable = r_callable.get_s_callable()
nbargs = len(hop.args_s) - 1 + nimplicitarg
s_sigs = r_func.get_s_signatures((nbargs, (), False))
if len(s_sigs) != 1:
raise TyperError("cannot hlinvoke callable %r with not uniform"
"annotations: %r" % (r_callable,
s_sigs))
args_s, s_ret = s_sigs[0]
rinputs = [hop.rtyper.getrepr(s_obj) for s_obj in args_s]
rresult = hop.rtyper.getrepr(s_ret)
args_s = args_s[nimplicitarg:]
rinputs = rinputs[nimplicitarg:]
new_args_r = [r_callable] + rinputs
for i in range(len(new_args_r)):
assert hop.args_r[i].lowleveltype == new_args_r[i].lowleveltype
hop.args_r = new_args_r
hop.args_s = [s_callable] + args_s
hop.s_result = s_ret
assert hop.r_result.lowleveltype == rresult.lowleveltype
hop.r_result = rresult
return hop.dispatch()
typer_for(range)(rrange.rtype_builtin_range)
typer_for(xrange)(rrange.rtype_builtin_xrange)
typer_for(enumerate)(rrange.rtype_builtin_enumerate)
# annotation of low-level types
@typer_for(lltype.malloc)
def rtype_malloc(hop, i_flavor=None, i_zero=None, i_track_allocation=None,
i_add_memory_pressure=None):
assert hop.args_s[0].is_constant()
vlist = [hop.inputarg(lltype.Void, arg=0)]
opname = 'malloc'
kwds_v = parse_kwds(
hop,
(i_flavor, lltype.Void),
(i_zero, None),
(i_track_allocation, None),
(i_add_memory_pressure, None))
(v_flavor, v_zero, v_track_allocation, v_add_memory_pressure) = kwds_v
flags = {'flavor': 'gc'}
if v_flavor is not None:
flags['flavor'] = v_flavor.value
if i_zero is not None:
flags['zero'] = v_zero.value
if i_track_allocation is not None:
flags['track_allocation'] = v_track_allocation.value
if i_add_memory_pressure is not None:
flags['add_memory_pressure'] = v_add_memory_pressure.value
vlist.append(hop.inputconst(lltype.Void, flags))
assert 1 <= hop.nb_args <= 2
if hop.nb_args == 2:
vlist.append(hop.inputarg(lltype.Signed, arg=1))
opname += '_varsize'
hop.has_implicit_exception(MemoryError) # record that we know about it
hop.exception_is_here()
return hop.genop(opname, vlist, resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.free)
def rtype_free(hop, i_flavor, i_track_allocation=None):
vlist = [hop.inputarg(hop.args_r[0], arg=0)]
v_flavor, v_track_allocation = parse_kwds(hop,
(i_flavor, lltype.Void),
(i_track_allocation, None))
#
assert v_flavor is not None and v_flavor.value == 'raw'
flags = {'flavor': 'raw'}
if i_track_allocation is not None:
flags['track_allocation'] = v_track_allocation.value
vlist.append(hop.inputconst(lltype.Void, flags))
#
hop.exception_cannot_occur()
hop.genop('free', vlist)
@typer_for(lltype.render_immortal)
def rtype_render_immortal(hop, i_track_allocation=None):
vlist = [hop.inputarg(hop.args_r[0], arg=0)]
v_track_allocation = parse_kwds(hop,
(i_track_allocation, None))
hop.exception_cannot_occur()
if i_track_allocation is None or v_track_allocation.value:
hop.genop('track_alloc_stop', vlist)
@typer_for(lltype.typeOf)
@typer_for(lltype.nullptr)
@typer_for(lltype.getRuntimeTypeInfo)
@typer_for(lltype.Ptr)
def rtype_const_result(hop):
hop.exception_cannot_occur()
return hop.inputconst(hop.r_result.lowleveltype, hop.s_result.const)
@typer_for(lltype.cast_pointer)
def rtype_cast_pointer(hop):
assert hop.args_s[0].is_constant()
assert isinstance(hop.args_r[1], rptr.PtrRepr)
v_type, v_input = hop.inputargs(lltype.Void, hop.args_r[1])
hop.exception_cannot_occur()
return hop.genop('cast_pointer', [v_input], # v_type implicit in r_result
resulttype = hop.r_result.lowleveltype)
@typer_for(lltype.cast_opaque_ptr)
def rtype_cast_opaque_ptr(hop):
assert hop.args_s[0].is_constant()
assert isinstance(hop.args_r[1], rptr.PtrRepr)
v_type, v_input = hop.inputargs(lltype.Void, hop.args_r[1])
hop.exception_cannot_occur()
return hop.genop('cast_opaque_ptr', [v_input], # v_type implicit in r_result
resulttype = hop.r_result.lowleveltype)
@typer_for(lltype.length_of_simple_gcarray_from_opaque)
def rtype_length_of_simple_gcarray_from_opaque(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
v_opaque_ptr, = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('length_of_simple_gcarray_from_opaque', [v_opaque_ptr],
resulttype = hop.r_result.lowleveltype)
@typer_for(lltype.direct_fieldptr)
def rtype_direct_fieldptr(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
assert hop.args_s[1].is_constant()
vlist = hop.inputargs(hop.args_r[0], lltype.Void)
hop.exception_cannot_occur()
return hop.genop('direct_fieldptr', vlist,
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.direct_arrayitems)
def rtype_direct_arrayitems(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('direct_arrayitems', vlist,
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.direct_ptradd)
def rtype_direct_ptradd(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0], lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('direct_ptradd', vlist,
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.cast_primitive)
def rtype_cast_primitive(hop):
assert hop.args_s[0].is_constant()
TGT = hop.args_s[0].const
v_type, v_value = hop.inputargs(lltype.Void, hop.args_r[1])
hop.exception_cannot_occur()
return gen_cast(hop.llops, TGT, v_value)
_cast_to_Signed = {
lltype.Signed: None,
lltype.Bool: 'cast_bool_to_int',
lltype.Char: 'cast_char_to_int',
lltype.UniChar: 'cast_unichar_to_int',
lltype.Float: 'cast_float_to_int',
lltype.Unsigned: 'cast_uint_to_int',
lltype.SignedLongLong: 'truncate_longlong_to_int',
}
_cast_from_Signed = {
lltype.Signed: None,
lltype.Char: 'cast_int_to_char',
lltype.UniChar: 'cast_int_to_unichar',
lltype.Float: 'cast_int_to_float',
lltype.Unsigned: 'cast_int_to_uint',
lltype.SignedLongLong: 'cast_int_to_longlong',
}
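# Primitive-to-primitive casts are composed through Signed: e.g. a
# Char -> Float cast becomes 'cast_char_to_int' followed by
# 'cast_int_to_float' (see gen_cast() below).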
def gen_cast(llops, TGT, v_value):
ORIG = v_value.concretetype
if ORIG == TGT:
return v_value
if (isinstance(TGT, lltype.Primitive) and
isinstance(ORIG, lltype.Primitive)):
if ORIG in _cast_to_Signed and TGT in _cast_from_Signed:
op = _cast_to_Signed[ORIG]
if op:
v_value = llops.genop(op, [v_value], resulttype=lltype.Signed)
op = _cast_from_Signed[TGT]
if op:
v_value = llops.genop(op, [v_value], resulttype=TGT)
return v_value
elif ORIG is lltype.Signed and TGT is lltype.Bool:
return llops.genop('int_is_true', [v_value], resulttype=lltype.Bool)
else:
# use the generic operation if there is no alternative
return llops.genop('cast_primitive', [v_value], resulttype=TGT)
elif isinstance(TGT, lltype.Ptr):
if isinstance(ORIG, lltype.Ptr):
if (isinstance(TGT.TO, lltype.OpaqueType) or
isinstance(ORIG.TO, lltype.OpaqueType)):
return llops.genop('cast_opaque_ptr', [v_value], resulttype=TGT)
else:
return llops.genop('cast_pointer', [v_value], resulttype=TGT)
elif ORIG == llmemory.Address:
return llops.genop('cast_adr_to_ptr', [v_value], resulttype=TGT)
elif isinstance(ORIG, lltype.Primitive):
v_value = gen_cast(llops, lltype.Signed, v_value)
return llops.genop('cast_int_to_ptr', [v_value], resulttype=TGT)
elif TGT == llmemory.Address and isinstance(ORIG, lltype.Ptr):
return llops.genop('cast_ptr_to_adr', [v_value], resulttype=TGT)
elif isinstance(TGT, lltype.Primitive):
if isinstance(ORIG, lltype.Ptr):
v_value = llops.genop('cast_ptr_to_int', [v_value],
resulttype=lltype.Signed)
elif ORIG == llmemory.Address:
v_value = llops.genop('cast_adr_to_int', [v_value],
resulttype=lltype.Signed)
else:
raise TypeError("don't know how to cast from %r to %r" % (ORIG,
TGT))
return gen_cast(llops, TGT, v_value)
raise TypeError("don't know how to cast from %r to %r" % (ORIG, TGT))
@typer_for(lltype.cast_ptr_to_int)
def rtype_cast_ptr_to_int(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('cast_ptr_to_int', vlist,
resulttype=lltype.Signed)
@typer_for(lltype.cast_int_to_ptr)
def rtype_cast_int_to_ptr(hop):
assert hop.args_s[0].is_constant()
v_type, v_input = hop.inputargs(lltype.Void, lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('cast_int_to_ptr', [v_input],
resulttype=hop.r_result.lowleveltype)
@typer_for(lltype.identityhash)
def rtype_identity_hash(hop):
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('gc_identityhash', vlist, resulttype=lltype.Signed)
@typer_for(lltype.runtime_type_info)
def rtype_runtime_type_info(hop):
assert isinstance(hop.args_r[0], rptr.PtrRepr)
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('runtime_type_info', vlist,
resulttype=hop.r_result.lowleveltype)
# _________________________________________________________________
# memory addresses
@typer_for(llmemory.raw_malloc)
def rtype_raw_malloc(hop):
v_size, = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address)
@typer_for(llmemory.raw_malloc_usage)
def rtype_raw_malloc_usage(hop):
v_size, = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_malloc_usage', [v_size], resulttype=lltype.Signed)
@typer_for(llmemory.raw_free)
def rtype_raw_free(hop):
s_addr = hop.args_s[0]
if s_addr.is_null_address():
raise TyperError("raw_free(x) where x is the constant NULL")
v_addr, = hop.inputargs(llmemory.Address)
hop.exception_cannot_occur()
return hop.genop('raw_free', [v_addr])
@typer_for(llmemory.raw_memcopy)
def rtype_raw_memcopy(hop):
for s_addr in hop.args_s[:2]:
if s_addr.is_null_address():
raise TyperError("raw_memcopy() with a constant NULL")
v_list = hop.inputargs(llmemory.Address, llmemory.Address, lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_memcopy', v_list)
@typer_for(llmemory.raw_memclear)
def rtype_raw_memclear(hop):
s_addr = hop.args_s[0]
if s_addr.is_null_address():
raise TyperError("raw_memclear(x, n) where x is the constant NULL")
v_list = hop.inputargs(llmemory.Address, lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('raw_memclear', v_list)
@typer_for(llmemory.offsetof)
def rtype_offsetof(hop):
TYPE, field = hop.inputargs(lltype.Void, lltype.Void)
hop.exception_cannot_occur()
return hop.inputconst(lltype.Signed,
llmemory.offsetof(TYPE.value, field.value))
# _________________________________________________________________
# non-gc objects
@typer_for(objectmodel.free_non_gc_object)
def rtype_free_non_gc_object(hop):
hop.exception_cannot_occur()
vinst, = hop.inputargs(hop.args_r[0])
flavor = hop.args_r[0].gcflavor
assert flavor != 'gc'
flags = {'flavor': flavor}
cflags = hop.inputconst(lltype.Void, flags)
return hop.genop('free', [vinst, cflags])
@typer_for(objectmodel.keepalive_until_here)
def rtype_keepalive_until_here(hop):
hop.exception_cannot_occur()
for v in hop.args_v:
hop.genop('keepalive', [v], resulttype=lltype.Void)
return hop.inputconst(lltype.Void, None)
@typer_for(llmemory.cast_ptr_to_adr)
def rtype_cast_ptr_to_adr(hop):
vlist = hop.inputargs(hop.args_r[0])
assert isinstance(vlist[0].concretetype, lltype.Ptr)
hop.exception_cannot_occur()
return hop.genop('cast_ptr_to_adr', vlist,
resulttype=llmemory.Address)
@typer_for(llmemory.cast_adr_to_ptr)
def rtype_cast_adr_to_ptr(hop):
assert isinstance(hop.args_r[0], raddress.AddressRepr)
adr, TYPE = hop.inputargs(hop.args_r[0], lltype.Void)
hop.exception_cannot_occur()
return hop.genop('cast_adr_to_ptr', [adr],
resulttype=TYPE.value)
@typer_for(llmemory.cast_adr_to_int)
def rtype_cast_adr_to_int(hop):
assert isinstance(hop.args_r[0], raddress.AddressRepr)
adr = hop.inputarg(hop.args_r[0], arg=0)
if len(hop.args_s) == 1:
mode = "emulated"
else:
mode = hop.args_s[1].const
hop.exception_cannot_occur()
return hop.genop('cast_adr_to_int',
[adr, hop.inputconst(lltype.Void, mode)],
resulttype=lltype.Signed)
@typer_for(llmemory.cast_int_to_adr)
def rtype_cast_int_to_adr(hop):
v_input, = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('cast_int_to_adr', [v_input],
resulttype=llmemory.Address)
@typer_for(isinstance)
def rtype_builtin_isinstance(hop):
hop.exception_cannot_occur()
if hop.s_result.is_constant():
return hop.inputconst(lltype.Bool, hop.s_result.const)
if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list, unicode):
if hop.args_s[0].knowntype not in (str, list, unicode):
raise TyperError("isinstance(x, str/list/unicode) expects x to be known"
" statically to be a str/list/unicode or None")
rstrlist = hop.args_r[0]
vstrlist = hop.inputarg(rstrlist, arg=0)
cnone = hop.inputconst(rstrlist, None)
return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool)
assert isinstance(hop.args_r[0], rclass.InstanceRepr)
return hop.args_r[0].rtype_isinstance(hop)
@typer_for(objectmodel.instantiate)
def rtype_instantiate(hop, i_nonmovable=None):
hop.exception_cannot_occur()
s_class = hop.args_s[0]
assert isinstance(s_class, annmodel.SomePBC)
v_nonmovable, = parse_kwds(hop, (i_nonmovable, None))
nonmovable = (i_nonmovable is not None and v_nonmovable.value)
if len(s_class.descriptions) != 1:
# instantiate() on a variable class
if nonmovable:
raise TyperError("instantiate(x, nonmovable=True) cannot be used "
"if x is not a constant class")
vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper))
r_class = hop.args_r[0]
return r_class._instantiate_runtime_class(hop, vtypeptr,
hop.r_result.lowleveltype)
classdef = s_class.any_description().getuniqueclassdef()
return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops,
nonmovable=nonmovable)
@typer_for(hasattr)
def rtype_builtin_hasattr(hop):
hop.exception_cannot_occur()
if hop.s_result.is_constant():
return hop.inputconst(lltype.Bool, hop.s_result.const)
raise TyperError("hasattr is only suported on a constant")
@typer_for(OrderedDict)
@typer_for(objectmodel.r_dict)
@typer_for(objectmodel.r_ordereddict)
def rtype_dict_constructor(hop, i_force_non_null=None):
# 'i_force_non_null' is ignored here; if it has any effect, it
# has already been applied to 'hop.r_result'
hop.exception_cannot_occur()
r_dict = hop.r_result
cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
v_result = hop.gendirectcall(r_dict.ll_newdict, cDICT)
if r_dict.custom_eq_hash:
v_eqfn = hop.inputarg(r_dict.r_rdict_eqfn, arg=0)
v_hashfn = hop.inputarg(r_dict.r_rdict_hashfn, arg=1)
if r_dict.r_rdict_eqfn.lowleveltype != lltype.Void:
cname = hop.inputconst(lltype.Void, 'fnkeyeq')
hop.genop('setfield', [v_result, cname, v_eqfn])
if r_dict.r_rdict_hashfn.lowleveltype != lltype.Void:
cname = hop.inputconst(lltype.Void, 'fnkeyhash')
hop.genop('setfield', [v_result, cname, v_hashfn])
return v_result
# _________________________________________________________________
# weakrefs
import weakref
from rpython.rtyper.lltypesystem import llmemory
@typer_for(llmemory.weakref_create)
@typer_for(weakref.ref)
def rtype_weakref_create(hop):
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr)
@typer_for(llmemory.weakref_deref)
def rtype_weakref_deref(hop):
c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1])
assert v_wref.concretetype == llmemory.WeakRefPtr
hop.exception_cannot_occur()
return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value)
@typer_for(llmemory.cast_ptr_to_weakrefptr)
def rtype_cast_ptr_to_weakrefptr(hop):
vlist = hop.inputargs(hop.args_r[0])
hop.exception_cannot_occur()
return hop.genop('cast_ptr_to_weakrefptr', vlist,
resulttype=llmemory.WeakRefPtr)
@typer_for(llmemory.cast_weakrefptr_to_ptr)
def rtype_cast_weakrefptr_to_ptr(hop):
c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1])
assert v_wref.concretetype == llmemory.WeakRefPtr
hop.exception_cannot_occur()
return hop.genop('cast_weakrefptr_to_ptr', [v_wref],
resulttype=c_ptrtype.value)
The official life cycle of Python 2 ends on 2020-01-01, but a few days ago we found some issues (T199959, T203435) caused by the Cryptography package. After py2.6 and py3.3 have been dropped, and with py2.7.2 and 2.7.3 proposed to be dropped soon (T191192), I propose to abandon support for Python 2.7.4 through 2.7.6 as well.
If someone cannot upgrade to a newer version of Python, the older Pywikibot releases are still available, either via the PyPI package or via the corresponding tag in our repository, but one should be aware that these issues may still lead to problems with the Cryptography package.
Dvorapa added a subtask: T191192: Drop support for python 2.7.2 and 2.7.3.
Note that python 2.7.6 is what is currently installed in the Toolforge environment, so that should likely depend on T199003.
I checked several systems and both run 2.7.6. This task is way too soon. Come back in a couple of years. You're going way too fast on this dropping campaign.
I think that encouraging people to use updated versions that do not depend on vulnerable dependencies is a good idea. While I agree it can be hard or annoying to go system by system to do so, I feel this is not something we should wait a couple of years to accomplish. Thanks.
PS: I would suggest slowly moving away from Trusty, because its maintenance support period is expected to end in April 2019.
This comment was removed by Xqt.
Xqt added a parent task: T213287: Drop support of python 2.7.
urllib3\util\ssl_.py:160: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. You can upgrade to a newer version of Python to solve this.
The minimum version is 2.7.9 to stop this error from occurring.
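A minimal guard (an illustrative sketch, not from this thread) that a script could use to fail early on interpreters older than 2.7.9, where the ssl module lacks the SSLContext support that urllib3 relies on:

import sys

# Hypothetical early check: bail out before urllib3 can emit
# InsecurePlatformWarning on interpreters older than 2.7.9.
if sys.version_info < (2, 7, 9):
    raise RuntimeError('Python 2.7.9 or newer is required: older builds '
                       'lack the ssl.SSLContext support urllib3 needs.')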
import radio
from microbit import button_a, button_b, display, Image, sleep, accelerometer
DIRECTIONS = [
(accelerometer.get_y, "backwards", "forwards"),
(accelerometer.get_x, "right", "left"),
]
ARROWS = {
"backwards": Image.ARROW_S,
"forwards": Image.ARROW_N,
"left": Image.ARROW_W,
"right": Image.ARROW_E,
}
SLEEP_TIME = 150
def get_direction(method, positive, negative):
# Map the raw accelerometer reading (roughly -1024..1024 milli-g) into
# coarse steps; readings near level (about -300..400) give 0, i.e. no tilt.
value = (method() + 300) // 700
if value == 0:
return None
# Reduce the step to its sign, so any tilt strength maps to one label.
value = value // abs(value)
if value > 0:
return positive
return negative
radio.on()
while True:
rv = None
ct_a = button_a.get_presses()
if ct_a > 0:
rv = 'btn:a'
else:
ct_b = button_b.get_presses()
if ct_b > 0:
rv = 'btn:b'
if rv is not None:
print(rv)
radio.send(rv)
sleep(SLEEP_TIME)
value = None
for direction in DIRECTIONS:
value = get_direction(*direction)
if value is not None:
break
if value is None:
display.show(Image("00000:03730:07970:03730:00000"))
else:
display.show(ARROWS[value])
rv = "move:{}".format(value)
print(rv)
radio.send(rv)
sleep(SLEEP_TIME)
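# ---------------------------------------------------------------------
# receiver.py -- a hypothetical companion sketch (not part of the
# original file): a second micro:bit decodes the 'btn:...' and
# 'move:...' messages broadcast by the loop above and mirrors the
# sender's arrow on its own display. All names below are illustrative.
import radio
from microbit import display, Image

RX_ARROWS = {
    "backwards": Image.ARROW_S,
    "forwards": Image.ARROW_N,
    "left": Image.ARROW_W,
    "right": Image.ARROW_E,
}

radio.on()
while True:
    msg = radio.receive()              # returns None if nothing arrived
    if msg is None:
        continue
    if msg.startswith("btn:"):
        display.show(msg[4:].upper())  # show 'A' or 'B' for a button press
    elif msg.startswith("move:"):
        # 'move:None' is sent while the sender is held level
        display.show(RX_ARROWS.get(msg[5:], Image.DIAMOND))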
|
As far as I know, Astra doesn't support 0e00 yet.
Can you provide a config for Discovery on 4.8E?
Hello, I have a problem with some channels: they are skipping frames. I am attaching a video for an easier explanation. It happens only on some channels, and they are on different multiplexes, with or without CAS. Is it something that could be coming from Astra, or be fixed within Astra? Replay the video to see the problem better - it is exactly in the middle of the video, the freezing and fast moving. Thank you.
Any info about the Spanish BBVA Liga broadcast for Russia?!
Hello, can you check your file for trojans? I am downloading it from Yandex and Microsoft Defender is reporting a virus. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Marco Benzi <[email protected]>
# @Date: 2015-06-08 16:12:27
# @Last Modified 2015-06-08
# @Last Modified time: 2015-06-08 17:38:03
#==========================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#==========================================================================
import cylLTX
import rectLTX
import uStripDesign
__all__ = ["cylLTX", "uStripDesign","rectLTX"]
|
Today I spent some hours on the next experiment with my earth electrode antenna, which I have installed in a forest near Heidelberg. The two earth electrodes are spaced about 600 m apart, with 20 copper earth rods at each end. The location is 49.44894N, 8.724619E.
Today's goal was to reach the RX of Michael Oexner at about 50 km distance on 8970 Hz. He is one of those who successfully copied my signal in my 2nd VLF experiment on 15 March (> 70 km distance). Today he started his PC, using the soundcard and SpecLab as the RX. His antenna is a PA0RDT design. He is still at work, so I am awaiting his report with interest ;-) It would be a new distance record on VLF with earth electrode antennas.
The day appeared reasonably suitable in the morning hours. Although there was some rain, the QRN was down to -90 dB in my DFCW-600 window on the VLF grabber (http://www.iup.uni-heidelberg.de/schaef ... abber.html), just about the level of my last test. That last test was done with the 300 m antenna and about 70 W; there, the signal was about 10 dB above noise on my grabber at 5.0 km distance. A summary can be found on Roger's site: http://g3xbm-qrp.blogspot.com/2010/06/l ... rmany.html . Today I transmitted about 0.2 Hz lower, since DF6NM also tried to copy the signal and he currently has some noise at 8970.0 Hz.
Today I used the SMPS, the generator and the small PA (300 W), running at 250 W. Sadly I had forgotten my ammeter for the antenna current, so the power was only measurable on the DC side of the PA, which drew about 20 A at 13.8 V DC. The first attempt was around 8:30...9:00 UTC (still visible on my grabber), but I didn't get the power into the antenna (i.e. the impedance was too high), so something was wrong with it. I found that the electrode connections were disconnected, most probably by some animals - they had even tried to dig out some earth rods. So in the first attempt the antenna was rather a lossy inverted-L antenna. When applying about 400 V rms (not kV) to this inverted-L antenna, the signal was about 10 dB above the noise anyway (pic1.jpg). After solving that problem the antenna acted like a loop again.
Sadly a storm and heavy rain came up during the experiment. I couldn't hear any thunder, so I thought it was normal rain. But there must have been some charged clouds and charged raindrops falling, since later I found heavy white vertical lines on the grabber, as is usual during thunderstorms. This kind of "noise" appears and disappears rapidly; often the noise level rises and falls by several tens of dB within just a few seconds. So it was a pity that this happened right during the test. Maybe the situation at Michael Oexner's QTH is totally different!
Compared to the last test, the TX power was 6 dB higher and the antenna has twice the length. The gain due to the increased size was measured to be 8 dB (RX level difference of DHO-38). But the signal was 30 dB above noise in 4.5 mHz (pic2.jpg) (DFCW-600 window), although the noise was about 5 dB higher than in the last experiment (see Roger's picture on his site). So there is another 11 dB of gain (35 dB - (10 dB + 6 dB + 8 dB)). The radiation direction of the antenna and the distance didn't change significantly, so where does this gain come from?! I suppose it is the wet soil that decreases the earth resistance near the earth electrodes! If this mountain/hill is mostly stone, the bulk conductivity does not change much, but the electrode resistance seems to!? So I think wet soil is an advantage, not a disadvantage, as long as just the upper layer is wet rather than the overall soil. What do the experts think?
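To make that bookkeeping explicit (my reading of the figures above):
expected SNR = 10 dB (last test) + 6 dB (TX power) + 8 dB (antenna size) = 24 dB
observed SNR = 30 dB + 5 dB (higher noise floor) = 35 dB relative to the old noise floor
unexplained gain = 35 dB - 24 dB = 11 dB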
BTW, I have never seen people look so astonished as when they saw this generator running in the middle of the forest on a rainy Friday morning!
So, hopefully Michael will tell us some news this evening. If not positive, we will go on - it was a first attempt at that distance and the conditions were really not perfect. Additionally there is a loss of about 2 dB if the antenna acts like a loop. Markus/DF6NM received nothing, but if the antenna acts like a loop, it sadly has its minimum in Markus' direction. Marco/DD7PC is at 60 km distance and pretty exactly in the radiation maximum of the antenna, so he could become another RX station?
Since 10:25, Stefan is on air again with his ground loop. Signals on his own grabber are even better than yesterday, perhaps due to somewhat lower QRN. I am also looking here but don't have much hope, as the TX antenna is N-S oriented and Nuernberg is 180 km due east.
reception. Maybe DD7PC is QRV in DFCW-600 (abt 4 mHz) again the next time?
The distance of today's experiment is 49.6 km.
is the antenna orientation. TX power was 250 W, just like yesterday.
Tnx to Michael for the effort to catch that signal (Markus as well). |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
# Copyright 2007-2008 Brisa Team <[email protected]>
""" Log module with colored logging feature. Common usage of this module can
be only importing it and calling one of the available functions: debug,
warning, info, critical, error.
"""
import os
import logging
from logging import getLogger
from brisa import __enable_logging__
from brisa.core import config
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
RESET_SEQ = '\033[0m'
COLOR_SEQ = '\033[1;%dm'
BOLD_SEQ = '\033[1m'
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED}
def formatter_message(message, use_color = True):
""" Method to format the pattern in which the log messages will be
displayed.
@param message: message log to be displayed
    @param use_color: Flag to indicate whether to use colors or not
@type message: str
@type use_color: boolean
@return: the new formatted message
@rtype: str
"""
if use_color:
message = message.replace('$RESET', RESET_SEQ).replace('$BOLD',
BOLD_SEQ)
else:
message = message.replace('$RESET', '').replace('$BOLD', '')
return message
class ColoredFormatter(logging.Formatter):
""" ColoredFormatter class, which wrappers logging.Formatter. """
def __init__(self, msg, use_color = True):
""" Constructor of the ColoredFormatter class.
@param msg: message to be displayed
@param use_color: Flag to indicate the use of color or not
@type msg: str
@type use_color: boolean
"""
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
""" format method to the ColoredFormatter class that organizes the log
message.
        @param record: information about the logger
@type record: Instance of Logger, either its RootLogger or not
"""
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname\
+ RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class ColoredLogger(logging.Logger):
FORMAT = '%(created)f $BOLD%(levelname)s$RESET $BOLD%(module)s:%(lineno)d'\
':%(funcName)s()$RESET %(message)s'
COLOR_FORMAT = formatter_message(FORMAT, True)
def __init__(self, name):
""" Constructor for the ColoredLogger class.
@param name: name of the Logger.
@type name: str
"""
global level
logging.Logger.__init__(self, name, level)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(console)
log_dict = {'WARNING': logging.WARNING,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR}
def setup_logging():
""" Method to setup the logging options. """
global debug, info, warning, critical, error, root_logger, set_level,\
setLevel, filename, level
level = log_dict.get(config.get_parameter('brisa', 'logging'),
logging.DEBUG)
filename = config.get_parameter('brisa', 'logging_output')
if filename == 'file':
filename = os.path.join(config.brisa_home, 'brisa.log')
logging.basicConfig(level=level, filename=filename,
format='%(created)f %(levelname)s %(module)s:'\
'%(lineno)d:%(funcName)s() %(message)s')
root_logger = logging.getLogger('RootLogger')
else:
logging.setLoggerClass(ColoredLogger)
root_logger = getLogger('RootLogger')
root_logger.setLevel(level)
def set_level(level):
""" Real implementation of the set level function. """
root_logger.setLevel(log_dict.get(level))
def setLevel(level):
""" Method to set the log level. """
set_level(level)
root_logger = getLogger()
if __enable_logging__:
setup_logging()
debug = root_logger.debug
info = root_logger.info
warning = root_logger.warning
critical = root_logger.critical
error = root_logger.error
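# Usage sketch (assuming this module is importable as brisa.core.log and
# __enable_logging__ is true; the messages are illustrative):
#
#   from brisa.core import log
#   log.info('server started')
#   log.set_level('ERROR')   # raise the threshold at runtime
#   log.error('this message still gets through')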
|
Emergencies happen any day, at any hour. What makes a significant difference in the outcome is how you handle the restoration process. SERVPRO of West Orange has distinguished itself over the years as a team of hard workers. We value the ground-up approach: our franchise owner, Scott Tracy, started out working on a production truck over 30 years ago. Our general manager, Shannon Perez, started as an office administrator and has worked with Scott for over 25 years. With our leadership understanding the unique challenges restoration crews and property owners face, we can deliver quality work.
We put significant emphasis on proper training, which is why almost every member of our staff undergoes IICRC training. We aim to provide the best customer service, which, in our belief, starts with proper training. We have certified Fire and Smoke Restoration Technicians, Applied Microbial Remediation Technicians, Water Restoration Technicians, and Applied Structural Drying experts.
At SERVPRO of West Orange, we believe that every job requires the right tool to get positive results fast, which is why we invest in a vast variety of restoration equipment. We have truck-mounted water extractors, industrial grade dehumidifiers, air movers and specialized drying equipment like inject dry systems to ensure we rid your property of all excess moisture.
We are always ready to assist our customers in distress whether it is during catastrophic weather events such as storms and hurricanes or natural home emergencies such as flooding from burst plumbing lines. Our efforts have been recognized by several professional organizations like West Orange Chamber, Orlando Chamber, Central Florida Hotel & Lodging Association, Better Business Bureau and Domestic Estate Management Association. We value community service, which is why we support the local chapter of the American Red Cross. |
# __ASG_GGEditMetaModel.py_____________________________________________________
from ASG import *
from ATOM3Type import *
from ATOM3 import *
class ASG_GGEditMetaModel(ASG, ATOM3Type):
def __init__(self, parent= None, ASGroot = None):
ASG.__init__(self, ASGroot, ['ASG_GGEditMetaModel' ,'GGruleEdit'])
ATOM3Type.__init__(self)
self.parent = parent
self.generatedAttributes = { }
def show(self, parent, parentWindowInfo):
ATOM3Type.show(self, parent, parentWindowInfo)
self.containerFrame = Frame(parent)
return self.containerFrame
def toString(self, maxWide = None, maxLines = None ):
        rs = ''
if maxWide: return self.strValue[0:maxWide-3]+'...'
else: return rs
def getValue(self):
return ()
def setValue(self, value):
pass
def writeConstructor2File(self, file, indent, objName="at", depth = 0, generatingCode = 0):
"Method that writes into a file the constructor and the value of the object. Must be overriden in children"
def writeValue2File(self, file, indent, objName="at", depth = 0, generatingCode = 0):
"Method that writes into a file the the value of the object. Must be overriden in children"
def clone(self):
        cloneObject = ASG_GGEditMetaModel( self.parent )
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
ASGNode.copy(self, other)
def destroy(self):
self.containerFrame = None
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
            return self.graphObject_.postCondition(actionID, params)
else: return None
|
Saturday was the big day for me to install the main bulk of my work at Esotera. I am eternally grateful to Nick Weaver and Nigel Evans for their invaluable help carting, transporting and installing my work – the 3 of us driving in convoy with full loads meant that I didn’t have to do several trips the same day. I was installing from 8.30am-7pm, aided by some lovely refreshments courtesy of Shirley and Andrew Harvey. Thanks also to Jason Nosworthy, who helped with the landscaping side of things. Apart from the grass not growing quite enough to enclose the site for my main installation at Esotera (a set of silver birch trees), I am really pleased with the site – its dappled light and colours give it a magical quality, which just happens to play and harmonise with my work. Until the work was installed (partly) on Saturday, I had no clear idea of these effects.
I still have a serpent and giant nest to finish, so won’t linger. I plan to add a fuller write up about the final installations after completion later this week, when a photographer will visit the site to take better pics. Then it’s the opening/talk/workshop/private view on Saturday… Good luck everyone!
In addition to collecting materials on walks in the area and at Pylle scrapyard, my work has relied on the kindness of numerous local donors, who have given me recycled materials and/or their time. A huge thank you to the following: Sam Garland, John Shepherd Feeders, Ridgeway Garage, Station Road Garage, Pete Reakes, Fon Cosens, Somerset Earth Science Centre, Andrew and Shirley Harvey, Vicky Grinter, Georgia Grinter, Caroline James, Jason Nosworthy, Nigel Evans, Peter Osborne, Denise Campbell, Nick Weaver, Adrian Candy.
I have struggled to keep up with my own self-inflicted deadlines for making the Abundance installations, especially during this school summer holiday, although I am making progress. With September nearly upon us, I’ll need to accelerate in order to get it all completed on time.
My large ‘fallen nest’ is coming along ok and I hope to complete it in the next week. Other parts to the lichen-inspired installation have now been shaped and patinated using reclaimed copper and lead. I have a couple more items to make, and will then set it all out again in my garden as a mock up. It’s been great fun exploring techniques and finding new ways of working with materials.
We are meeting with the owners at Esotera this Friday for a final update on the work and Art Weeks arrangements. Very soon it will be time to install…. better get moving!
Pieces are steadily coming together. The aim is to create a mass of giant growing forms, inspired by lichen, linked to the Eden concept and utilising found objects. It involves hours of weaving, wrapping and forming, using soft and hard materials together, which is relatively new for me, and occasionally I wonder if I'll ever get it finished. Anyway, it's becoming a daily activity and no matter what else the day holds, I try to spend a few hours on the Abundance work. I'm hooked on what I'm making, and what I would really like is to have no other interruptions, but life isn't that simple.
Earlier this week we went to Esotera again to measure the installation area and confirm a few details with Zoe. A landscape designer friend, Jason, came along to help work out where the grass might be allowed to grow a little, in order to create more of an enclosure for the work. Owners Andrew and Shirley have been very accommodating with this.
On reading the other Abundance artist posts, I like the idea that there seems to be several crossovers in our work; connections which somehow tie the Trail together.
Today was a perfect day to revisit Esotera. My first visit with Zoe was on a grim winter’s day. But today blazing sunshine had brought out new flowers from bulbs, ferns were unravelling, ducks, chickens, fish and a very sociable cat ‘Gengis’ were all glorifying our brilliant 1st of May.
As I wandered around, I could understand why Esotera gets so many visitors who stay for hours. The owners and garden envelop you into their world – a place at peace with itself, whilst buzzing with the magic of life and I found it hard to leave! Undecided about the exact location of my installation, with several possibilities, I took plenty of photos and absorbed the ambience. It helped to confirm my ideas for the project, and 3 hours later I left, armed with a load of reclaimed materials for my work there, generously donated by the owners Shirley and Andrew.
My ideas have moved on to something a little more ambitious. The work will be time-consuming but fun to make! My thoughts have lingered on Genesis' Garden of Eden – the most abundant garden, where plants, creatures and humans grow and roam freely in complete harmony. It is the first Utopian concept, explored by many, including Plato and Thomas More. Utopian ideals encompass world peace, enlightenment, labour, arts and science, fulfillment, harmony between man and nature, and all needs supplied by the abundance of nature.
Next week I’m revisiting the garden to get more of a feel for it, discuss my ideas with Shirley and Andrew, see the garden blooming and arrange practicalities. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright notice
# ----------------
#
# Copyright (C) 2012-2014 Daniel Jung
# Contact: [email protected]
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
#
# Original copyright statement by the authors of optparse
# =======================================================
#
# Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
# Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Extension of the command line option parser from the module *optparse*,
which was originally written by Gregory P. Ward.
The most important changes are:
- information about the default values is automatically appended to the
help strings of each option (if they do not already include the word
"%default")
- options and option groups are displayed in alphabetical order on the help
page
- option string conflicts may not necessarily lead to an exception. First it
is tried to move the option string to the new option (give it a new
meaning), as long as at least one option string remains at the old option,
thus overwriting the option string's meaning
- pydoc.pager is now used to display the help (behavior similar to the bash
command *less*)
- by default, the *help* and *version* options are moved to an option group
called "General options"
- the *help* option does no longer have the short option string "-h", but
instead "-?"
- write *None* instead of *none* in default option value help string
- preserve linebreaks in description (still needs improvement)"""
#
# To do:
# --> only use pydoc.pager for showing help if the length of the help is
# exceeding the terminal window height
#
__created__ = '2012-05-17'
__modified__ = '2013-02-06'
import optparse
import pydoc
import textwrap
class OptionContainer(optparse.OptionContainer):
"""Extended version of optparse.OptionContainer."""
# 2012-05-21 - 2012-05-23
def get_option_by_name(self, name):
"""Get option by option name. A little bit different than
*get_option()*, as it first checks *dest* before trying the option
strings, and also does not expect the dashes ("-" or "--") when
referencing the option strings."""
# 2012-05-21 - 2012-05-21
# check destinations
for option in self.option_list:
if option.dest and option.dest == name:
return option
# try option strings
return self._long_opt.get('--'+name) or self._short_opt.get('-'+name)
def add_option(self, *args, **kwargs):
"""Before calling the original method *add_option()*, this version
checks if the same option strings (long and short) do already exist in
        another option definition. Instead of raising an exception right away,
it tries to "overwrite" the meaning of the option string, i.e. the
option string is deleted from the other option. However, this will only
be done if this option string is not *the only one* defined by the
other option, because at least one option string should persist for
each option."""
# 2012-05-23 - 2012-05-23
# cycle all option strings of the new option
for optionstring in args:
# check if this option string already exists in some option
if optionstring in self._short_opt:
option = self._short_opt[optionstring]
# make sure it is not the only option string of this option
if len(option._short_opts)+len(option._long_opts) > 1:
# delete this option string from the old option
option._short_opts.remove(optionstring)
del self._short_opt[optionstring]
elif optionstring in self._long_opt:
option = self._long_opt[optionstring]
# make sure it is not the only option string of this option
if len(option._short_opts)+len(option._long_opts) > 1:
                    # delete this option string from the old option
option._long_opts.remove(optionstring)
del self._long_opt[optionstring]
# finally, call the original method
optparse.OptionContainer.add_option(self, *args, **kwargs)
class OptionGroup(optparse.OptionGroup, OptionContainer):
"""Just make sure the modified method *OptionContainer.add_option()* is
used also by *OptionGroup* (monkey patch). Otherwise, the original class
stays untouched."""
# 2012-05-23 - 2012-05-23
add_option = OptionContainer.add_option
class OptionParser(optparse.OptionParser, OptionContainer):
"""Improved version of *optparse.OptionParser* that overwrites some of its
methods and changes its behavior a little bit."""
# 2012-05-17 - 2013-02-06
# former hdp._MyOptionParser from 2011-09-14 until 2011-12-19
# former tb.MyOptionParser from 2011-08-03
def __init__(self, *args, **kwargs):
"""Improved version of the constructor. Sets the version string if the
user has not done so himself, because an empty version string would
        lead to a bug later on. If the keyword argument *general* is set to
*True*, move help and version options to the newly created option group
"General options" (default: *True*)."""
# 2012-05-17 - 2012-05-21
# former hdp._MyOptionParser.__init__ from 2011-11-11
# make sure the keyword argument "version" is set to a non-empty string
if not 'version' in kwargs:
kwargs.update(version=' ')
if not 'formatter' in kwargs:
kwargs.update(formatter=IndentedHelpFormatterWithNL())
# catch additional keyword arguments before calling the original method
general = kwargs.pop('general', True)
# call original initialisation method
optparse.OptionParser.__init__(self, *args, **kwargs)
# create an option group "general options" and move help and version
# option there
if general:
og = optparse.OptionGroup(self, 'General options')
self.move_option('help', og)
self.move_option('version', og)
self.add_option_group(og)
def cmp_opts(self, a, b):
"""Compare options by the first short option name or, if there is no
short option name, by the first long option name. Needed for sorting
the options."""
# 2012-05-17
# former hdp._MyOptionParser.cmp_opts from 2011-08-03
if len(a._short_opts) > 0:
aname = a._short_opts[0][1:]
else:
aname = a._long_opts[0][2:]
if len(b._short_opts) > 0:
bname = b._short_opts[0][1:]
else:
bname = b._long_opts[0][2:]
if aname == bname:
return 0
elif aname < bname:
return -1
else:
return 1
def print_help(self, file=None):
"""Like the original, except it uses *pydoc.pager* to display the help
text on the screen. The file argument no longer has any meaning, it
just stays there for compatibility reasons. Also, the method now sorts
all options and option groups before displaying the help text."""
# 2012-05-17
# former hdp._MyOptionParser.print_help from 2011-08-02 - 2011-12-19
# How can line breaks be preserved in epilog and description? Maybe
        # look at the responsible method in optparse.OptionParser to get a hint
# sort options (also within option groups, and groups themselves)
self.option_list.sort(cmp=self.cmp_opts)
self.option_groups.sort(cmp=lambda a, b: -1
if a.title < b.title else 1)
for ind in xrange(len(self.option_groups)):
self.option_groups[ind].option_list.sort(cmp=self.cmp_opts)
#if file is None:
# file = _sys.stdout
encoding = self._get_encoding(file)
#file.write(self.format_help().encode(encoding, "replace"))
pydoc.pager(self.format_help().encode(encoding, 'replace'))
def _add_help_option(self):
"""Like the original method, but does not define the short option
string "-h". Instead, defines a short option "-?"."""
# 2012-05-17 - 2012-07-09
# former hdp._MyOptionParser.print_help 2011-08-03
self.add_option('-?', '--help', action='help',
help='show this help message and exit')
def add_all_default_values(self):
"""Automatically append the default values to the help strings of all
the options of this option parser. Those options that already contain
the substring "%default" are skipped."""
# 2012-05-18
self._add_default_values(self)
for og in self.option_groups:
self._add_default_values(og)
def _add_default_values(self, op):
"""Automatically append information about the default values to the
help string of the given option parser or option group object. Those
options that already contain the substring "%default" are skipped.
This method is used by *add_all_default_values()*, which is the one
that should be called by the user. There should be no need for the user
to call this method manually."""
# 2012-05-18 - 2012-05-22
# former hdp.BaseHDP.help_default from 2011-09-14
# former tb.BaseProc.help_default from 2011-02-11
for o in op.option_list:
if o.help and not '%default' in o.help and o.action == 'store' \
and str(o.default) != '':
# then append the information to the help string
if not o.help[-1] in '.!':
o.help += '.'
if o.help[-1] != ' ':
o.help += ' '
o.help += 'Default: %default'
def move_option(self, name, destination, source=None):
"""Move an already defined option from one option parser object to
another. By default, the source object is the option parser object
itself, but can also be set to any option group object. Also the
destination can be any option parser or option group object."""
# 2012-05-18 - 2012-05-21
# set source to this option parser object by default
if source is None:
source = self
# search for the given option name, remember its index
try:
index = source.option_list.index(self.get_option_by_name(name))
except ValueError:
raise KeyError('option "%s" not found' % name)
# move option object to new location
destination.option_list.append(source.option_list.pop(index))
def parse_args(self, args=None, values=None):
"""Does a little bit of extra stuff before calling the original method
*parse_args()*."""
# 2012-05-21 - 2012-05-22
# add the default values to all help strings
self.add_all_default_values()
# make sure line breaks are respected in epilog and description
#self.epilog = '\n'.join([s.strip() for s in self.epilog.split('\n')])
#self.description = '\n'.join([s.strip() \
#for s in self.description.split('\n')])
# How can line breaks be preserved in epilog and description? Maybe
        # look at the responsible method in optparse.OptionParser to get a hint
# call original method
return optparse.OptionParser.parse_args(self, args=args, values=values)
# next thing will be to create an argument "dictionary" (or similar) to
# feed the Values instance with extra values again, recall "dest" or
# long or short option strings substitute kw2op with something more
# reasonable I think this can already be done with the argument
# "values" probably just say "values=optparse.Values(dictionary)" but
# then, only the true option names are allowed, i.e. option.dest
def get_option_group_by_title(self, title):
"""Get option group by group title. It is sufficient that the group
title starts with the given string. All strings are converted to lower
case before comparison."""
# 2012-05-21 - 2012-05-21
# check all groups
for group in self.option_groups:
if group.title.lower().startswith(title.lower()):
return group
else:
raise KeyError('option group %s not found' % title)
def walk(self):
"""Return iterator over all options of the option parser, including
those in option groups."""
### already exists by the name _get_all_options (but it is not an
### iterator)
# 2012-05-22 - 2012-05-22
for option in self.option_list:
yield option
for group in self.option_groups:
for option in group.option_list:
yield option
def search_option(self, name):
"""Search the whole option parser recursively (also in option groups)
for an option by the given name. If no matching option is found, return
*False*. Otherwise, return reference to the option object."""
# 2012-05-22 - 2012-05-22
for option in self.walk():
if option.dest and option.dest == name \
or '--'+name in option._long_opts \
or '-'+name in option._short_opts:
return option
else:
return False
add_option = OptionContainer.add_option
class IndentedHelpFormatterWithNL(optparse.IndentedHelpFormatter):
"""Solve the problem that newline characters are erased in the docstring.
Courtesy goes to Tim Chase:
https://groups.google.com/forum/?fromgroups#!topic/comp.lang.python/bfbmtUGhW8I"""
__created__ = '2013-02-06'
__modified__ = '2013-02-06'
NO_DEFAULT_VALUE = 'None'
def format_description(self, description):
if not description:
return ''
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
# the above is still the same
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit, desc_width, initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
# everything is the same up through here
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(textwrap.wrap(para, self.help_width))
# everything is the same after here
result.append("%*s%s\n" % (
indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
|
PROMOZIONI (Promotions in English) revolves around the surreal experience of a promoter who tries to establish honest relationships with her customers and the people she works with, but slowly realises that her values cannot be reconciled with the nature of sales. The people she meets, the attacks she endures and the memories of her beloved grandma start to convince her to pursue her real dream before anything else.
Nobody's Child is a London-based theatre company founded and run by Italian collaborators. Founder Anna Carfora established the company to create a link between Italian and English theatre and to bring her work, stories and background to the English theatre audience. Promozioni is directed by Norwegian director Tonje Wik Olaussen. The play, originally written in Italian by Anna Carfora, has been translated into English by writer Tim Parks.
Cast: Anna Carfora, Rachele Fregonese, Alida Pantone, Iacopo Paradisi, and Davide Roberti. |
import pygame
import dotworld
import menuworld
from src.define import *
from src.dot.dottext import DotText
class GameOver(dotworld.DotWorld):
def __init__(self, score):
dotworld.DotWorld.__init__(self)
self.counter = 0
self.limit = 400
self.alpha = 0
self.animState = 1
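        # animState: 1 = fading in, 2 = holding at full alpha, 3 = fading out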
self.label = DotText("Game Over", 32, (0, 0, 0), (255, 255, 255))
self.scorelabel = DotText("Score: " + str(int(score / GameDefine.SCORE_DECIMAL)), 24, (0, 0, 0), (255, 255, 255))
def onAttachScreen(self):
pygame.mixer.music.stop()
self.label.centerX(self.screen.width)
self.label.centerY(self.screen.height)
self.scorelabel.centerX(self.screen.width)
self.scorelabel.marginTop(dotget(1))
self.scorelabel.below(self.label)
def changeAlpha(self):
self.label.surface.set_alpha(self.alpha)
self.scorelabel.surface.set_alpha(self.alpha)
def listen(self, inputResult):
if inputResult == GameDefine.COMMAND_BOOST:
self.pause()
def step(self):
if self.active:
self.changeAlpha()
self.label.draw(self.screen.displaysurf)
self.scorelabel.draw(self.screen.displaysurf)
self.counter += 1
if self.animState == 1:
self.alpha += 2
if self.alpha > 255:
self.animState = 2
self.counter = 0
if self.animState == 2:
self.counter += 1
if self.counter > self.screen.fps * 3:
self.animState = 3
if self.animState == 3:
self.alpha -= 2
if self.alpha <= 0:
self.pause()
else:
self.screen.setWorld(menuworld.MenuWorld())
del self
|
Students face unique challenges when battling criminal charges while trying to keep up with their studies. That's why Thomas A. Camp, a skilled underage possession of alcohol (UPA) and minor in possession (MIP) attorney, will get to know your schedule and will know when you have class or a break from school.
Our firm works with many students and has developed an intimate knowledge of how to navigate the process, arranging court appearances at convenient times that don't interrupt your classes or breaks from school.
Our firm takes this accommodating approach in order to minimize the impact on your schedule, your education and your future. If you have been charged with a minor in possession charge, you should contact a University of Georgia minor in possession lawyer right away.
It's understandable that parents would be concerned that dealing with an underage drinking or drug charge would disrupt their son or daughter's class schedule. We are happy to talk to parents and explain the situation and our approach that takes student schedules into account.
Because of our firm's experience, Mr. Camp fields many calls from parents from Atlanta and other areas of the state who have been referred to our office by friends and acquaintances.
The good news is that in many cases there are pretrial diversion programs and first-offender statutes that will often allow students to avoid having a criminal conviction on their record. We are oftentimes also able to obtain record restrictions and expunge the record of an arrest to prevent prospective employers from seeing the arrest on a criminal background check.
Even bright and intelligent individuals use poor judgment and make bad decisions on occasion. Unfortunately, these poor decisions can lead to serious consequences. It may not seem like a big deal to relieve yourself in the bushes after drinking at a tailgate party on a Saturday afternoon, but that is a violation of the law and can lead to a urination in public charge. Thomas Camp has dealt with many public urination cases and understands the local ordinances.
In some instances attorney Camp is able to work out resolutions that involve a defendant admitting to violations of local ordinances (instead of more severe state laws) which can minimize the impact on a student's record and future career.
This is a crucial option because municipal ordinances don't show up on your criminal history as a conviction. We can work with the charges as to create a nominal impact on the student and his or her record. Similarly, public intoxication is under a local ordinance which means that we can help you work through this charge as well.
To learn more about the representation we can provide you, please contact our Athens, Georgia, law firm today at 706-621-6284 or toll free at 866-475-8658. |
from __future__ import print_function, division
import os
import util
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from datetime import datetime
LEARNING_RATE = 0.0002
BETA1 = 0.5
BATCH_SIZE = 64
EPOCHS = 2
SAVE_SAMPLE_PERIOD = 50
if not os.path.exists('samples'):
os.mkdir('samples')
def lrelu(x, alpha = 0.2):
return tf.maximum(alpha * x, x)
class ConvLayer:
def __init__(self, name, mi, mo, apply_batch_norm, filtersz = 5, stride = 2, f = tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape = (filtersz, filtersz, mi, mo),
initializer = tf.truncated_normal_initializer(stddev = 0.02)
)
self.b = tf.get_variable(
"W_%s" % name,
shape = (mo,),
initializer = tf.zeros_initializer()
)
self.f = f
self.name = name
self.stride = stride
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
def forward(self, X, reuse, is_training):
conv_out = tf.nn.conv2d(
X,
self.W,
strides=[1, self.stride, self.stride, 1],
padding='SAME'
)
conv_out = tf.nn.bias_add(conv_out, self.b)
if self.apply_batch_norm:
conv_out = tf.contrib.layers.batch_norm(
conv_out,
decay = 0.9,
updates_collections = None,
epsilon = 1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = self.name
)
return self.f(conv_out)
class FractionallyStridedConvLayer:
def __init__(self, name, mi, mo, output_shape, apply_batch_norm, filtersz = 5, stride = 2, f = tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape = (filtersz, filtersz, mo, mi),
initializer = tf.truncated_normal_initializer(stddev = 0.02)
)
self.b = tf.get_variable(
"W_%s" % name,
shape = (mo,),
initializer = tf.zeros_initializer()
)
self.f = f
self.name = name
self.stride = stride
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
self.output_shape = output_shape
def forward(self, X, reuse, is_training):
conv_out = tf.nn.conv2d_transpose(
value = X,
filter = self.W,
output_shape = self.output_shape,
strides=[1, self.stride, self.stride, 1],
)
conv_out = tf.nn.bias_add(conv_out, self.b)
if self.apply_batch_norm:
conv_out = tf.contrib.layers.batch_norm(
conv_out,
decay = 0.9,
updates_collections = None,
epsilon = 1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = self.name
)
return self.f(conv_out)
class DenseLayer(object):
def __init__(self, name, M1, M2, apply_batch_norm, f = tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape = (M1, M2),
initializer = tf.random_normal_initializer(stddev = 0.02)
)
self.b = tf.get_variable(
"b_%s" % name,
shape = (M2, ),
initializer = tf.zeros_initializer()
)
self.f = f
self.name = name
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
def forward(self, X, reuse, is_training):
a = tf.matmul(X, self.W) + self.b
if self.apply_batch_norm:
a = tf.contrib.layers.batch_norm(
a,
decay = 0.9,
updates_collections = None,
epsilon = 1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = self.name
)
return self.f(a)
class DCGAN:
def __init__(self, img_length, num_colors, d_sizes, g_sizes):
self.img_length = img_length
self.num_colors = num_colors
self.latent_dims = g_sizes['z']
self.X = tf.placeholder(
tf.float32,
shape = (None, img_length, img_length, num_colors),
name = 'X'
)
self.Z = tf.placeholder(
tf.float32,
shape=(None, self.latent_dims),
name = 'Z'
)
logits = self.build_discriminator(self.X, d_sizes)
self.sample_images = self.build_generator(self.Z, g_sizes)
        with tf.variable_scope('discriminator') as scope:
scope.reuse_variables()
sample_logits = self.d_forward(self.sample_images, reuse = True)
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
self.sample_images_test = self.g_forward(
self.Z, reuse = True, is_training = False
)
self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits = logits,
labels = tf.ones_like(logits)
)
self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits = sample_logits,
labels = tf.zeros_like(sample_logits)
)
self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)
self.g_cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits = sample_logits,
labels = tf.ones_like(sample_logits)
)
)
real_predictions = tf.cast(logits > 0, tf.float32)
fake_predictions = tf.cast(sample_logits < 0, tf.float32)
num_predictions = 2.0 * BATCH_SIZE
num_correct = tf.reduce_sum(real_predictions) + tf.reduce_sum(fake_predictions)
self.d_accuracy = num_correct / num_predictions
# optimizers
self.d_params = [t for t in tf.trainable_variables() if t.name.startswith('d')]
self.g_params = [t for t in tf.trainable_variables() if t.name.startswith('g')]
self.d_train_op = tf.train.AdamOptimizer(
LEARNING_RATE, beta1 = BETA1
).minimize(
self.d_cost, var_list = self.d_params
)
self.g_train_op = tf.train.AdamOptimizer(
LEARNING_RATE, beta1 = BETA1
).minimize(
self.g_cost, var_list = self.g_params
)
self.init_op = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init_op)
def build_discriminator(self, X, d_sizes):
with tf.variable_scope("discriminator") as scope:
self.d_convlayers = []
mi = self.num_colors
dim = self.img_length
count = 0
for mo, filtersz, stride, apply_batch_norm in d_sizes['conv_layers']:
name = "convlayer_%s" % count
count += 1
layer = ConvLayer(name, mi, mo, apply_batch_norm, filtersz, stride, lrelu)
self.d_convlayers.append(layer)
mi = mo
print("dim: ", dim)
dim = int(np.ceil(float(dim) / stride))
mi = mi * dim * dim
self.d_denselayers = []
for mo, apply_batch_norm in d_sizes['dense_layers']:
name = "denselayer_%s" % count
count += 1
layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)
mi = mo
self.d_denselayers.append(layer)
name = "denselayer_%s" % count
self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x : x)
logits = self.d_forward(X)
return logits
def d_forward(self, X, reuse = None, is_training = True):
output = X
for layer in self.d_convlayers:
output = layer.forward(output, reuse, is_training)
output = tf.contrib.layers.flatten(output)
for layer in self.d_denselayers:
output = layer.forward(output, reuse, is_training)
logits = self.d_finallayer.forward(output, reuse, is_training)
return logits
def build_generator(self, Z, g_sizes):
with tf.variable_scope('generator') as scope:
dims = [self.img_length]
dim = self.img_length
for _, _, stride, _ in reversed(g_sizes['conv_layers']):
dim = int(np.ceil(float(dim) / stride))
dims.append(dim)
dims = list(reversed(dims))
print("dims: ", dims)
self.g_dims = dims
mi = self.latent_dims
self.g_denselayers = []
count = 0
for mo, apply_batch_norm in g_sizes['dense_layers']:
name = "g_denselayers_%s" % count
count += 1
                layer = DenseLayer(name, mi, mo, apply_batch_norm)
self.g_denselayers.append(layer)
mi = mo
mo = g_sizes['projection'] * dims[0] * dims[0]
name = "g_denselayer_%s" % count
layer = DenseLayer(name, mi, mo, not g_sizes['bn_after_project'])
self.g_denselayers.append(layer)
mi = g_sizes['projection']
self.g_convlayers = []
num_relus = len(g_sizes['conv_layers']) - 1
activation_functions = [tf.nn.relu] * num_relus + [g_sizes['output_activation']]
for i in range(len(g_sizes['conv_layers'])):
name = "fs_convlayer_%s" % i
mo, filtersz, stride, apply_batch_norm = g_sizes['conv_layers'][i]
f = activation_functions[i]
output_shape = [BATCH_SIZE, dims[i+1], dims[i+1], mo]
print("mi: ", mi, "mo: ", mo, "output_shape: ", output_shape)
layer = FractionallyStridedConvLayer(
name, mi, mo, output_shape, apply_batch_norm, filtersz, stride, f
)
self.g_convlayers.append(layer)
mi = mo
self.g_sizes = g_sizes
return self.g_forward(Z)
def g_forward(self, Z, reuse = None, is_training = True):
output = Z
for layer in self.g_denselayers:
output = layer.forward(output, reuse, is_training)
output = tf.reshape(
output,
[-1, self.g_dims[0], self.g_dims[0], self.g_sizes['projection']]
)
# apply bnorm
if self.g_sizes['bn_after_project']:
output = tf.contrib.layers.batch_norm(
output,
decay = 0.9,
updates_collections = None,
epsilon=1e-5,
scale = True,
is_training = is_training,
reuse = reuse,
scope = 'bn_after_project'
)
for layer in self.g_convlayers:
output = layer.forward(output, reuse, is_training)
return output
def fit(self, X):
d_costs = []
g_costs = []
N = len(X)
n_batches = N // BATCH_SIZE
total_iters = 0
for i in range(0, EPOCHS):
print("epoch: ", i)
np.random.shuffle(X)
for j in range(0, n_batches):
t0 = datetime.now()
if(type(X[0]) is str):
batch = util.files2images(
X[j*BATCH_SIZE:((j+1)*BATCH_SIZE)]
)
else:
batch = X[j*BATCH_SIZE:(j+1)*BATCH_SIZE]
Z = np.random.uniform(-1, 1, size=(BATCH_SIZE, self.latent_dims))
_, d_cost, d_acc = self.sess.run(
(self.d_train_op, self.d_cost, self.d_accuracy),
feed_dict = {self.X: batch, self.Z: Z}
)
d_costs.append(d_cost)
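                # Run the generator update twice per discriminator update; a
                # common heuristic to keep the generator from falling behind.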
_, g_cost1 = self.sess.run(
(self.g_train_op, self.g_cost),
feed_dict = {self.Z : Z}
)
_, g_cost2 = self.sess.run(
(self.g_train_op, self.g_cost),
feed_dict = {self.Z : Z}
)
g_costs.append((g_cost1 + g_cost2) / 2)
print("batch %d/%d - dt: %s - d_acc: %.2f" % (j+1, n_batches, datetime.now() - t0))
total_iters += 1
if total_iters % SAVE_SAMPLE_PERIOD == 0:
print("saving sample...")
samples = self.sample(64)
d = self.img_length
if samples.shape[-1] == 1:
samples = samples.reshape(64, d, d)
flat_image = np.empty((8*d, 8*d))
k = 0
for a in range(0, 8):
for b in range(0, 8):
flat_image[a*d:(a+1)*d, b*d:(b+1)*d] = samples[k].reshape(d,d)
k+=1
else:
flat_image = np.empty((8*d, 8*d, 3))
k = 0
for a in range(0, 8):
for b in range(0, 8):
flat_image[a*d:(a+1)*d, b*d:(b+1)*d] = samples[k]
k+=1
sp.misc.imsave(
'samples/samples_at_iter_%d.png' % total_iters,
(flat_image + 1) / 2
)
plt.clf()
plt.plot(d_costs, label = 'discriminator cost')
plt.plot(g_costs, label = 'generator cost')
plt.legend()
plt.savefig('cost_vs_iteration.png')
def sample(self, n):
Z = np.random.uniform(-1, 1, size = (n, self.latent_dims))
samples = self.sess.run(self.sample_images_test, feed_dict = {self.Z : Z})
return samples
def celeb():
X = util.get_celeb()
dim = 64
colors = 3
d_sizes = {
'conv_layers' : [
(64,5,2,False),
(128,5,2,True),
(256,5,2,True),
(512,5,2,True)
],
'dense_layers': []
}
g_sizes = {
'z': 100,
'projection':512,
'bn_after_project': True,
'conv_layers' : [
(256,5,2,True),
(128,5,2,True),
(64,5,2,True),
(colors, 5,2, False)
],
'dense_layers': [],
'output_activation' : tf.tanh
}
gan = DCGAN(dim, colors, d_sizes, g_sizes)
gan.fit(X)
def mnist():
X, Y = util.get_mnist()
X = X.reshape(len(X), 28, 28, 1)
dim = X.shape[1]
colors = X.shape[-1]
d_sizes = {
'conv_layers': [(2, 5, 2, False), (64, 5, 2, True)],
'dense_layers': [(1024, True)]
}
g_sizes = {
'z':100,
'projection': 128,
'bn_after_project': False,
'conv_layers': [(128, 5, 2, True), (colors, 5, 2, False)],
'dense_layers': [(1024, True)],
'output_activation' : tf.sigmoid
}
gan = DCGAN(dim, colors, d_sizes, g_sizes)
gan.fit(X)
if __name__ == '__main__':
mnist()
|
Avian eggshells exhibit some of the most diverse and fascinating arrays of complex biological variability within the animal kingdom. The variation seen in eggshell colour and maculation (pigment spots), for example, can be found between species, within species, and even between eggs in a clutch from the same female. Eggshell maculation has fascinated scientists for decades, and many functional explanations for it have been posited, including crypsis, thermoregulation, microbial defence and sexual signalling. While the variation and function of eggshell maculation have received much attention, the structure of the eggshell itself has received comparatively little focus. The relatively few studies that have investigated eggshell structure, particularly that of the egg surface, have found large variation in surface structures and shell thickness. However, little is known about how these structures function, or rather, what their true function is. This project aims to characterise eggshell surface structure along the avian phylogenetic tree, and determine – through novel mechanical and structural engineering approaches – how different eggshell surface structures function. Bird eggs offer a fascinating model system, as birds breed on all seven continents; at altitudes greater than 4000 m above sea level, in temperature ranges between -40°C and 50°C, and in environments varying from water-saturated to extremely xeric. Egg mass can range from 1.4 kg down to 0.4 g (for Common Ostriches Struthio camelus and Vervain Hummingbirds Mellisuga minima, respectively), while clutch size can vary from a single egg to broods of over fourteen.
This project aims to identify novel eggshell structures, ascertain and describe their function, and identify whether such structures can be beneficial commercially (biomimetics). Given the wide-array of nest environments the eggs of different species inhabit, it is likely something novel will be discovered that will have practical applications.
Nesting behaviour influences species-specific gas exchange across avian eggshells. |
# -*- coding: utf-8 -*-
# Copyright 2011 David Malcolm <[email protected]>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import gcc
from gccutils import pprint
def on_pass_execution(p, fn):
if p.name == '*warn_function_return':
assert isinstance(fn, gcc.Function)
print('fn: %r' % fn)
assert isinstance(fn.decl, gcc.FunctionDecl)
print('fn.decl.name: %r' % fn.decl.name)
assert isinstance(fn.cfg, gcc.Cfg) # None for some early passes
assert fn.cfg.basic_blocks[0] == fn.cfg.entry
assert fn.cfg.basic_blocks[1] == fn.cfg.exit
for bb in fn.cfg.basic_blocks:
if bb.gimple:
for i,stmt in enumerate(bb.gimple):
print('gimple[%i]:' % i)
print(' str(stmt): %r' % str(stmt))
print(' repr(stmt): %r' % repr(stmt))
if isinstance(stmt, gcc.GimpleSwitch):
print(' stmt.indexvar: %r' % stmt.indexvar)
print(' stmt.labels: %r' % stmt.labels)
for j, label in enumerate(stmt.labels):
print(' label[%i].low: %r' % (j, label.low))
print(' label[%i].high: %r' % (j, label.high))
print(' label[%i].target: %r' % (j, label.target))
gcc.register_callback(gcc.PLUGIN_PASS_EXECUTION,
on_pass_execution)
|
From our Refuge collection! Slip into these distressed, light wash denim shorts for a showstopping look! A button fly highlights the high waist, while cool cut-off hems complete the cheeky silhouette. Rips and shreds pair with whiskering and fading, boasting that lived-in look we love! |
# /psqtraviscontainer/output.py
#
# Helper classes to monitor and capture output as it runs.
#
# See /LICENCE.md for Copyright information
"""Helper classes to monitor and capture output as it runs."""
import sys
import threading
def monitor(stream,
modifier=None,
live=False,
output=sys.stdout):
"""Monitor and print lines from stream until end of file is reached.
Each line is piped through :modifier:.
"""
from six import StringIO
captured = StringIO()
modifier = modifier or (lambda l: l)
def read_thread():
"""Read each line from the stream and print it."""
# No stream, not much we can really do here.
if not stream:
return
for line in stream:
line = modifier(line)
captured.write(line)
if live:
output.write(line)
output.flush()
def joiner_for_output(thread):
"""Closure to join the thread and do something with its output."""
thread.start()
def join():
"""Join the thread and then return its output."""
thread.join()
captured.seek(0)
return captured
return join
# Note that while it is necessary to call joiner_for_output if you want
# resources to be cleaned up, it is not necessary if you don't care
# about cleanup and just want the program to keep running.
return joiner_for_output(threading.Thread(target=read_thread))
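# Usage sketch (any line-iterable text stream works; a subprocess pipe is
# shown for illustration):
#
#   import subprocess
#   proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE,
#                           universal_newlines=True)
#   join = monitor(proc.stdout, modifier=lambda l: '>> ' + l, live=True)
#   proc.wait()
#   print(join().read())   # '>> hello\n'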
|
We have a skilled crew that specializes in making asphalt repairs that not only look professional but also hold up well.
In business, timing is everything. We at PatchCrew have the flexibility to ensure that your asphalt patching needs are performed in a timely and efficient manner.
Utility, underground, electrical and plumbing contractors will find our services invaluable. Even paving contractors will find that PatchCrew can provide an important service for their company. Pulling your most experienced and skilled people from their trade to patch an asphalt trench costs you production, time, and money. Let PatchCrew do it for you! |
import argparse
from . import browser
latest_channels = {
'firefox': 'nightly',
'chrome': 'nightly',
'chrome_android': 'dev',
'edgechromium': 'dev',
'safari': 'preview',
'servo': 'nightly',
'webkitgtk_minibrowser': 'nightly'
}
channel_by_name = {
'stable': 'stable',
'release': 'stable',
'beta': 'beta',
'dev': 'dev',
'canary': 'canary',
'nightly': latest_channels,
'preview': latest_channels,
'experimental': latest_channels,
}
channel_args = argparse.ArgumentParser(add_help=False)
channel_args.add_argument('--channel', choices=channel_by_name.keys(),
default='nightly', action='store',
help='''
Name of browser release channel (default: nightly). "stable" and "release" are
synonyms for the latest browser stable release; "beta" is the beta release;
"dev" is only meaningful for Chrome (i.e. Chrome Dev); "nightly",
"experimental", and "preview" are all synonyms for the latest available
development or trunk release. (For WebDriver installs, we attempt to select an
appropriate, compatible version for the latest browser release on the selected
channel.) This flag overrides --browser-channel.''')
def get_parser():
parser = argparse.ArgumentParser(
parents=[channel_args],
description="Install a given browser or webdriver frontend.")
parser.add_argument('browser', choices=['firefox', 'chrome', 'servo'],
help='name of web browser product')
parser.add_argument('component', choices=['browser', 'webdriver'],
help='name of component')
parser.add_argument('--download-only', action="store_true",
help="Download the selected component but don't install it")
parser.add_argument('--rename', action="store", default=None,
help="Filename, excluding extension for downloaded archive "
"(only with --download-only)")
parser.add_argument('-d', '--destination',
help='filesystem directory to place the component')
return parser
def get_channel(browser, channel):
channel = channel_by_name[channel]
if isinstance(channel, dict):
channel = channel.get(browser)
return channel
def run(venv, **kwargs):
import logging
logger = logging.getLogger("install")
browser = kwargs["browser"]
destination = kwargs["destination"]
channel = get_channel(browser, kwargs["channel"])
if channel != kwargs["channel"]:
logger.info("Interpreting channel '%s' as '%s'", kwargs["channel"], channel)
if destination is None:
if venv:
if kwargs["component"] == "browser":
destination = venv.path
else:
destination = venv.bin_path
else:
raise argparse.ArgumentError(None,
"No --destination argument, and no default for the environment")
install(browser, kwargs["component"], destination, channel, logger=logger,
download_only=kwargs["download_only"], rename=kwargs["rename"])
def install(name, component, destination, channel="nightly", logger=None, download_only=False,
rename=None):
if logger is None:
import logging
logger = logging.getLogger("install")
prefix = "download" if download_only else "install"
suffix = "_webdriver" if component == 'webdriver' else ""
method = prefix + suffix
browser_cls = getattr(browser, name.title())
logger.info('Now installing %s %s...', name, component)
kwargs = {}
if download_only and rename:
kwargs["rename"] = rename
path = getattr(browser_cls(logger), method)(dest=destination, channel=channel, **kwargs)
if path:
logger.info('Binary %s as %s', "downloaded" if download_only else "installed", path)
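A quick sanity sketch of the channel alias resolution above (these follow directly from the two tables; not part of the original module):
assert get_channel('firefox', 'nightly') == 'nightly'
assert get_channel('edgechromium', 'preview') == 'dev'  # resolved via latest_channels
assert get_channel('chrome', 'stable') == 'stable'      # plain string, no dict lookup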
|
Get the most up-to-date Cisco 646-365 braindumps with the correct answers here! You will not find products of this quality anywhere else on the market. Save extra money by getting 3 months of free updates after purchasing the 646-365 braindump questions. Guaranteed pass with secure purchase and 24/7 professional service.
Everything you need to pass your Cisco 646-365 exam!
The most up-to-date 646-365 dumps, providing you the best possible training material.
Certification vendors continuously bring changes to the Cisco Express Foundation for Account Managers (CXFA) exam to make things different for candidates and to secure their Cisco 646-365 exam content. It is very important that you study the most up-to-date 646-365 dumps to ensure success. SureBraindumps offers this feature, and without any extra charge it gives you the most up-to-date 646-365 braindump answers available on the market.
The PDF version will allow you to study the exam material at ease, while the testing engine will help you simulate real exam scenarios, boosting your confidence before the exam by giving you an exact idea of what to expect. Now it's all up to you whether you want to spend your precious time and money on expensive 646-365 training courses or get the one-time package for a fraction of the cost. Moreover, our 646-365 exam material comes with a 100% pass guarantee, with your money back in case of failure.
Our Cisco 646-365 test dumps are produced by certified Cisco professionals to provide you with the best quality material. Forget about erroneous Cisco Express Foundation for Account Managers (CXFA) dumps with outdated answers and get supreme quality with SureBraindumps.
Every professional wants to be at the top of their organization. However, with the consecutive updates to the Cisco 646-365 certification, it is harder than ever to keep yourself current. If you want to get your 646-365 certification in the shortest possible time, SureBraindumps offers you the solution in the form of 646-365 brain dumps. You will get the most up-to-date Cisco 646-365 exam questions in the form of a PDF and a testing engine.
In which format do you provide the 646-365 dumps?
What is the quality of your 646-365 brain dumps?
What are the system requirements for installing the 646-365 software?
Our previous record shows that you have less than a 3% chance of failing your 646-365 exam. Over a period of 10 years, we have maintained a 97% success rate with our training material. Keeping this in view, we offer a 100% pass guarantee with your money back in case of failure. The money-back guarantee is valid for only 30 days after purchase; claims on purchases older than 30 days will not be entertained. Customers can contact [email protected] to claim the guarantee. Moreover, refund claims should be submitted within 7 days of receiving your official result.
When it comes to exam dumps, you want to make sure that you get the most up-to-date questions and answers. We at SureBraindumps absolutely assure you that you will get all the Cisco Express Foundation for Account Managers (CXFA) (646-365) course material needed to pass your certification exam. You don't have to spend your precious time and money on worthless online training programs. What you need is an all-in-one solution geared to get students trained and certified within the shortest time period. Whether you are going for a 646-365 certification or getting re-certified, SureBraindumps can help you achieve your goal in the shortest possible time.
The 646-365 braindumps PDF is edited by industry experts, providing you the best quality dumps, which you won't be able to find anywhere else on the market. The 646-365 braindumps are delivered in world-class testing software and have all the features necessary to meet your exam testing needs. Our 646-365 brain dumps come with a no-pass, no-pay guarantee, a claim confirmed by thousands of successful candidates. Since 2005, there have hardly been any Cisco Express Foundation for Account Managers (CXFA) 646-365 braindump providers on the market that offer this kind of guarantee; moreover, we have an experienced team to ensure our success and, most importantly, yours.
Practice is the most important part of preparing for the 646-365 exam. It will give you a huge advantage before taking the exam, and you will also get an idea of which types of questions are normally asked. When you prepare with original training material, you have a very good chance of passing your exam on the first attempt. Consult our experts if you have trouble finding genuine material for exam preparation. |
#!/usr/bin/env python
from __future__ import division
import time
import os.path
import h5py
import atexit
import trajoptpy
import numpy as np
from lfd.environment import sim_util
from lfd.environment import settings
from constants import MAX_ACTIONS_TO_TRY
from lfd.demonstration.demonstration import SceneState, GroundTruthRopeSceneState, AugmentedTrajectory, Demonstration
from lfd.environment.simulation import DynamicRopeSimulationRobotWorld
from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject, CylinderSimulationObject, RopeSimulationObject
from lfd.environment.environment import LfdEnvironment, GroundTruthRopeLfdEnvironment
from lfd.registration.registration import TpsRpmBijRegistrationFactory, TpsRpmRegistrationFactory, TpsSegmentRegistrationFactory, BatchGpuTpsRpmBijRegistrationFactory, BatchGpuTpsRpmRegistrationFactory
from lfd.transfer.transfer import PoseTrajectoryTransferer, FingerTrajectoryTransferer
from lfd.transfer.registration_transfer import TwoStepRegistrationAndTrajectoryTransferer, UnifiedRegistrationAndTrajectoryTransferer
from lfd.action_selection import GreedyActionSelection
from lfd.action_selection import FeatureActionSelection
from lfd.rapprentice import eval_util, util
from lfd.rapprentice import task_execution
from lfd.rapprentice.knot_classifier import isKnot as is_knot
from lfd.rapprentice.util import redprint, yellowprint
class GlobalVars:
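    """Module-level shared state, populated during setup (see
    set_global_vars and setup_log_file below)."""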
exec_log = None
actions = None
actions_cache = None
demos = None
features = None
def eval_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
"""TODO
Args:
action_selection: ActionSelection
reg_and_traj_transferer: RegistrationAndTrajectoryTransferer
lfd_env: LfdEnvironment
sim: DynamicSimulation
"""
holdoutfile = h5py.File(args.eval.holdoutfile, 'r')
holdout_items = eval_util.get_indexed_items(holdoutfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
rope_params = sim_util.RopeParams()
if args.eval.rope_param_radius is not None:
rope_params.radius = args.eval.rope_param_radius
if args.eval.rope_param_angStiffness is not None:
rope_params.angStiffness = args.eval.rope_param_angStiffness
num_successes = 0
num_total = 0
for i_task, demo_id_rope_nodes in holdout_items:
redprint("task %s" % i_task)
init_rope_nodes = demo_id_rope_nodes["rope_nodes"][:]
rope = RopeSimulationObject("rope", init_rope_nodes, rope_params)
sim.add_objects([rope])
sim.settle(step_viewer=args.animation)
for i_step in range(args.eval.num_steps):
redprint("task %s step %i" % (i_task, i_step))
sim_util.reset_arms_to_side(sim)
if args.animation:
sim.viewer.Step()
sim_state = sim.get_state()
sim.set_state(sim_state)
scene_state = lfd_env.observe_scene()
# plot cloud of the test scene
handles = []
if args.plotting:
handles.append(sim.env.plot3(scene_state.cloud[:,:3], 2, scene_state.color if scene_state.color is not None else (0,0,1)))
sim.viewer.Step()
eval_stats = eval_util.EvalStats()
start_time = time.time()
if len(scene_state.cloud) == 0:
redprint("Detected 0 points in scene")
break
try:
(agenda, q_values_root), goal_found = action_selection.plan_agenda(scene_state, i_step)
except ValueError: #e.g. if cloud is empty - any action is hopeless
redprint("**Raised Value Error during action selection")
break
eval_stats.action_elapsed_time += time.time() - start_time
eval_stats.generalized = True
num_actions_to_try = MAX_ACTIONS_TO_TRY if args.eval.search_until_feasible else 1
for i_choice in range(num_actions_to_try):
if q_values_root[i_choice] == -np.inf: # none of the demonstrations generalize
eval_stats.generalized = False
break
redprint("TRYING %s"%agenda[i_choice])
best_root_action = str(agenda[i_choice])
start_time = time.time()
try:
test_aug_traj = reg_and_traj_transferer.transfer(GlobalVars.demos[best_root_action], scene_state, plotting=args.plotting)
                    except ValueError: # e.g. the cloud or trajectory is empty
redprint("**Raised value error during traj transfer")
break
eval_stats.feasible, eval_stats.misgrasp = lfd_env.execute_augmented_trajectory(test_aug_traj, step_viewer=args.animation, interactive=args.interactive, check_feasible=args.eval.check_feasible)
eval_stats.exec_elapsed_time += time.time() - start_time
if not args.eval.check_feasible or eval_stats.feasible: # try next action if TrajOpt cannot find feasible action and we care about feasibility
break
else:
sim.set_state(sim_state)
knot = is_knot(rope.rope.GetControlPoints())
results = {'scene_state':scene_state, 'best_action':best_root_action, 'values':q_values_root, 'aug_traj':test_aug_traj, 'eval_stats':eval_stats, 'sim_state':sim_state, 'knot':knot, 'goal_found': goal_found}
eval_util.save_task_results_step(args.resultfile, i_task, i_step, results)
if not eval_stats.generalized:
assert not knot
break
if args.eval.check_feasible and not eval_stats.feasible:
# Skip to next knot tie if the action is infeasible -- since
# that means all future steps (up to 5) will have infeasible trajectories
assert not knot
break
if knot:
num_successes += 1
                break
sim.remove_objects([rope])
num_total += 1
redprint('Eval Successes / Total: ' + str(num_successes) + '/' + str(num_total))
redprint('Success Rate: ' + str(float(num_successes)/num_total))
def eval_on_holdout_parallel(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
raise NotImplementedError
# holdoutfile = h5py.File(args.eval.holdoutfile, 'r')
# holdout_items = eval_util.get_indexed_items(holdoutfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
#
# rope_params = sim_util.RopeParams()
# if args.eval.rope_param_radius is not None:
# rope_params.radius = args.eval.rope_param_radius
# if args.eval.rope_param_angStiffness is not None:
# rope_params.angStiffness = args.eval.rope_param_angStiffness
#
# batch_transfer_simulate = BatchTransferSimulate(transfer, lfd_env)
#
# states = {}
# q_values_roots = {}
# best_root_actions = {}
# state_id2i_task = {}
# results = {}
# successes = {}
# for i_step in range(args.eval.num_steps):
# for i_task, demo_id_rope_nodes in holdout_items:
# if i_task in successes:
# # task already finished
# continue
#
# redprint("task %s step %i" % (i_task, i_step))
#
# if i_step == 0:
# sim_util.reset_arms_to_side(lfd_env)
#
# init_rope_nodes = demo_id_rope_nodes["rope_nodes"][:]
# lfd_env.set_rope_state(RopeState(init_rope_nodes, rope_params))
# states[i_task] = {}
# states[i_task][i_step] = lfd_env.observe_scene(**vars(args.eval))
# best_root_actions[i_task] = {}
# q_values_roots[i_task] = {}
# results[i_task] = {}
#
# if args.animation:
# lfd_env.viewer.Step()
#
# state = states[i_task][i_step]
#
# num_actions_to_try = MAX_ACTIONS_TO_TRY if args.eval.search_until_feasible else 1
#
# agenda, q_values_root = select_best(args.eval, state, batch_transfer_simulate) # TODO fix select_best to handle batch_transfer_simulate
# q_values_roots[i_task][i_step] = q_values_root
#
# i_choice = 0
# if q_values_root[i_choice] == -np.inf: # none of the demonstrations generalize
# successes[i_task] = False
# continue
#
# best_root_action = agenda[i_choice]
# best_root_actions[i_task][i_step] = best_root_action
#
# next_state_id = SceneState.get_unique_id()
# batch_transfer_simulate.queue_transfer_simulate(state, best_root_action, next_state_id)
#
# state_id2i_task[next_state_id] = i_task
#
# batch_transfer_simulate.wait_while_queue_is_nonempty()
# for result in batch_transfer_simulate.get_results():
# i_task = state_id2i_task[result.state.id]
# results[i_task][i_step] = result
#
# for i_task, demo_id_rope_nodes in holdout_items:
# if i_task in successes:
# # task already finished
# continue
#
# result = results[i_task][i_step]
# eval_stats = eval_util.EvalStats()
# eval_stats.success, eval_stats.feasible, eval_stats.misgrasp, full_trajs, next_state = result.success, result.feasible, result.misgrasp, result.full_trajs, result.state
# # TODO eval_stats.exec_elapsed_time
#
# if not eval_stats.feasible: # If not feasible, restore state
# next_state = states[i_task][i_step]
#
# state = states[i_task][i_step]
# best_root_action = best_root_actions[i_task][i_step]
# q_values_root = q_values_roots[i_task][i_step]
# eval_util.save_task_results_step(args.resultfile, i_task, i_step, state, best_root_action, q_values_root, full_trajs, next_state, eval_stats, new_cloud_ds=state.cloud, new_rope_nodes=state.rope_nodes)
#
# states[i_task][i_step+1] = next_state
#
# if not eval_stats.feasible:
# successes[i_task] = False
# # Skip to next knot tie if the action is infeasible -- since
# # that means all future steps (up to 5) will have infeasible trajectories
# continue
#
# if is_knot(next_state.rope_nodes):
# successes[i_task] = True
# continue
#
# if i_step == args.eval.num_steps - 1:
# for i_task, demo_id_rope_nodes in holdout_items:
# if i_task not in successes:
# # task ran out of steps
# successes[i_task] = False
#
# num_successes = np.sum(successes.values())
# num_total = len(successes)
# redprint('Eval Successes / Total: ' + str(num_successes) + '/' + str(num_total))
def replay_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim):
loadresultfile = h5py.File(args.replay.loadresultfile, 'r')
loadresult_items = eval_util.get_indexed_items(loadresultfile, task_list=args.tasks, task_file=args.taskfile, i_start=args.i_start, i_end=args.i_end)
num_successes = 0
num_total = 0
for i_task, task_info in loadresult_items:
redprint("task %s" % i_task)
for i_step in range(len(task_info)):
redprint("task %s step %i" % (i_task, i_step))
replay_results = eval_util.load_task_results_step(args.replay.loadresultfile, i_task, i_step)
sim_state = replay_results['sim_state']
if i_step > 0: # sanity check for reproducibility
sim_util.reset_arms_to_side(sim)
if sim.simulation_state_equal(sim_state, sim.get_state()):
yellowprint("Reproducible results OK")
else:
yellowprint("The replayed simulation state doesn't match the one from the result file")
sim.set_state(sim_state)
if args.replay.simulate_traj_steps is not None and i_step not in args.replay.simulate_traj_steps:
continue
if i_step in args.replay.compute_traj_steps: # compute the trajectory in this step
best_root_action = replay_results['best_action']
scene_state = replay_results['scene_state']
# plot cloud of the test scene
handles = []
if args.plotting:
handles.append(sim.env.plot3(scene_state.cloud[:,:3], 2, scene_state.color if scene_state.color is not None else (0,0,1)))
sim.viewer.Step()
test_aug_traj = reg_and_traj_transferer.transfer(GlobalVars.demos[best_root_action], scene_state, plotting=args.plotting)
else:
test_aug_traj = replay_results['aug_traj']
feasible, misgrasp = lfd_env.execute_augmented_trajectory(test_aug_traj, step_viewer=args.animation, interactive=args.interactive, check_feasible=args.eval.check_feasible)
if replay_results['knot']:
num_successes += 1
num_total += 1
redprint('REPLAY Successes / Total: ' + str(num_successes) + '/' + str(num_total))
def parse_input_args():
parser = util.ArgumentParser()
parser.add_argument("--animation", type=int, default=0, help="animates if it is non-zero. the viewer is stepped according to this number")
parser.add_argument("--plotting", type=int, default=1, help="plots if animation != 0 and plotting != 0")
parser.add_argument("--interactive", action="store_true", help="step animation and optimization if specified")
parser.add_argument("--resultfile", type=str, help="no results are saved if this is not specified")
# selects tasks to evaluate/replay
parser.add_argument("--tasks", type=int, nargs='*', metavar="i_task")
parser.add_argument("--taskfile", type=str)
parser.add_argument("--i_start", type=int, default=-1, metavar="i_task")
parser.add_argument("--i_end", type=int, default=-1, metavar="i_task")
parser.add_argument("--camera_matrix_file", type=str, default='../.camera_matrix.txt')
parser.add_argument("--window_prop_file", type=str, default='../.win_prop.txt')
parser.add_argument("--random_seed", type=int, default=None)
parser.add_argument("--log", type=str, default="")
subparsers = parser.add_subparsers(dest='subparser_name')
# arguments for eval
parser_eval = subparsers.add_parser('eval')
parser_eval.add_argument('actionfile', type=str, nargs='?', default='../bigdata/misc/overhand_actions.h5')
parser_eval.add_argument('holdoutfile', type=str, nargs='?', default='../bigdata/misc/holdout_set_Jun20_0.10.h5')
parser.add_argument("--landmarkfile", type=str, default='../data/misc/landmarks.h5')
parser_eval.add_argument('action_selection', type=str, nargs='?', choices=['greedy', 'feature'])
parser_eval.add_argument('--weightfile', type=str, default='')
parser_eval.add_argument('--feature_type', type=str, nargs='?', choices=['base', 'mul', 'mul_quad', 'mul_quad_ind', 'mul_quad_bendind', 'mul_quad_mapind', 'mul_s', 'mul_grip', 'mul_s_map', 'landmark', 'timestep'], default='base')
parser_eval.add_argument("transferopt", type=str, nargs='?', choices=['pose', 'finger'], default='finger')
parser_eval.add_argument("reg_type", type=str, choices=['segment', 'rpm', 'bij'], default='bij')
parser_eval.add_argument("--unified", type=int, default=0)
parser_eval.add_argument("--obstacles", type=str, nargs='*', choices=['bookshelve', 'boxes', 'cylinders'], default=[])
parser_eval.add_argument("--downsample", type=int, default=1)
parser_eval.add_argument("--downsample_size", type=float, default=0.025)
parser_eval.add_argument("--upsample", type=int, default=0)
parser_eval.add_argument("--upsample_rad", type=int, default=1, help="upsample_rad > 1 incompatible with downsample != 0")
parser_eval.add_argument("--ground_truth", type=int, default=0)
parser_eval.add_argument("--fake_data_segment",type=str, default='demo1-seg00')
parser_eval.add_argument("--fake_data_transform", type=float, nargs=6, metavar=("tx","ty","tz","rx","ry","rz"),
default=[0,0,0,0,0,0], help="translation=(tx,ty,tz), axis-angle rotation=(rx,ry,rz)")
parser_eval.add_argument("--search_until_feasible", action="store_true")
parser_eval.add_argument("--check_feasible", type=int, default=0)
parser_eval.add_argument("--width", type=int, default=1)
parser_eval.add_argument("--depth", type=int, default=0)
parser_eval.add_argument("--alpha", type=float, default=1000000.0)
parser_eval.add_argument("--beta_pos", type=float, default=1000000.0)
parser_eval.add_argument("--beta_rot", type=float, default=100.0)
parser_eval.add_argument("--gamma", type=float, default=1000.0)
parser_eval.add_argument("--use_collision_cost", type=int, default=1)
parser_eval.add_argument("--num_steps", type=int, default=5, help="maximum number of steps to simulate each task")
parser_eval.add_argument("--dof_limits_factor", type=float, default=1.0)
parser_eval.add_argument("--rope_param_radius", type=str, default=None)
parser_eval.add_argument("--rope_param_angStiffness", type=str, default=None)
parser_eval.add_argument("--use_color", type=int, default=0)
parser_eval.add_argument("--parallel", action="store_true")
parser_eval.add_argument("--batch", action="store_true", default=False)
parser_replay = subparsers.add_parser('replay')
parser_replay.add_argument("loadresultfile", type=str)
parser_replay.add_argument("--compute_traj_steps", type=int, default=[], nargs='*', metavar='i_step', help="recompute trajectories for the i_step of all tasks")
parser_replay.add_argument("--simulate_traj_steps", type=int, default=None, nargs='*', metavar='i_step',
help="if specified, restore the rope state from file and then simulate for the i_step of all tasks")
# if not specified, the rope state is not restored from file, but it is as given by the sequential simulation
args = parser.parse_args()
if not args.animation:
args.plotting = 0
return args
def setup_log_file(args):
if args.log:
redprint("Writing log to file %s" % args.log)
GlobalVars.exec_log = task_execution.ExecutionLog(args.log)
atexit.register(GlobalVars.exec_log.close)
GlobalVars.exec_log(0, "main.args", args)
def set_global_vars(args):
if args.random_seed is not None: np.random.seed(args.random_seed)
GlobalVars.actions = h5py.File(args.eval.actionfile, 'r')
actions_root, actions_ext = os.path.splitext(args.eval.actionfile)
GlobalVars.actions_cache = h5py.File(actions_root + '.cache' + actions_ext, 'a')
GlobalVars.demos = {}
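    # Build a Demonstration (scene state plus augmented arm/gripper
    # trajectory) for every action stored in the actions h5 file.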
for action, seg_info in GlobalVars.actions.iteritems():
if args.eval.ground_truth:
rope_nodes = seg_info['rope_nodes'][()]
scene_state = GroundTruthRopeSceneState(rope_nodes, settings.ROPE_RADIUS, upsample=args.eval.upsample, upsample_rad=args.eval.upsample_rad, downsample_size=args.eval.downsample_size)
else:
full_cloud = seg_info['cloud_xyz'][()]
scene_state = SceneState(full_cloud, downsample_size=args.eval.downsample_size)
lr2arm_traj = {}
lr2finger_traj = {}
lr2ee_traj = {}
lr2open_finger_traj = {}
lr2close_finger_traj = {}
for lr in 'lr':
arm_name = {"l":"leftarm", "r":"rightarm"}[lr]
lr2arm_traj[lr] = np.asarray(seg_info[arm_name])
lr2finger_traj[lr] = sim_util.gripper_joint2gripper_l_finger_joint_values(np.asarray(seg_info['%s_gripper_joint'%lr]))[:,None]
lr2ee_traj[lr] = np.asarray(seg_info["%s_gripper_tool_frame"%lr]['hmat'])
lr2open_finger_traj[lr] = np.zeros(len(lr2finger_traj[lr]), dtype=bool)
lr2close_finger_traj[lr] = np.zeros(len(lr2finger_traj[lr]), dtype=bool)
opening_inds, closing_inds = sim_util.get_opening_closing_inds(lr2finger_traj[lr])
# # opening_inds/closing_inds are indices before the opening/closing happens, so increment those indices (if they are not out of bound)
# opening_inds = np.clip(opening_inds+1, 0, len(lr2finger_traj[lr])-1) # TODO figure out if +1 is necessary
# closing_inds = np.clip(closing_inds+1, 0, len(lr2finger_traj[lr])-1)
lr2open_finger_traj[lr][opening_inds] = True
lr2close_finger_traj[lr][closing_inds] = True
aug_traj = AugmentedTrajectory(lr2arm_traj=lr2arm_traj, lr2finger_traj=lr2finger_traj, lr2ee_traj=lr2ee_traj, lr2open_finger_traj=lr2open_finger_traj, lr2close_finger_traj=lr2close_finger_traj)
demo = Demonstration(action, scene_state, aug_traj)
GlobalVars.demos[action] = demo
def setup_lfd_environment_sim(args):
actions = h5py.File(args.eval.actionfile, 'r')
init_rope_xyz, init_joint_names, init_joint_values = sim_util.load_fake_data_segment(actions, args.eval.fake_data_segment, args.eval.fake_data_transform)
table_height = init_rope_xyz[:,2].mean() - .02
sim_objs = []
sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False))
sim_objs.append(BoxSimulationObject("table", [1, 0, table_height + (-.1 + .01)], [.85, .85, .1], dynamic=False))
if 'bookshelve' in args.eval.obstacles:
sim_objs.append(XmlSimulationObject("../data/bookshelve.env.xml", dynamic=False))
if 'boxes' in args.eval.obstacles:
sim_objs.append(BoxSimulationObject("box0", [.7,.43,table_height+(.01+.12)], [.12,.12,.12], dynamic=False))
sim_objs.append(BoxSimulationObject("box1", [.74,.47,table_height+(.01+.12*2+.08)], [.08,.08,.08], dynamic=False))
if 'cylinders' in args.eval.obstacles:
sim_objs.append(CylinderSimulationObject("cylinder0", [.7,.43,table_height+(.01+.5)], .12, 1., dynamic=False))
sim_objs.append(CylinderSimulationObject("cylinder1", [.7,-.43,table_height+(.01+.5)], .12, 1., dynamic=False))
sim_objs.append(CylinderSimulationObject("cylinder2", [.4,.2,table_height+(.01+.65)], .06, .5, dynamic=False))
sim_objs.append(CylinderSimulationObject("cylinder3", [.4,-.2,table_height+(.01+.65)], .06, .5, dynamic=False))
sim = DynamicRopeSimulationRobotWorld()
world = sim
sim.add_objects(sim_objs)
if args.eval.ground_truth:
lfd_env = GroundTruthRopeLfdEnvironment(sim, world, upsample=args.eval.upsample, upsample_rad=args.eval.upsample_rad, downsample_size=args.eval.downsample_size)
else:
lfd_env = LfdEnvironment(sim, world, downsample_size=args.eval.downsample_size)
dof_inds = sim_util.dof_inds_from_name(sim.robot, '+'.join(init_joint_names))
values, dof_inds = zip(*[(value, dof_ind) for value, dof_ind in zip(init_joint_values, dof_inds) if dof_ind != -1])
sim.robot.SetDOFValues(values, dof_inds) # this also sets the torso (torso_lift_joint) to the height in the data
sim_util.reset_arms_to_side(sim)
if args.animation:
viewer = trajoptpy.GetViewer(sim.env)
if os.path.isfile(args.window_prop_file) and os.path.isfile(args.camera_matrix_file):
print "loading window and camera properties"
window_prop = np.loadtxt(args.window_prop_file)
camera_matrix = np.loadtxt(args.camera_matrix_file)
try:
viewer.SetWindowProp(*window_prop)
viewer.SetCameraManipulatorMatrix(camera_matrix)
            except AttributeError:
print "SetWindowProp and SetCameraManipulatorMatrix are not defined. Pull and recompile Trajopt."
else:
print "move viewer to viewpoint that isn't stupid"
print "then hit 'p' to continue"
viewer.Idle()
print "saving window and camera properties"
try:
window_prop = viewer.GetWindowProp()
camera_matrix = viewer.GetCameraManipulatorMatrix()
np.savetxt(args.window_prop_file, window_prop, fmt='%d')
np.savetxt(args.camera_matrix_file, camera_matrix)
            except AttributeError:
print "GetWindowProp and GetCameraManipulatorMatrix are not defined. Pull and recompile Trajopt."
viewer.Step()
if args.eval.dof_limits_factor != 1.0:
assert 0 < args.eval.dof_limits_factor and args.eval.dof_limits_factor <= 1.0
active_dof_indices = sim.robot.GetActiveDOFIndices()
active_dof_limits = sim.robot.GetActiveDOFLimits()
for lr in 'lr':
manip_name = {"l":"leftarm", "r":"rightarm"}[lr]
dof_inds = sim.robot.GetManipulator(manip_name).GetArmIndices()
limits = np.asarray(sim.robot.GetDOFLimits(dof_inds))
limits_mean = limits.mean(axis=0)
limits_width = np.diff(limits, axis=0)
new_limits = limits_mean + args.eval.dof_limits_factor * np.r_[-limits_width/2.0, limits_width/2.0]
for i, ind in enumerate(dof_inds):
active_dof_limits[0][active_dof_indices.tolist().index(ind)] = new_limits[0,i]
active_dof_limits[1][active_dof_indices.tolist().index(ind)] = new_limits[1,i]
sim.robot.SetDOFLimits(active_dof_limits[0], active_dof_limits[1])
return lfd_env, sim
def setup_registration_and_trajectory_transferer(args, sim):
if args.eval.batch:
if args.eval.reg_type == 'rpm':
reg_factory = BatchGpuTpsRpmRegistrationFactory(GlobalVars.demos, args.eval.actionfile)
elif args.eval.reg_type == 'bij':
reg_factory = BatchGpuTpsRpmBijRegistrationFactory(GlobalVars.demos, args.eval.actionfile)
else:
raise RuntimeError("Invalid reg_type option %s"%args.eval.reg_type)
else:
if args.eval.reg_type == 'segment':
reg_factory = TpsSegmentRegistrationFactory(GlobalVars.demos)
elif args.eval.reg_type == 'rpm':
reg_factory = TpsRpmRegistrationFactory(GlobalVars.demos)
elif args.eval.reg_type == 'bij':
reg_factory = TpsRpmBijRegistrationFactory(GlobalVars.demos, actionfile=args.eval.actionfile)
else:
raise RuntimeError("Invalid reg_type option %s"%args.eval.reg_type)
if args.eval.transferopt == 'pose' or args.eval.transferopt == 'finger':
traj_transferer = PoseTrajectoryTransferer(sim, args.eval.beta_pos, args.eval.beta_rot, args.eval.gamma, args.eval.use_collision_cost)
if args.eval.transferopt == 'finger':
traj_transferer = FingerTrajectoryTransferer(sim, args.eval.beta_pos, args.eval.gamma, args.eval.use_collision_cost, init_trajectory_transferer=traj_transferer)
else:
raise RuntimeError("Invalid transferopt option %s"%args.eval.transferopt)
if args.eval.unified:
reg_and_traj_transferer = UnifiedRegistrationAndTrajectoryTransferer(reg_factory, traj_transferer)
else:
reg_and_traj_transferer = TwoStepRegistrationAndTrajectoryTransferer(reg_factory, traj_transferer)
return reg_and_traj_transferer
def get_features(args):
feat_type = args.eval.feature_type
if feat_type== 'base':
from lfd.mmqe.features import BatchRCFeats as feat
elif feat_type == 'mul':
from lfd.mmqe.features import MulFeats as feat
elif feat_type == 'mul_quad':
from lfd.mmqe.features import QuadSimpleMulFeats as feat
elif feat_type == 'mul_quad_ind':
from lfd.mmqe.features import QuadSimpleMulIndFeats as feat
elif feat_type == 'mul_quad_mapind':
from lfd.mmqe.features import QuadSimpleMulMapIndFeats as feat
elif feat_type == 'mul_quad_bendind':
from lfd.mmqe.features import QuadSimpleMulBendIndFeats as feat
elif feat_type == 'mul_s':
from lfd.mmqe.features import SimpleMulFeats as feat
elif feat_type == 'mul_grip':
from lfd.mmqe.features import SimpleMulGripperFeats as feat
elif feat_type == 'mul_s_map':
from lfd.mmqe.features import SimpleMulMapIndFeats as feat
elif feat_type == 'landmark':
from lfd.mmqe.features import LandmarkFeats as feat
elif feat_type == 'timestep':
from lfd.mmqe.features import TimestepActionMulFeats as feat
else:
raise ValueError('Incorrect Feature Type')
feats = feat(args.eval.actionfile)
try:
feats.set_landmark_file(args.landmarkfile)
except AttributeError:
pass
if args.eval.weightfile:
feats.load_weights(args.eval.weightfile)
GlobalVars.features = feats
return feats
def main():
args = parse_input_args()
if args.subparser_name == "eval":
eval_util.save_results_args(args.resultfile, args)
elif args.subparser_name == "replay":
loaded_args = eval_util.load_results_args(args.replay.loadresultfile)
assert 'eval' not in vars(args)
args.eval = loaded_args.eval
else:
raise RuntimeError("Invalid subparser name")
setup_log_file(args)
set_global_vars(args)
trajoptpy.SetInteractive(args.interactive)
lfd_env, sim = setup_lfd_environment_sim(args)
reg_and_traj_transferer = setup_registration_and_trajectory_transferer(args, sim)
if args.eval.action_selection == 'feature':
get_features(args)
if args.eval.action_selection == 'greedy':
action_selection = GreedyActionSelection(reg_and_traj_transferer.registration_factory)
else:
action_selection = FeatureActionSelection(reg_and_traj_transferer.registration_factory, GlobalVars.features, GlobalVars.actions, GlobalVars.demos, simulator=reg_and_traj_transferer, lfd_env=lfd_env, width=args.eval.width, depth=args.eval.depth)
if args.subparser_name == "eval":
start = time.time()
if args.eval.parallel:
eval_on_holdout_parallel(args, action_selection, reg_and_traj_transferer, lfd_env, sim)
else:
eval_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim)
print "eval time is:\t{}".format(time.time() - start)
elif args.subparser_name == "replay":
replay_on_holdout(args, action_selection, reg_and_traj_transferer, lfd_env, sim)
else:
raise RuntimeError("Invalid subparser name")
if __name__ == "__main__":
main()
|
Welcome! Mariya Club is an international dating service designed to help singles find their perfect match and marry Ukrainian or Russian Women.
Official Website of Divya Bharti : The Exclusive Archive of Divya Bharti, Hindi Films, Wallpapers, Galleries, News and Death Controversy. |
'''Arsenal API ENC for puppet.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from arsenalweb.models.common import (
DBSession,
)
from arsenalweb.models.nodes import (
Node,
)
from arsenalweb.views.api.common import (
api_200,
api_400,
api_500,
api_501,
)
from arsenalweb.views.api.data_centers import (
find_data_center_by_id,
)
LOG = logging.getLogger(__name__)
def find_node_by_name_and_status(settings, node_name):
'''Find a node by name, filtered by statuses'''
try:
status_ids = [s for s in settings['arsenal.enc.status_ids'].splitlines() if s]
except KeyError as ex:
msg = 'You must define arsenal.enc.status_ids in the main settings file to ' \
'enable the enc.'
LOG.error(msg)
raise type(ex)(ex.message + ' {0}'.format(msg))
node = DBSession.query(Node)
node = node.filter(Node.status_id.in_(status_ids))
node = node.filter(Node.name == node_name)
return node.one()
def process_tags(tags, tag_type):
    '''Process tags. Values of 'True' or 'False' are converted to booleans,
    values that parse as integers are converted to int, and anything else is
    returned as-is.'''
results = {}
for tag in tags:
LOG.debug('{0} tag: {1}={2}'.format(tag_type, tag.name, tag.value))
        if tag.value == 'True':
            results[tag.name] = True
        elif tag.value == 'False':
            results[tag.name] = False
else:
try:
my_value = tag.value
my_value = int(my_value)
except ValueError:
pass
results[tag.name] = my_value
return results
def process_node_enc(settings, node_name, param_sources=False):
'''Process enc for node. Merges tags from the following three
objects in order from least to most specific:
node_group
data_center
node
    When a node belongs to multiple node groups, their tags are merged in
    iteration order, so later groups overwrite earlier ones.'''
results = {}
results['classes'] = []
results['parameters'] = {}
results['status'] = {
'name': None,
}
if param_sources:
results['param_sources'] = {}
try:
node = find_node_by_name_and_status(settings, node_name)
results['name'] = node.name
results['id'] = node.id
results['status'] = node.status
LOG.debug('node name is: {0}'.format(node.name))
LOG.debug('node datacenter is: {0}'.format(node.data_center_id))
        # Tags from each node group are merged in iteration order; on key
        # collisions, later groups overwrite earlier ones.
for node_group in node.node_groups:
LOG.debug('node_group: {0}'.format(node_group.name))
results['classes'].append(node_group.name)
my_tags = process_tags(node_group.tags, 'node_group')
results['parameters'].update(my_tags)
if param_sources:
for tag in my_tags:
results['param_sources'][tag] = 'node_group'
data_center = find_data_center_by_id(node.data_center_id)
my_tags = process_tags(data_center.tags, 'data_center')
results['parameters'].update(my_tags)
if param_sources:
for tag in my_tags:
results['param_sources'][tag] = 'data_center'
my_tags = process_tags(node.tags, 'node')
results['parameters'].update(my_tags)
if param_sources:
for tag in my_tags:
results['param_sources'][tag] = 'node'
except NoResultFound:
LOG.debug('node not found: {0}'.format(node_name))
except (AttributeError, KeyError):
raise
return results
@view_config(route_name='api_enc', request_method='GET', renderer='json')
def api_enc(request):
'''External node classifier for puppet. Takes a required request parameter
    'name', finds all node_groups associated with the node, and all tags merged
based on the following hierarchy:
node_group
data_center
node
Optional request parameter 'param_sources' will add an additional key that
identifies what level of the hierarchy each tag comes from. Returns a
dict.'''
settings = request.registry.settings
try:
try:
name = request.params['name']
except KeyError as ex:
msg = "Bad Request. Parameter 'name' is required."
LOG.error(msg)
return api_400(msg=msg)
try:
param_sources = request.params['param_sources']
except KeyError:
param_sources = False
LOG.debug('Starting enc for node: {0}'.format(name))
try:
results = process_node_enc(settings, name, param_sources=param_sources)
except (AttributeError, KeyError) as ex:
return api_501(msg=repr(ex))
except Exception as ex:
msg = 'Error calling enc! Exception: {0}'.format(repr(ex))
LOG.error(msg)
return api_500(msg=msg)
return api_200(results=results)
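For orientation, a hedged sketch of the merged structure api_enc returns for one node; every field value below is hypothetical:
example_enc_result = {
    'name': 'web0001.example.com',      # hypothetical node name
    'id': 42,
    'status': {'name': 'inservice'},
    'classes': ['web_servers'],         # one entry per node_group
    'parameters': {'role': 'web'},      # node_group -> data_center -> node merge
    'param_sources': {'role': 'node'},  # only present when param_sources is requested
}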
|
If you have been following my blog for a while, you are probably familiar with my income report posts. Even though I haven't reached a point where my income reports can inspire anyone, I've been sharing them anyway as a way of documenting my journey as a full-time blogger and as a way to tell newbie bloggers that nothing happens overnight; it happens gradually, but it does happen. I barely made any money from my blog for around a year, and I hadn't even started receiving PR samples when I left my job as a Senior Data Analyst, but I knew I could do it. Now, even though my income is not huge, I'm still more than satisfied that I'm making this money doing something I love, rather than making a bigger amount doing something I dislike. And if you are a newbie blogger or a person with a creative soul who wants to do something different, I just want to tell you to believe in yourself, because that's what I did when I left my job.
So anyway, back to my income report. I've worked on various projects this August; however, I haven't received half of the payments yet, since most of my sponsors process payments once or twice a month. If you have read my previous reports, you'll know that I only include transactions that have already been completed and leave the pending payments for the next month.
I also haven't invested in products, as my current list of products yet to be reviewed has crossed 60, and I'm looking forward to investing in some high-end products in the following months. On the other hand, it was my blog anniversary (which I couldn't manage to celebrate) and I had to renew my hosting and domain, so it's probably a good thing that I didn't invest in products this month.
As you can see, I'm not left with much this month, but that's okay; at least I could renew my domain and hosting with the money I made from my blog (unlike in previous years). |
"""Chord processing utilities.
A library of utility functions used throughout the Jazz Parser relating
to chord processing in the input.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
import xml.dom.minidom
import re, copy
import logging
# Get the logger from the logging system
logger = logging.getLogger("main_logger")
# Conversions between Lilypond notes and their numeric representation
ly_note_to_int = {"c" : 0, "C" : 0,\
"d" : 2, "D" : 2,\
"e" : 4, "E" : 4,\
"f" : 5, "F" : 5,\
"g" : 7, "G" : 7,\
"a" : 9, "A" : 9,\
"b" : 11, "B" : 11,\
"r" : None }
ly_note_to_base_int = {"c" : 0, "C" : 0,\
"d" : 1, "D" : 1,\
"e" : 2, "E" : 2,\
"f" : 3, "F" : 3,\
"g" : 4, "G" : 4,\
"a" : 5, "A" : 5,\
"b" : 6, "B" : 6,\
"r" : None }
int_to_ly_note = { 0 : "c",\
1 : "cis",\
2 : "d",\
3 : "dis",\
4 : "e",\
5 : "f",\
6 : "fis",\
7 : "g",\
8 : "gis",\
9 : "a",\
10: "ais",\
11: "b",\
None: "r"}
int_to_note_name = { 0 : "C", \
1 : "Db", \
2 : "D", \
3 : "Eb", \
4 : "E", \
5 : "F", \
6 : "Gb", \
7 : "G", \
8 : "Ab", \
9 : "A", \
10: "Bb", \
11: "B" }
ROMAN_NUMERALS = { 0 : "I",
1 : "bII",
2 : "II",
3 : "bIII",
4 : "III",
5 : "IV",
6 : "#IV",
7 : "V",
8 : "bVI",
9 : "VI",
10 : "bVII",
11 : "VII" }
def chord_numeral_to_int(chord_numeral, strict=False):
"""
Given a chord numeral (e.g. "I" or "bVII"), returns the integer
that corresponds to this chord root.
Returns None if input is either a chord variable ("X", "Y") or
itself None.
If strict is set, doesn't allow variable names.
"""
if strict:
numerals = { "I" : 0,
"II" : 2,
"III" : 4,
"IV" : 5,
"V" : 7,
"VI" : 9,
"VII" : 11, }
        root_pattern = re.compile(r'^([b#]?)(I{1,3}|I?V|VI{0,2})$')
else:
# Map roman numerals to numbers
numerals = { "I" : 0,
"II" : 2,
"III" : 4,
"IV" : 5,
"V" : 7,
"VI" : 9,
"VII" : 11,
"X" : None,
"Y" : None,
"Z" : None,
None : None }
        # Use a regular expression to split the chord root into
        # its accidental and numeral.
        root_pattern = re.compile(r'^([b#]?)(I{1,3}|I?V|VI{0,2}|X|Y|Z)$')
# Map accidentals to a numeric adjustment
accidentals = { "#" : 1, "" : 0, "b" : -1 }
result = root_pattern.search(chord_numeral)
if result is None:
raise ChordError, "The string '%s' cannot be parsed as a chord" % chord_numeral
result = result.groups()
accidental = result[0]
numeral = result[1]
# Map the root name to a number
if numeral not in numerals:
raise ChordError, "Chord numeral \"%s\" was not recognised." % numeral
chord_num = numerals[numeral]
# Adjust this number according to the accidental
if chord_num is not None:
if accidental not in accidentals:
raise ChordError, "Accidental \"%s\" was not recognised." \
% accidental
chord_num += accidentals[accidental]
return chord_num
def pitch_class_to_int(chord_numeral):
""" Like L{chord_numeral_to_int}, but for pitch class labels. """
pcs = { "C" : 0,
"D" : 2,
"E" : 4,
"F" : 5,
"G" : 7,
"A" : 9,
"B" : 11, }
root_pattern = re.compile(r'^([A-G])(b*|\#*)$')
result = root_pattern.search(chord_numeral)
if result is None:
raise ChordError, "The string '%s' cannot be parsed as a chord" % \
chord_numeral
pc_str,accidental_str = result.groups()
pc = pcs[pc_str]
# Adjust this number according to the accidentals
if accidental_str:
if accidental_str[0] == "#":
pc += len(accidental_str)
elif accidental_str[0] == "b":
pc -= len(accidental_str)
return pc % 12
def int_to_chord_numeral(chord_int):
"""
Given an internal integer representation of a chord root (i.e. a
note of the scale), returns the roman numeral as a string. This
will always use the same convention for #s and bs, so may not be
the same as the numeral that generated the note number.
    The input numbers 0-11 correspond to I-VII in the scale. The input
    need not be in this range; outside it, numbers will be mapped into
    this range by "% 12".
Returns "X" if input is None.
"""
if chord_int is None:
return "X"
# Take number mod 12, in case it's not in correct range
return ROMAN_NUMERALS[chord_int % 12]
def int_to_pitch_class(chord_int):
"""
Like L{int_to_chord_numeral}, but outputs a pitch class name instead of
roman numeral. Returns "X" if input is None.
"""
if chord_int is None:
return "X"
else:
# Take number mod 12, in case it's not in correct range
return int_to_note_name[chord_int % 12]
def generalise_chord_name(chord_name):
"""
The grammar generalises over chord names, using X to mean "any
roman numeral chord root". When a chord name comes as input to
the parser, say "IIm", we look up not "IIm", but "Xm".
Given any chord name, this function returns the generalised
chord name to look up in the grammar.
"""
from jazzparser.data import Chord
# Try building a chord from the chord name
chord = Chord.from_name(chord_name)
# Only interested in the tetrad type
return "X%s" % chord.tetrad_type
def interval_observation_from_chord_string_pair(chord1, chord2, type_mapping=None):
"""
Given two strings representing chords, produces a string representing
a chord observation of the form x-t, where x is the interval between
the chords (numeric) and t is the type of the first chord.
"""
from jazzparser.data import Chord
chord1 = Chord.from_name(chord1)
if chord2 is None:
interval = ""
else:
chord2 = Chord.from_name(chord2)
interval = "%d" % Chord.interval(chord1,chord2)
# Apply a mapping to the chord type if one was given
if type_mapping is not None:
ctype = type_mapping[chord1.type]
else:
ctype = chord1.type
return "%s-%s" % (interval, ctype)
class ChordError(Exception):
"""
Raised when there's a problem recognising or processing a chord.
"""
pass
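A few worked examples of the conversions above (each value follows from the tables and patterns in this module):
assert chord_numeral_to_int("bVII") == 10    # VII = 11, 'b' lowers by 1
assert pitch_class_to_int("F#") == 6         # F = 5, '#' raises by 1
assert int_to_chord_numeral(14) == "II"      # 14 % 12 = 2 -> "II"
assert int_to_pitch_class(3) == "Eb"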
|
Canadian French is the number one translation choice for American companies exporting to Canada.
The French language has existed in Canada since the seventeenth century. Today, there are around seven million people in Canada who speak Canadian French dialects as their primary language. These populations are concentrated in the provinces of Quebec, Ontario, New Brunswick, Manitoba, Alberta and Saskatchewan. Over 80% of Quebec's population speaks that province's specific Canadian French dialect, termed the Quebec French dialect. Alberta has the fewest such speakers (only about 2% of its population speaks one of the Canadian French dialects). After Quebec, New Brunswick is the province with the next highest concentration of Francophones, which has compelled both provincial governments to grant official status to the French language.
The Acadian French dialect is one of the old Canadian French dialects, spoken by the French colonists who settled in Canadian provinces such as New Brunswick, Nova Scotia, Prince Edward Island and Newfoundland. New Brunswick has the largest Acadian population, and the Acadian French dialect is one of the official languages of the province. One can also find this dialect spoken in a few provinces of France, including Anjou, Aunis, Maine and Saintonge.
This Canadian French dialect differs from Standard French in terms of pronunciation and vocabulary. The Acadian French dialect retains several old features of the earlier French language that were shed during the standardization of the language.
Some of these features include the addition of the alveolar r, the plural verb ending '-ont', and the way words are pronounced. Because of these features, French speakers of other Canadian French dialects find the Acadian dialect difficult to understand. This Canadian French dialect shares several words with the Quebec French dialect, and the US-based Cajun French dialect is derived from the Acadian French dialect.
It is one of the old Canadian French dialects, said to have first appeared in Canada during the seventeenth century. Today, some linguists assert that speakers of this dialect no longer exist, while others claim that there are still over 10,000 speakers in the province, mainly senior citizens who know the dialect from their youth. The latest generations of these French speakers are unaware of the features of this old Canadian French dialect.
The Translation Company offers quality Canadian French translations. Contact us for a free quote on your documents. |
"""
A Digital Communications Synchronization
and PLLs Function Module
A collection of useful functions when studying PLLs
and synchronization and digital comm
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
import numpy as np
from logging import getLogger
log = getLogger(__name__)
import warnings
def NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3):
"""
zz,e_tau = NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3)
z = complex baseband input signal at nominally Ns samples
per symbol
Ns = Nominal number of samples per symbol (Ts/T) in the symbol
tracking loop, often 4
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
I_ord = interpolator order, 1, 2, or 3
e_tau = the timing error e(k) input to the loop filter
Kp = The phase detector gain in the symbol tracking loop; for the
           NDA algorithm used here it is always 1
Mark Wickert July 2014
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
# Loop filter parameters
    K0 = -1.0 # The modulo-1 counter counts down, so the loop gain needs a sign change
Kp = 1.0
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0
zz = np.zeros(len(z),dtype=np.complex128)
#zz = np.zeros(int(np.floor(len(z)/float(Ns))),dtype=np.complex128)
e_tau = np.zeros(len(z))
#e_tau = np.zeros(int(np.floor(len(z)/float(Ns))))
#z_TED_buff = np.zeros(Ns)
c1_buff = np.zeros(2*L+1)
vi = 0
CNT_next = 0
mu_next = 0
underflow = 0
epsilon = 0
mm = 1
z = np.hstack(([0], z))
for nn in range(1,Ns*int(np.floor(len(z)/float(Ns)-(Ns-1)))):
# Define variables used in linear interpolator control
CNT = CNT_next
mu = mu_next
if underflow == 1:
if I_ord == 1:
# Decimated interpolator output (piecewise linear)
z_interp = mu*z[nn] + (1 - mu)*z[nn-1]
elif I_ord == 2:
# Decimated interpolator output (piecewise parabolic)
# in Farrow form with alpha = 1/2
v2 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[1, -1, -1, 1])
v1 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[-1, 3, -1, -1])
v0 = z[nn]
z_interp = (mu*v2 + v1)*mu + v0
elif I_ord == 3:
# Decimated interpolator output (piecewise cubic)
# in Farrow form
v3 = np.sum(z[nn+2:nn-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
v2 = np.sum(z[nn+2:nn-1-1:-1]*[0, 1/2., -1, 1/2.])
v1 = np.sum(z[nn+2:nn-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
v0 = z[nn]
z_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
else:
                log.error('I_ord must be 1, 2, or 3')
# Form TED output that is smoothed using 2*L+1 samples
# We need Ns interpolants for this TED: 0:Ns-1
c1 = 0
for kk in range(Ns):
if I_ord == 1:
# piecewise linear interp over Ns samples for TED
z_TED_interp = mu*z[nn+kk] + (1 - mu)*z[nn-1+kk]
elif I_ord == 2:
# piecewise parabolic in Farrow form with alpha = 1/2
v2 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1, -1, -1, 1])
v1 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1, 3, -1, -1])
v0 = z[nn+kk]
z_TED_interp = (mu*v2 + v1)*mu + v0
elif I_ord == 3:
# piecewise cubic in Farrow form
v3 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
v2 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[0, 1/2., -1, 1/2.])
v1 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
v0 = z[nn+kk]
z_TED_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
else:
                    log.error('I_ord must be 1, 2, or 3')
c1 = c1 + np.abs(z_TED_interp)**2 * np.exp(-1j*2*np.pi/Ns*kk)
c1 = c1/Ns
# Update 2*L+1 length buffer for TED output smoothing
c1_buff = np.hstack(([c1], c1_buff[:-1]))
# Form the smoothed TED output
epsilon = -1/(2*np.pi)*np.angle(np.sum(c1_buff)/(2*L+1))
# Save symbol spaced (decimated to symbol rate) interpolants in zz
zz[mm] = z_interp
e_tau[mm] = epsilon # log the error to the output vector e
mm += 1
else:
            # Simple zero-order hold interpolation between symbol samples
# we just coast using the old value
#epsilon = 0
pass
vp = K1*epsilon # proportional component of loop filter
vi = vi + K2*epsilon # integrator component of loop filter
v = vp + vi # loop filter output
W = 1/float(Ns) + v # counter control word
# update registers
CNT_next = CNT - W # Update counter value for next cycle
if CNT_next < 0: # Test to see if underflow has occured
CNT_next = 1 + CNT_next # Reduce counter value modulo-1 if underflow
underflow = 1 # Set the underflow flag
mu_next = CNT/W # update mu
else:
underflow = 0
mu_next = mu
    # Remove unused zero samples at the end
    zz = zz[:mm-1]
    # Normalize so symbol values have a unity magnitude
    zz /= np.std(zz)
    e_tau = e_tau[:mm-1]
return zz, e_tau
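# A hedged usage sketch: for a complex baseband signal z at Ns = 4 samples
# per symbol, recover symbol-rate samples and the timing-error sequence:
#     zz, e_tau = NDA_symb_sync(z, Ns=4, L=10, BnTs=0.05, zeta=0.707, I_ord=3)
# (L = 10 and BnTs = 0.05 are illustrative choices, not defaults.)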
def DD_carrier_sync(z, M, BnTs, zeta=0.707, mod_type = 'MPSK', type = 0, open_loop = False):
"""
z_prime,a_hat,e_phi = DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0)
Decision directed carrier phase tracking
z = complex baseband PSK signal at one sample per symbol
    M = The PSK modulation order, i.e., 2, 4, or 8.
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
type = Phase error detector type: 0 <> ML, 1 <> heuristic
z_prime = phase rotation output (like soft symbol values)
a_hat = the hard decision symbol values landing at the constellation
values
e_phi = the phase error e(k) into the loop filter
Ns = Nominal number of samples per symbol (Ts/T) in the carrier
phase tracking loop, almost always 1
Kp = The phase detector gain in the carrier phase tracking loop;
This value depends upon the algorithm type. For the ML scheme
           described at the end of notes Chapter 9, A = 1, K = 1/sqrt(2),
so Kp = sqrt(2).
Mark Wickert July 2014
Updated for improved MPSK performance April 2020
Added experimental MQAM capability April 2020
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
Ns = 1
z_prime = np.zeros_like(z)
a_hat = np.zeros_like(z)
e_phi = np.zeros(len(z))
theta_h = np.zeros(len(z))
theta_hat = 0
# Tracking loop constants
Kp = 1 # What is it for the different schemes and modes?
K0 = 1
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0;
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0;
# Initial condition
vi = 0
# Scaling for MQAM using signal power
# and known relationship for QAM.
if mod_type == 'MQAM':
z_scale = np.std(z) * np.sqrt(3/(2*(M-1)))
z = z/z_scale
for nn in range(len(z)):
# Multiply by the phase estimate exp(-j*theta_hat[n])
z_prime[nn] = z[nn]*np.exp(-1j*theta_hat)
if mod_type == 'MPSK':
if M == 2:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*0
elif M == 4:
a_hat[nn] = (np.sign(z_prime[nn].real) + \
                            1j*np.sign(z_prime[nn].imag))/np.sqrt(2)
elif M > 4:
# round to the nearest integer and fold to nonnegative
# integers; detection into M-levels with thresholds at mid points.
                a_hat[nn] = np.mod((np.rint(np.angle(z_prime[nn])*M/2/np.pi)).astype(int),M)
a_hat[nn] = np.exp(1j*2*np.pi*a_hat[nn]/M)
else:
print('M must be 2, 4, 8, etc.')
elif mod_type == 'MQAM':
            # Scale adaptively assuming var(x_hat) is proportional to the signal power
            if M == 2 or M == 4 or M == 16 or M == 64 or M == 256:
x_m = np.sqrt(M)-1
if M == 2: x_m = 1
# Shift to quadrant one for hard decisions
a_hat_shift = (z_prime[nn] + x_m*(1+1j))/2
# Soft IQ symbol values are converted to hard symbol decisions
a_hat_shiftI = np.int16(np.clip(np.rint(a_hat_shift.real),0,x_m))
a_hat_shiftQ = np.int16(np.clip(np.rint(a_hat_shift.imag),0,x_m))
# Shift back to antipodal QAM
a_hat[nn] = 2*(a_hat_shiftI + 1j*a_hat_shiftQ) - x_m*(1+1j)
else:
                print('M must be 2, 4, 16, 64, or 256')
if type == 0:
# Maximum likelihood (ML) Rice
e_phi[nn] = z_prime[nn].imag * a_hat[nn].real - \
z_prime[nn].real * a_hat[nn].imag
elif type == 1:
# Heuristic Rice
e_phi[nn] = np.angle(z_prime[nn]) - np.angle(a_hat[nn])
# Wrap the phase to [-pi,pi]
e_phi[nn] = np.angle(np.exp(1j*e_phi[nn]))
elif type == 2:
# Ouyang and Wang 2002 MQAM paper
            e_phi[nn] = np.imag(z_prime[nn]/a_hat[nn])
else:
            print('Type must be 0, 1, or 2')
vp = K1*e_phi[nn] # proportional component of loop filter
vi = vi + K2*e_phi[nn] # integrator component of loop filter
v = vp + vi # loop filter output
theta_hat = np.mod(theta_hat + v,2*np.pi)
theta_h[nn] = theta_hat # phase track output array
if open_loop:
theta_hat = 0 # for open-loop testing
# Normalize MQAM outputs
if mod_type == 'MQAM':
z_prime *= z_scale
return z_prime, a_hat, e_phi, theta_h
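
# Hedged usage sketch (not part of the original source): track a static phase
# offset on a QPSK (M = 4) symbol stream with the loop above. The symbol
# generator and the 0.2 rad offset are illustrative assumptions.
def _dd_carrier_sync_demo():
    np.random.seed(1)
    N_sym = 2000
    # Random QPSK points on the unit circle, rotated by a 0.2 rad offset
    a = np.exp(1j*(2*np.pi*np.random.randint(0, 4, N_sym)/4 + np.pi/4))
    z = a*np.exp(1j*0.2)
    z_prime, a_hat, e_phi, theta_h = DD_carrier_sync(z, 4, BnTs=0.02)
    # After convergence the phase track should hover near the 0.2 rad offset
    print(np.median(theta_h[N_sym//2:]))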
def time_step(z, ns, t_step, n_step):
"""
    Create a one sample per symbol signal containing a timing (sample slip)
    step n_step symbols into the waveform.
:param z: complex baseband signal after matched filter
:param ns: number of sample per symbol
    :param t_step: size of the timing step in samples
:param n_step: symbol sample location where the step turns on
    :return: the one sample per symbol signal containing the timing step
Mark Wickert July 2014
"""
z_step = np.hstack((z[:ns * n_step], z[(ns * n_step + t_step):], np.zeros(t_step)))
return z_step
def phase_step(z, ns, p_step, n_step):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param ns: number of sample per symbol
:param p_step: size in radians of the phase step
:param n_step: symbol sample location where the step turns on
    :return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
"""
nn = np.arange(0, len(z[::ns]))
theta = np.zeros(len(nn))
idx = np.where(nn >= n_step)
    theta[idx] = p_step  # apply the step to all symbols at or after n_step
z_rot = z[::ns] * np.exp(1j * theta)
return z_rot
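
# Hedged usage sketch (not part of the original source): apply a pi/2 phase
# step to a constant test signal and verify where the step lands.
def _phase_step_demo():
    z = np.ones(100, dtype=complex)          # 25 symbols at ns = 4
    z_rot = phase_step(z, 4, np.pi/2, 10)    # step turns on at symbol 10
    print(np.angle(z_rot[9]), np.angle(z_rot[10]))   # ~0 before, ~pi/2 after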
def PLL1(theta,fs,loop_type,Kv,fn,zeta,non_lin):
"""
Baseband Analog PLL Simulation Model
:param theta: input phase deviation in radians
:param fs: sampling rate in sample per second or Hz
:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator
with lead compensation F(s) = (1 + s tau2)/(s tau1),
i.e., a type II, or 3, lowpass with lead compensation
F(s) = (1 + s tau2)/(1 + s tau1)
:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad
and K_LF = 1; the user can easily change this
:param fn: Loop natural frequency (loops 2 & 3) or cutoff
                frequency (loop 1)
:param zeta: Damping factor for loops 2 & 3
:param non_lin: 0, linear phase detector; 1, sinusoidal phase detector
:return: theta_hat = Output phase estimate of the input theta in radians,
ev = VCO control voltage,
phi = phase error = theta - theta_hat
Notes
-----
Alternate input in place of natural frequency, fn, in Hz is
the noise equivalent bandwidth Bn in Hz.
Mark Wickert, April 2007 for ECE 5625/4625
Modified February 2008 and July 2014 for ECE 5675/4675
Python version August 2014
"""
T = 1/float(fs)
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
# Note Bn = K/4 Hz but K has units of rad/s
#fn = 4*Bn/(2*pi);
K = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = 4 *np.pi*zeta*fn # loop natural frequency in rad/s
tau2 = zeta/(np.pi*fn)
elif loop_type == 3:
# Second-order loop parameters for one-pole lowpass with
# phase lead correction.
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = Kv # Essentially the VCO gain sets the single-sided
# hold-in range in Hz, as it is assumed that Kp = 1
# and KLF = 1.
tau1 = K/((2*np.pi*fn)**2)
tau2 = 2*zeta/(2*np.pi*fn)*(1 - 2*np.pi*fn/K*1/(2*zeta))
else:
warnings.warn('Loop type must be 1, 2, or 3')
# Initialize integration approximation filters
filt_in_last = 0; filt_out_last = 0;
vco_in_last = 0; vco_out = 0; vco_out_last = 0;
# Initialize working and final output vectors
n = np.arange(len(theta))
theta_hat = np.zeros_like(theta)
ev = np.zeros_like(theta)
phi = np.zeros_like(theta)
# Begin the simulation loop
for k in range(len(n)):
phi[k] = theta[k] - vco_out
if non_lin == 1:
# sinusoidal phase detector
pd_out = np.sin(phi[k])
else:
# Linear phase detector
pd_out = phi[k]
# Loop gain
gain_out = K/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = (1/tau2)*gain_out
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
elif loop_type == 3:
filt_in = (tau2/tau1)*gain_out - (1/tau1)*filt_out_last
u3 = filt_in + (1/tau2)*filt_out_last
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
else:
filt_out = gain_out;
# VCO
vco_in = filt_out
if loop_type == 3:
vco_in = u3
vco_out = vco_out_last + T/2*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
# Measured loop signals
ev[k] = vco_in
theta_hat[k] = vco_out
return theta_hat, ev, phi
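
# Hedged usage sketch (not part of the original source): drive the type II
# loop (loop_type = 2) with a 1 rad phase step; parameter values are
# illustrative only.
def _pll1_demo():
    fs = 1000.0
    t = np.arange(0, 2, 1/fs)
    theta = np.zeros_like(t)
    theta[t >= 0.5] = 1.0   # 1 rad phase step at t = 0.5 s
    theta_hat, ev, phi = PLL1(theta, fs, 2, 1.0, 10.0, 0.707, 0)
    # A type II loop drives the steady-state phase error to a step to zero
    print(phi[-1])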
def PLL_cbb(x,fs,loop_type,Kv,fn,zeta):
"""
Baseband Analog PLL Simulation Model
:param x: input phase deviation in radians
:param fs: sampling rate in sample per second or Hz
:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator
with lead compensation F(s) = (1 + s tau2)/(s tau1),
i.e., a type II, or 3, lowpass with lead compensation
F(s) = (1 + s tau2)/(1 + s tau1)
:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad
and K_LF = 1; the user can easily change this
:param fn: Loop natural frequency (loops 2 & 3) or cutoff
frequency (loop 1)
:param zeta: Damping factor for loops 2 & 3
:return: theta_hat = Output phase estimate of the input theta in radians,
ev = VCO control voltage,
phi = phase error = theta - theta_hat
Mark Wickert, April 2007 for ECE 5625/4625
Modified February 2008 and July 2014 for ECE 5675/4675
Python version August 2014
"""
T = 1/float(fs)
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
# Note Bn = K/4 Hz but K has units of rad/s
#fn = 4*Bn/(2*pi);
K = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = 4 *np.pi*zeta*fn # loop natural frequency in rad/s
tau2 = zeta/(np.pi*fn)
elif loop_type == 3:
# Second-order loop parameters for one-pole lowpass with
# phase lead correction.
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = Kv # Essentially the VCO gain sets the single-sided
# hold-in range in Hz, as it is assumed that Kp = 1
# and KLF = 1.
        tau1 = K/((2*np.pi*fn)**2)
tau2 = 2*zeta/(2*np.pi*fn)*(1 - 2*np.pi*fn/K*1/(2*zeta))
else:
warnings.warn('Loop type must be 1, 2, or 3')
# Initialize integration approximation filters
filt_in_last = 0; filt_out_last = 0;
vco_in_last = 0; vco_out = 0; vco_out_last = 0;
vco_out_cbb = 0
# Initialize working and final output vectors
n = np.arange(len(x))
theta_hat = np.zeros(len(x))
ev = np.zeros(len(x))
phi = np.zeros(len(x))
# Begin the simulation loop
for k in range(len(n)):
#phi[k] = theta[k] - vco_out
phi[k] = np.imag(x[k] * np.conj(vco_out_cbb))
pd_out = phi[k]
# Loop gain
gain_out = K/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = (1/tau2)*gain_out
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
elif loop_type == 3:
filt_in = (tau2/tau1)*gain_out - (1/tau1)*filt_out_last
u3 = filt_in + (1/tau2)*filt_out_last
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
else:
filt_out = gain_out;
# VCO
vco_in = filt_out
if loop_type == 3:
vco_in = u3
vco_out = vco_out_last + T/2*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
vco_out_cbb = np.exp(1j*vco_out)
# Measured loop signals
ev[k] = vco_in
theta_hat[k] = vco_out
return theta_hat, ev, phi
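
# Hedged usage sketch (not part of the original source): lock onto a 5 Hz
# frequency offset carried by a complex baseband exponential. With Kv given
# in Hz/v, the settled control voltage should approach the offset in Hz.
def _pll_cbb_demo():
    fs = 1000.0
    t = np.arange(0, 2, 1/fs)
    x = np.exp(1j*2*np.pi*5*t)   # 5 Hz frequency offset
    theta_hat, ev, phi = PLL_cbb(x, fs, 2, 1.0, 10.0, 0.707)
    print(np.mean(ev[-200:]))    # should settle near 5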
|
Students encounter God through biblical teaching, worship, fellowship, missions and ministry involvement.
Our ministry provides several weekly programs that include live music, meaningful fellowship, and relevant teaching. Many of these programs present opportunities for students to creatively express their faith.
Students help lead our worship times and bring their talents and gifts as a service.
1st, 2nd, and 4th Sundays: Student Church is located inside Legacy Building on the second floor.
A time of food, fun, fellowship, and expressions of faith.
We partner with parents to get the church and the home on the same page. We use a proven national strategy called “Orange”: when “red” (the family) comes together with “yellow” (the church), the combined influence creates a major impact, orange. We aspire for students to discover God, take ownership of their faith, and become positive influencers among their friends.
Adults and teens have ample opportunities to volunteer at Concord and in our community. Below are ministry areas where our students get to make a valuable contribution and learn the value of serving others.
We grow students to greet everyone who enters the church with a warm, friendly greeting, smile and handshake, and to encourage our students to take on leadership roles within the ministry.
We grow students by enlightening Christian youth through the knowledge of usher techniques and Christian standards.
We grow students by training them to be leaders and sharing their faith through service within the community and church.
We grow students by equipping them to share the Gospel of Jesus Christ through song and to encourage believers in their walk with Christ.
We grow students by teaching them to provide a safe and friendly Christian atmosphere while welcoming worshippers and rendering Five Star service with Christian love.
We grow students to create and facilitate cutting edge technology in audio and video production.
The middle school Boy's Hangout is a monthly gathering for the middle school boys in the DFW Metroplex. These gatherings are an opportunity to disciple our middle school boys through empowering, equipping and mentoring. We provide Christlike character formation and servant leadership development for middle school boys in a highly relational and fun environment.
We believe that it is important to bring awareness to them to help them begin the process of thinking about future decisions of careers in the workforce and about how they are to respond to their surroundings now as young Boys (soon to be Men). REGISTER HERE!
The Middle School Boys Bible Study is an opportunity for middle school boys to gather, fellowship and study God’s word together. Each middle school boy will have a small group leader that will mentor, encourage and support them as they take their next steps with Christ. Our Middle School Boys Bible Study takes place 1st, 2nd, and 4th Sundays at 12 pm in the Legacy Center.
The Girls’ Kickback is a monthly gathering for the middle and high school girls in the DFW Metroplex. These gatherings are an opportunity to dive deeper into challenges that girls are facing and how to overcome them with the strength and power of Jesus.
We believe that every girl is fearfully and wonderfully made and has unique gifts that God has instilled in them to share with the world. We pray that through these events they will leave renewed, strengthened and inspired. The next Girls' Kickback is on April 27th, REGISTER HERE!
The Girls’ Bible Study is an opportunity for middle and high school girls to gather, fellowship and study God’s word together. Each girl will have a small group leader that will mentor, encourage and support them as they take their next steps with Christ. Our Girls’ Bible Study takes place 1st, 2nd, and 4th Sundays at 12pm in the Legacy Center. |
#!/usr/bin/env python3
try:
import numpypy as np
except ImportError:
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
#import ipdb
## Tri Diagonal Matrix Algorithm(a.k.a Thomas algorithm) solver
def TDMAsolver(a, b, c, d):
'''
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
'''
nf = len(a) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy the array
    for it in range(1, nf):
mc = ac[it]/bc[it-1]
bc[it] = bc[it] - mc*cc[it-1]
dc[it] = dc[it] - mc*dc[it-1]
    xc = ac  # reuse the copied sub-diagonal array as storage for the solution
xc[-1] = dc[-1]/bc[-1]
    for il in range(nf-2, -1, -1):
xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
del bc, cc, dc # delete variables from memory
return xc
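
# Hedged verification sketch (not part of the original file): compare the
# Thomas-algorithm solution against a dense solve on a small system. The
# diagonals follow the usual convention: a[0] and c[-1] are unused.
def _tdma_demo():
    n = 6
    a = np.r_[0.0, -np.ones(n-1)]      # sub-diagonal
    b = 4.0*np.ones(n)                 # main diagonal
    c = np.r_[-np.ones(n-1), 0.0]      # super-diagonal
    d = np.arange(1.0, n+1)
    A = np.diag(b) + np.diag(a[1:], -1) + np.diag(c[:-1], 1)
    print(np.allclose(TDMAsolver(a, b, c, d), np.linalg.solve(A, d)))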
class Vector(object):
def __init__(self, parent):
# save the pointer to the parent (dynamical)
self.p = parent
# initial G = 0, G[k,n]
self.G = np.zeros((self.p.Nz, self.p.NFourier), dtype="float64")
# access via G[k][n]
def step(self):
# save the old G
self.G_old = self.G.copy()
# compute the new one
self.compute_G()
# new += dt/2*(3G-G_old)
self.field[1:-1] = (self.field[1:-1]
+ self.p.dt/2*(3*self.G[1:-1] - self.G_old[1:-1])
)
# conditions at top and bottom : null
self.field[0 ,:] = 0
self.field[-1,:] = 0
def compute_G(self):
raise Exception("Vector class is a base class, not supposed to be "+
"used like that")
def initial(self, init_cond):
if init_cond == 'null':
self.field = np.zeros((self.p.Nz, self.p.NFourier))
elif init_cond == "T":
self.field = np.array([[T_0(n,k,self.p) for n in range(self.p.NFourier)]
for k in range(self.p.Nz)])
else:
raise Exception("init_cond must be either `null` or `T`")
class Temp(Vector):
name = "T"
def compute_G(self):
# compute G except for k = 0, Nz-1 and n = 0
for n in range(1, self.p.NFourier):
self.G[1:-1,n] = ((self.field[:-2,n]-2*self.field[1:-1,n]+self.field[2:,n])
* self.p.oodz2
- (n*self.p.pi/self.p.a)**2
* self.field[1:-1,n] )
class Vort(Vector):
name = "ω"
def __init__(self, parent):
super().__init__(parent)
self.compute_wk()
def compute_wk(self):
# init. the arrays:
self.wk1 = np.zeros((self.p.Nz, self.p.NFourier))
self.wk2 = np.zeros((self.p.Nz, self.p.NFourier))
self.sub = np.zeros((self.p.Nz, self.p.NFourier))
for n in range(1,self.p.NFourier):
            # save some useful functions
sub_f = lambda k : -self.p.oodz2 if k<self.p.Nz-1 else 1
dia = lambda k : (n*self.p.pi/self.p.a)**2 + 2*self.p.oodz2 if 0<k<self.p.Nz-1 else 1
sup = lambda k : -self.p.oodz2 if k>0 else 1
# tridiag. solver
self.wk1[0,n] = 1/dia(0)
self.wk2[0,n] = sup(0) * self.wk1[0,n]
for k in range(1, self.p.Nz-1):
self.wk1[k,n] = 1 /(dia(k)-sub_f(k)*self.wk2[k-1,n])
self.wk2[k,n] = sup(k)*self.wk1[k,n]
self.wk1[-1,n] = 1/(dia(self.p.Nz-1)-sub_f(self.p.Nz-1)*self.wk2[-2,n])
self.sub[:,n] = [sub_f(k) for k in range(self.p.Nz)]
def step(self):
rhs = self.p.psi.field.copy()
# boundary conditions k=0, Nz-1 : psi = 0
rhs[0, :] = 0
rhs[-1,:] = 0
for n in range(1,self.p.NFourier):
# tridiag. solver
self.field[0,n] = rhs[0,n]*self.wk1[0,n]
for k in range(1, self.p.Nz):
                self.field[k,n] = (rhs[k,n] - self.sub[k,n]*self.field[k-1,n])*self.wk1[k,n]
for k in range(self.p.Nz-2, 0, -1):
self.field[k,n] = self.field[k,n]-self.wk2[k,n]*self.field[k+1,n]
class Stream(Vector):
name = "ψ"
def compute_G(self):
# compute G except for k=0, Nz-1 and n=0
for n in range(1, self.p.NFourier):
a = self.p.Ra*n*self.p.pi/self.p.a*self.p.T.field[1:-1,n]
b = (self.field[:-2,n] - 2*self.field[1:-1,n] + self.field[2:,n])*self.p.oodz2
c = (n*self.p.pi/self.p.a)**2*self.field[1:-1,n]
self.G[1:-1,n] = self.p.Pr*( a + b - c)
class Simulation(object):
param_list = {'Re': 1, 'Pr': 1, 'Ra': 1, 'a' : 1, 'Nz': 100,
'NFourier': 50, 'dt_security': 0.9,
'maxiter': 100, 'freq_output': 10,
'freq_critical_Ra':50, 'verbose': False}
def __init__(self, *args, **kargs):
# save the default parameters
for param, value in self.param_list.items():
setattr(self, param, value)
# override if necessary
for param, value in kargs.items():
if param not in self.param_list:
raise Exception("`%s' not recognized" % param)
else:
setattr(self, param, value)
# set the initial values
self.t = 0
self.niter = 0
self.dz = 1/(self.Nz-1)
        # some useful quantities
self.oodz2 = 1/self.dz**2
self.pi = np.pi
# create the inner fields
self.T = Temp(self)
self.omega = Vort(self)
self.psi = Stream(self)
# previous fields for critical Ra number
self.T_old = np.zeros((self.NFourier,))
self.omega_old = np.zeros((self.NFourier,))
self.psi_old = np.zeros((self.NFourier,))
def __del__(self):
pass
def growth(self):
        ''' Calculate the log-growth rate and return a string containing
        all the growth rates'''
amp = lambda v: np.log(abs(v)) if v != 0 else 0
gr = lambda new,old,n: str(amp(new.field[self.Nz//3,n])
- amp(abs(old[n])))
out = "".join([ gr(self.T, self.T_old,n) + "\t" +
gr(self.omega, self.omega_old,n) + "\t" +
gr(self.psi, self.psi_old,n) + "\t"
for n in range(self.NFourier) ])
# save the arrays for next output
self.T_old = self.T.field[self.Nz//3,:].copy()
self.omega_old = self.omega.field[self.Nz//3,:].copy()
self.psi_old = self.psi.field[self.Nz//3,:].copy()
return out+"\n"
def step(self):
# eventually output
if self.verbose and self.niter % self.freq_output == 0:
self.dump()
# eventually calculate the d-ln term for the critical Ra
if self.verbose and self.niter % self.freq_critical_Ra == 0 :
output = "# growth : \t"
output+= "".join([
"{T.name}_{n}\t{w.name}_{n}\t{psi.name}_{n}\t".format(T=self.T,
w=self.omega,
psi=self.psi,
n=n)
for n in range(self.NFourier)])
output+= "\n"
output+= "# growth : \t"
output+= self.growth()
print(output)
# get the max timestep
self.CFL()
# increase the time, the iteration
self.t += self.dt
self.niter += 1
# check that the end is not reached
if self.niter > self.maxiter:
return False
else:
return True
def dump(self):
output = "#k\t"
for n in range(self.NFourier):
o = "{T}_{n}\t{w}_{n}\t{psi}_{n}\t".format(T=self.T.name,
w=self.omega.name,
psi=self.psi.name,
n=n)
output += o
output += "\n"
for k in range(self.Nz):
output += str(k) + "\t"
for n in range(self.NFourier):
l = "{T}\t{w}\t{psi}\t".format(T=self.T.field[k,n],
w=self.omega.field[k,n],
psi=self.psi.field[k,n])
output += l
output += "\n"
print(output)
def CFL(self):
# dt < (dz)^2/4 or (dz)^2/(4Pr) if Pr > 1
self.dt = self.dt_security * self.dz**2/(4*max(1,self.Pr))
def T_0 (n,k,s):
if n > 0:
return np.sin(s.pi*k*s.dz)
else:
return 1-k*s.dz
if __name__ == '__main__':
# create a new simulation
s = Simulation(Re=5)
# initial conditions psi(0) = 0, Omega(0) = 0
s.psi.initial("null")
s.omega.initial("null")
# T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz
    s.T.initial("T")
# main loop over time
while s.step():
s.T.step()
s.psi.step()
s.omega.step()
del s
|
Football betting predictions: yesterday's matches with kick-off time, best odds, and 1X2 tips.
England, 2pm, FS1, Telemundo, fuboTV, Sling TV and PlayStation Vue (7-day free trial) Tuesday, June 19, 2018 Colombia vs. Japan, 8am, FS1, Telemundo, fuboTV, Sling TV and PlayStation Vue (7-day free trial) Poland vs. Senegal, 11am, FOX, Telemundo, fuboTV, Sling TV and PlayStation Vue (7-day.
2018 AFL: Round 16 Preview Betting Tips. Big weekend of AFL action coming up with the nine game round set to kick off on Thursday night! PuntingInDanger is back to take a look at every game over the weekend and give his betting tips here.
Tamil Nadu Premier League Match Prediction TNPL Betting Tips. July 15, 2018 Anikesh CWeam vs Vancouver Knights 16th July Prediction Final Match Global T20 Canada CWIB vs VCK Final Global T20. July 15, 2018 Anikesh Lyca Kovai Kings vs Karaikudi Kaalai 15th July Prediction 5th.
USA: Betting tips kick off!
A website that searches for quality betting tips with algorithms based on many parameters. See info about the top bookmakers. "Why does this keep happening?" It's really irritating and frustrating. What went wrong for the live bettors in the above two scenarios? "That's down the drain." In the live betting market, you also lost both bets. Right? The moving odds tell a story. Published October 17, 2016, by admin. Our team every day has super insiders which are 100% sure. It is a fixed match with no chance of a miss. Customer satisfaction is our top priority.
Updated 24 hours a day. Gooner News: match previews and reviews plus transfer news from around the world, the latest Arsenal news, via CollegeFootballTalk. Headlines: Alexis Misses Start of Man Utd's US Tour via Bleacher Report; Report: Machado Likely to Be Dealt Soon via Bleacher Report; Report: Tennessee DL Ryan Thaxton Arrested for Domestic Assault via Bleacher Report; The Real Work Is Just Beginning in St.
This allows players to bet. How to enable wagering/prediction in Dota 2?
One of the defending team they receivers should simply select tinier and uncomplicated tricks right shoe size as it has to do with the named organisation. These picks will be updated every Friday. Jason Miller/Getty: with Week 13 nearly in the books, it's time for another all-important waiver cycle in the world of fantasy football. We're only three-quarters of the way through the NFL regular season, but for fantasy owners, this is as tense as it gets.
It is no surprise that we have attracted a lot of commercial attention. You are cordially invited to join our winning team and earn good money with your passion for football. If you have any questions, contact us. A player places a 100 bet on over 12.5 corners to be taken in a match. If there are 13 corners taken or more, the bet wins; if there are 12 corners taken or less, the bet loses. Horoscope Matching. Free Kundli. Rajnath Singh - The Story of New BJP President. The prediction about a marriage is accurately given by Numerology predictions.
#----------------------------------------------------------------------------------------
#
# This file is part of CosmicFish.
#
# Copyright (C) 2015-2017 by the CosmicFish authors
#
# The CosmicFish code is free software;
# You can use it, redistribute it, and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any later version.
# The full text of the license can be found in the file LICENSE at
# the top level of the CosmicFish distribution.
#
#----------------------------------------------------------------------------------------
"""
.. module:: fisher_operations
:platform: Unix
:synopsis: Module that contains operations that can be performed on Fisher matrices.
All of them are safeguarded against non-Fisher input.
.. moduleauthor:: Marco Raveri <[email protected]> for the CosmicFish code.
"""
# ***************************************************************************************
import numpy as np
from . import fisher_matrix as fm
import math
# ***************************************************************************************
def eliminate_columns_rows( fisher_matrix, indexes ):
"""
    This function eliminates the rows and columns corresponding to the given indexes
    from the Fisher matrix. It also removes the associated information, such as the
    names of the parameters. Notice that the index corresponding to the first parameter
is zero.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param indexes: list of integers with the indexes to delete from the Fisher matrix
:type indexes: :class:`list` of :class:`int`
:returns: A Fisher matrix with the columns and rows deleted
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
# write the param names:
new_param_names = []
new_param_names_latex = []
new_param_fiducial = []
for i in range( fisher_matrix.num_params ):
if i not in indexes:
new_param_names.append( fisher_matrix.param_names[i] )
new_param_names_latex.append( fisher_matrix.param_names_latex[i] )
new_param_fiducial.append( fisher_matrix.param_fiducial[i] )
# write the Fisher matrix:
fisher_temp = np.delete ( np.delete( fisher_matrix.fisher_matrix, indexes , 0 ), indexes , 1 )
# initialize the new Fisher matrix:
fisher_new = fm.fisher_matrix(fisher_matrix=fisher_temp, param_names=new_param_names, param_names_latex=new_param_names_latex, fiducial=new_param_fiducial )
fisher_new.name = fisher_matrix.name + '_reduced'
fisher_new.path = fisher_matrix.path
fisher_new.indir = fisher_matrix.indir
return fisher_new
# ***************************************************************************************
def eliminate_parameters( fisher_matrix, names ):
"""
    This function eliminates the rows and columns corresponding to the given parameter
    names from the Fisher matrix. It also removes the associated information, such as
    the names of the parameters.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters to delete from the Fisher matrix
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the parameters deleted
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
# get the indexes of the parameters:
index_list = []
for i in names:
if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
index_list.append(fisher_matrix.param_names_dict[i]-1)
    # eliminate them from the list and return:
return eliminate_columns_rows( fisher_matrix, index_list )
# ***************************************************************************************
def reshuffle( fisher_matrix, names ):
"""
This function reshuffles a Fisher matrix. The new Fisher matrix will have the
parameters specified in names, in the order specified by names.
Can be used to delete parameters, change their order or extract the Fisher
for some parameters without marginalizing over the others.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters that are desired in the output Fisher
matrix, in the desired order.
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the new parameters
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
    # check whether the names required are inside the Fisher matrix:
for i in names:
if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
# get the new latex names and fiducial:
new_param_names_latex = []
new_param_fiducial = []
for i in names:
ind = fisher_matrix.param_names_dict[i] -1
new_param_names_latex.append(fisher_matrix.param_names_latex[ind])
new_param_fiducial.append(fisher_matrix.param_fiducial[ind])
# initialize an empty matrix:
num_param_new = len(names)
new_matrix = np.zeros([num_param_new,num_param_new])
# fill the new matrix:
for i in range(num_param_new):
for j in range(num_param_new):
# get the name:
x = names[i]
y = names[j]
# get the parameter name:
x1 = fisher_matrix.param_names_dict[x]-1
y1 = fisher_matrix.param_names_dict[y]-1
            # get the entry of the new matrix:
new_matrix[i,j] = fisher_matrix.fisher_matrix[x1,y1]
# create the new Fisher matrix:
fisher_new = fm.fisher_matrix(fisher_matrix=new_matrix, param_names=names, param_names_latex=new_param_names_latex, fiducial=new_param_fiducial)
fisher_new.name = fisher_matrix.name + '_reshuffled'
fisher_new.path = fisher_matrix.path
fisher_new.indir = fisher_matrix.indir
return fisher_new
# ***************************************************************************************
def marginalise( fisher_matrix, names ):
"""
This function marginalises a Fisher matrix over all parameters but the ones in names.
The new Fisher matrix will have the parameters specified in names, in the order specified by names.
    The calculation is performed in a numerically stable way.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters of the output Fisher matrix,
in the order that will appear in the output Fisher matrix. All other parameters
will be marginalized over.
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the marginalized parameters
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
    # check whether the names required are inside the Fisher matrix:
for i in names:
if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
# get the new latex names and fiducial:
new_param_names_latex = []
new_param_fiducial = []
for i in names:
ind = fisher_matrix.param_names_dict[i] -1
new_param_names_latex.append(fisher_matrix.param_names_latex[ind])
new_param_fiducial.append(fisher_matrix.param_fiducial[ind])
# initialize an empty matrix:
num_param_new = len(names)
new_matrix = np.zeros([num_param_new,num_param_new])
# fill the new inverse matrix:
for i in range(num_param_new):
for j in range(num_param_new):
# get the name:
x = names[i]
y = names[j]
# get the parameter name:
x1 = fisher_matrix.param_names_dict[x]-1
y1 = fisher_matrix.param_names_dict[y]-1
            # get the entry of the new matrix:
new_matrix[i,j] = fisher_matrix.get_fisher_inverse()[x1,y1]
fisher_temp = np.linalg.inv( new_matrix )
# create the new Fisher matrix:
fisher_new = fm.fisher_matrix(fisher_matrix=fisher_temp, param_names=names, param_names_latex=new_param_names_latex, fiducial=new_param_fiducial)
fisher_new.name = fisher_matrix.name + '_marginal'
fisher_new.path = fisher_matrix.path
fisher_new.indir = fisher_matrix.indir
return fisher_new
# ***************************************************************************************
def marginalise_over( fisher_matrix, names ):
"""
This function marginalises a Fisher matrix over the parameters in names.
The new Fisher matrix will not have the parameters specified in names.
    The calculation is performed in a numerically stable way.
:param fisher_matrix: input Fisher matrix
:type fisher_matrix: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param names: list of names of the parameters over which the Fisher will be marginalised.
:type names: :class:`list` of :class:`string`
:returns: A Fisher matrix with the names parameters marginalized.
:rtype: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
"""
# check validity of the input:
if ( not isinstance(fisher_matrix, fm.fisher_matrix) ):
raise ValueError('Error, input fisher_matrix is not a fisher_matrix')
    # check whether the names required are inside the Fisher matrix:
for i in names:
if i not in fisher_matrix.param_names_dict:
            raise ValueError('Error, parameter '+str(i)+' is not a parameter of fisher_matrix')
# get the indexes:
new_names = [ i for i in fisher_matrix.param_names if i not in names ]
return marginalise( fisher_matrix, new_names )
# ***************************************************************************************
def information_gain( fisher_1, fisher_2, fisher_prior, units=math.log(2.0), stat=True ):
"""
This function computes the Fisher approximation of Kullback-Leibler information gain.
For the details of the formula we refer to the CosmicFish notes.
:param fisher_1: first input Fisher matrix
:type fisher_1: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param fisher_2: second input Fisher matrix
:type fisher_2: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
:param fisher_prior: input Fisher matrix with the prior information.
:type fisher_prior: :class:`cosmicfish_pylib.fisher_matrix.fisher_matrix`
    :param units: units of the information gain. Optional, by default in bits.
:type units: :class:`float`
    :param stat: whether to compute the statistical average over the data (expected value and variance)
:type stat: :class:`logical`
:returns: a :class:`float` with the information gain.
:rtype: :class:`float`
"""
info_gain = 0.0
# first computations:
F1p = fisher_1 + fisher_prior
F2p = fisher_2 + fisher_prior
# get common parameter names:
param_names = [ name for name in F1p.get_param_names() if name in F2p.get_param_names() ]
# reshuffle the second matrix:
F1p = reshuffle( F1p, param_names )
F2p = reshuffle( F2p, param_names )
    # define a dummy Fisher matrix with zero entries and with the same parameters as the others:
fisher_temp = fm.fisher_matrix( fisher_matrix=0.0*F2p.get_fisher_matrix(),
param_names=F2p.get_param_names(),
param_names_latex=F2p.get_param_names_latex(),
fiducial=F2p.get_param_fiducial() )
fisher_temp = fisher_2 + fisher_temp
# the first term:
info_gain = info_gain -math.log( F1p.determinant()/F2p.determinant() )
info_gain = info_gain -F1p.get_fisher_matrix().shape[0]
# the second trace term:
info_gain = info_gain + np.trace( np.dot( F2p.get_fisher_inverse() , F1p.get_fisher_matrix() ) )
# add additional term if statistical average over data is wanted
if stat:
# we break down the third term into two pieces:
temp = np.dot( np.dot( np.dot( fisher_temp.get_fisher_matrix(), F2p.get_fisher_inverse() ),F1p.get_fisher_matrix() ), F2p.get_fisher_inverse() )
temp = temp + np.dot( np.dot( temp,fisher_temp.get_fisher_matrix() ), F1p.get_fisher_inverse() )
info_gain = info_gain + np.trace( temp )
# compute variance:
temp = np.dot( temp, temp )
info_variance = np.trace( temp )
# output
info_gain = info_gain/2.0/units
return info_gain
# ***************************************************************************************
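
# ***************************************************************************************
# Hedged usage sketch (not part of the original module): build a toy Fisher matrix and
# exercise the operations above. Parameter names, fiducials and entries are illustrative.
def _fisher_operations_demo():
    names = ['omegam', 'omegal', 'ns']
    latex = ['\\Omega_m', '\\Omega_\\Lambda', 'n_s']
    fiducial = [0.3, 0.7, 0.96]
    matrix = np.array([[10.0, 2.0, 1.0],
                       [ 2.0, 8.0, 0.5],
                       [ 1.0, 0.5, 4.0]])
    fisher = fm.fisher_matrix(fisher_matrix=matrix, param_names=names,
                              param_names_latex=latex, fiducial=fiducial)
    # drop a parameter outright (no marginalisation):
    print(eliminate_parameters(fisher, ['ns']).get_param_names())
    # extract a sub-matrix without marginalising over the others:
    print(reshuffle(fisher, ['ns', 'omegam']).get_fisher_matrix())
    # marginalise over omegal:
    print(marginalise_over(fisher, ['omegal']).get_fisher_matrix())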
|
A pack-free walking experience on the Tasman Peninsula including accommodation, food and a scenic helicopter flight brought to you by the team at Life’s an Adventure.
Capital of Tasmania and all round stunner, Hobart is a must-see for every visitor to the state.
A bustling market in Salamanca every Saturday, weird, wacky, wonderful artwork on display at TMAG and Mona, a bustling fishing port and one very trendy food scene. Hobart is a superb collection of city delights served up without the traffic, noise and chaos of our mainland counterparts!
While we love Hobart, we are all about getting people out of the city and down to our place on the Tasman Peninsula – a 90-minute drive from the city. We have teamed up with our friends at Tours Tasmania to offer one tasty package to combine some of the best attractions of the South East (including Port Arthur) with a scenic helicopter flight. You can find out more and book by following the link on the right!
If a flight is not for you, that’s ok, we forgive you. Tours Tasmania have an awesome collection of other tours that showcase Tassie and all its treasures from the ground.
For those seeking adventure and an extended stay on the Tasman Peninsula, our friends at Life’s an Adventure have partnered with us to offer a Three Capes walking experience polished off with a 30 minute scenic helicopter flight around the sea cliffs of the region. Two nights of accommodation at beautiful Stewart’s Bay Lodge is included and a tour bus will pick up and return you to Hobart. |
import math
from renderer import RenderNode
from terrain.bakery.gpuBakery import tileMapSize
from terrain.bakery.bakery import loadTex
from panda3d.core import *
#from terrain.textureRenderer import *
class GeoClipMapper(RenderNode):
def __init__(self,path,tileSource,minScale,focus):
RenderNode.__init__(self,path,NodePath(path+"_terrainNode"),heightScale=300.0)
heightMapName=self.specialMaps['height']
self.heightMapRez=0
for s in tileSource.shaders:
if s.name==heightMapName:
self.heightMapRez=s.getRez(tileMapSize)
break
        if self.heightMapRez==0: print 'Failed to determine height map resolution'
self.setShaderInput("heightMapRez",self.heightMapRez,0,0,0)
self.focus=focus
self.minScale=minScale
self.tileSource=tileSource
self.heightStage=TextureStage("height")
rezFactor=50
n=rezFactor*4-1
if n+4>=self.heightMapRez:
print 'Error: Can not have geoClipMap rez higher than height map rez'
self.rez=n
m=(n+1)/4
self.baseTileScale=minScale/n*self.heightMapRez
scale=minScale/(n-1)
self.terrainNode.setScale(scale,scale,scale)
self.shaderHeightScale=self.heightScale/scale
self.terrainNode.setShaderInput("heightScale",self.shaderHeightScale,0,0)
self.terrainNode.setShader(loader.loadShader("terrain/geoClip.sha"))
def makeGrid(xSize,ySize):
""" Size is in verts, not squares """
format=GeomVertexFormat.getV3()
vdata=GeomVertexData('grid', format, Geom.UHStatic)
vertex=GeomVertexWriter(vdata, 'vertex')
grid=Geom(vdata)
#snode=GeomNode('grid')
for x in xrange(xSize):
for y in xrange(ySize):
vertex.addData3f(x,y,0)
tri=GeomTristrips(Geom.UHStatic)
def index(lx,ly):
return ly+lx*(ySize)
for x in xrange(xSize-1):
for y in xrange(ySize):
tri.addVertex(index(x,y))
tri.addVertex(index(x+1,y))
tri.closePrimitive()
grid.addPrimitive(tri)
grid.setBoundsType(BoundingVolume.BTBox)
grid.setBounds(BoundingBox(Point3(0,0,0),Point3(xSize-1,ySize-1,self.shaderHeightScale)))
#snode.addGeom(grid)
#snode.setBoundsType(BoundingVolume.BTBox)
#snode.setBounds(BoundingBox(Point3(0,0,0),Point3(xSize-1,ySize-1,self.shaderHeightScale)))
#snode.setFinal(True)
return grid
nxn=makeGrid(n,n)
mxm=makeGrid(m,m)
mx3=makeGrid(m,3)
x3xm=makeGrid(3,m)
m2x2=makeGrid(2*m+1,2)
cNode=GeomNode('center')
cGeom=nxn.makeCopy()
cGeom.transformVertices(Mat4.translateMat(-n/2,-n/2,0))
cNode.addGeom(cGeom)
cGeom.setBoundsType(BoundingVolume.BTBox)
cGeom.setBounds(BoundingBox(Point3(-n/2,-n/2,0),Point3(n/2-1,n/2-1,self.shaderHeightScale)))
cNode.setBoundsType(BoundingVolume.BTBox)
center=_GeoClipLevel(0,self,cNode)
#NodePath(nxn).instanceTo(center).setPos(-n/2,-n/2,0)
center.reparentTo(self.terrainNode)
halfOffset=n/2
#ring=NodePath("Ring")
ring=GeomNode('ring')
def doCorner(x,y):
xd=x*n/2-(x+1)*m/2
yd=y*n/2-(y+1)*m/2
def doGeom(g,x,y):
cGeom=(g).makeCopy()
cGeom.transformVertices(Mat4.translateMat(x,y,0))
cGeom.setBoundsType(BoundingVolume.BTBox)
b=g.getBounds()
p=b.getPoint(7)
cGeom.setBounds(BoundingBox(Point3(x,y,0),Point3(p.getX()+x,p.getY()+y,self.shaderHeightScale)))
ring.addGeom(cGeom)
doGeom(mxm,xd,yd)
doGeom(mxm,xd,yd-y*(m-1))
doGeom(mxm,xd-x*(m-1),yd)
#NodePath(mxm).copyTo(ring).setPos(xd,yd,0)
#NodePath(mxm).copyTo(ring).setPos(xd,yd-y*(m-1),0)
#NodePath(mxm).copyTo(ring).setPos(xd-x*(m-1),yd,0)
if x==-1:
if y==1:
doGeom(mx3,xd,yd-y*(m+1))
#NodePath(mx3).copyTo(ring).setPos(xd,yd-y*(m+1),0)
else:
xd2=n/2-m
doGeom(mx3,xd2,yd+2*m-2)
#NodePath(mx3).copyTo(ring).setPos(xd2,yd+2*m-2,0)
else:
doGeom(x3xm,xd-x*(m+1),yd)
#NodePath(x3xm).copyTo(ring).setPos(xd-x*(m+1),yd,0)
doCorner(-1,-1)
doCorner(1,-1)
doCorner(-1,1)
doCorner(1,1)
ring.setBoundsType(BoundingVolume.BTBox)
ringCount=4
self.levels=[center]
for i in xrange(ringCount):
cNode=GeomNode('ring'+str(i))
cNode.addGeomsFrom(ring)
'''for c in ring.getChildren():
x=c.copyTo(r)
#v1=Point3()
#v2=Point3()
#x.calcTightBounds(v1,v2)
#v2.setZ(1)
node=x.node()
node.setBoundsType(BoundingVolume.BTBox)
node.setBounds(c.node().getBounds())#(BoundingBox(v1,v2))
node.setFinal(1)
x.showBounds()'''
#r.showBounds()
r=_GeoClipLevel(i+1,self,cNode)
r.reparentTo(self.terrainNode)
r.node().setBoundsType(BoundingVolume.BTBox)
#r.showBounds()
self.levels.append(r)
self.terrainNode.setShaderInput("n",n,0,0,0)
# Add a task to keep updating the terrain
taskMgr.add(self.update, "update")
self.grass=self.setUpGrass(center,n)
grassTex = loadTex("terrain/grassSheet",True)
self.grass.setShaderInput("grassSheet",grassTex)
grassTex.setWrapU(Texture.WMClamp)
grassTex.setWrapV(Texture.WMClamp)
self.terrainNode.setShaderInput("offset",0,0,0,0)
#for r in self.levels:
# for node in r.getChildren():
# node.setShaderInput("offset",node.getX()+halfOffset,node.getY()+halfOffset,0,0)
self.centerTile=None
def setUpGrass(self,node,rez):
        # create a mesh that's a bunch of disconnected rectangles, 1 tall, 0.5 wide, at every grid point
format=GeomVertexFormat.getV3()
snode=GeomNode('grass')
grass=NodePath(snode)
grass.reparentTo(node)
grass.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullNone))
grass.setShader(loader.loadShader("terrain/geoClipGrass.sha"))
cullmargin=3
def makeGrid(ofx,ofy,xStart,yStart,xEnd,yEnd):
# does not include end values, but does include start ones
vdata=GeomVertexData('grid', format, Geom.UHStatic)
vertex=GeomVertexWriter(vdata, 'vertex')
grid=Geom(vdata)
snode.setBoundsType(BoundingVolume.BTBox)
for x in xrange(xStart,xEnd):
for y in xrange(yStart,yEnd):
xp=x-ofx-.25-1
yp=y-ofy-1
vertex.addData3f(xp,yp,0)
vertex.addData3f(xp+.5,yp,0)
vertex.addData3f(xp,yp,1)
vertex.addData3f(xp+.5,yp,1)
tri=GeomTristrips(Geom.UHStatic)
def index(lx,ly):
return ((ly-yStart)+(lx-xStart)*(yEnd-yStart))*4
for x in xrange(xStart,xEnd):
for y in xrange(yStart,yEnd):
i=index(x,y)
tri.addVertex(i)
tri.addVertex(i+1)
tri.addVertex(i+2)
tri.addVertex(i+3)
tri.closePrimitive()
grid.addPrimitive(tri)
snode.addGeom(grid)
#block=NodePath(snode)
#block.reparentTo(grass)
grid.setBoundsType(BoundingVolume.BTBox)
grid.setBounds(BoundingBox(Point3(xStart-cullmargin-ofx,yStart-cullmargin-ofy,0),Point3(xEnd-1+cullmargin-ofx,yEnd-1+cullmargin-ofy,self.shaderHeightScale+cullmargin)))
#block.node().setFinal(True)
#
#grass.showBounds()
#makeGrid(rez/2,rez/2,0,0,rez,rez)
c=5
for x in xrange(c):
for y in xrange(c):
makeGrid(rez/2,rez/2,x*rez//c,y*rez//c,(x+1)*rez//c,(y+1)*rez//c)
grass.node().setBoundsType(BoundingVolume.BTBox)
#grass.showBounds()
return grass
def height(self,x,y):
if self.centerTile is None: return 0
#print 'y'
tile=self.centerTile
peeker=self.heightPeeker
tx=(x-tile.x)/tile.scale
ty=(y-tile.y)/tile.scale
c=Vec4()
sx=peeker.getXSize()
sy=peeker.getYSize()
px=(sx*tx)
py=(sy*ty)
#u=math.floor(px)/sx
#v=math.floor(py)/sy
fu=px-math.floor(px)
fv=py-math.floor(py)
#u2=math.floor(px+1)/sx
#v2=math.floor(py)/sy
px=math.floor(px)
py=math.floor(py)
#peeker.lookup(c,u,v)
def getH(x,y):
peeker.lookup(c,x/sx,y/sy)
return c.getX()+c.getY()/256+c.getZ()/(256*256)
h=(getH(px+1,py+1)*fu+getH(px,py+1)*(1-fu))*fv+(getH(px+1,py)*fu+getH(px,py)*(1-fu))*(1-fv)
#peeker.filterRect(c,px/sx,py/sy,px/sx,py/sy)
#h=c.getX()+c.getY()/256+c.getZ()/(256*256)
return h*self.heightScale
def update(self,task):
center=self.levels[0]
if center.lastTile:
maps=center.lastTile.renderMaps
t=maps[self.specialMaps['height']].tex
if self.centerTile is not center.lastTile: # new height tex!
self.heightPeeker=t.peek()
self.centerTile=center.lastTile
for i in xrange(len(self.levels),0,-1):
self.levels[i-1].update(self.levels[i] if i<len(self.levels) else None)
return task.cont
#def height(self,x,y): return 0
class _GeoClipLevel(NodePath):
def __init__(self,level,geoClipMapper,node=None):
"""
level starts at 0 in center
scale is 2**level
"""
if node:
NodePath.__init__(self,node)
else:
NodePath.__init__(self,"GeoClipLevel_"+str(level))
self.level=level
self.geoClipMapper=geoClipMapper
self.heightTex=Texture()#loadTex("renderData/textures/grass") # some texture as place holder before map is made.
self.setShaderInput("height",self.heightTex)
scale=2**(level)
self.setScale(scale,scale,1)
self.lastTile=None
self.tileScale=geoClipMapper.baseTileScale*scale
self.makingTile=False
self.setShaderInput("tileOffset",0,0,0,0)
self.setShaderInput("tilePos",0,0,0,0)
def update(self,bigger):
""" bigger is next larger _GeoClipLevel, or None is self is biggest """
# Place me!
s=int(self.getScale().getX())*2
fx=self.geoClipMapper.focus.getX(self.geoClipMapper.terrainNode)
fy=self.geoClipMapper.focus.getY(self.geoClipMapper.terrainNode)
x=int(fx)/s+1
y=int(fy)/s+1
self.setPos(x*s,y*s,0)
# Tex Offset
#node.setShaderInput("texOffset",node.getX()+halfOffset,node.getY()+halfOffset,0,0)
if self.lastTile is not None:
# get dist from center of self.lastTile to focuse
tx=(self.lastTile.x+self.tileScale/2.0)/self.geoClipMapper.terrainNode.getSx()
ty=(self.lastTile.y+self.tileScale/2.0)/self.geoClipMapper.terrainNode.getSy()
dx=self.getX()-tx
dy=self.getY()-ty
# convert dx and dy to current level scale
dx/=self.getSx()
dy/=self.getSy()
# get margin in px between current tile edge and level edge
s=self.geoClipMapper.heightMapRez
mx=s/2-abs(dx)-self.geoClipMapper.rez/2
my=s/2-abs(dy)-self.geoClipMapper.rez/2
ox=dx+s/2
oy=dy+s/2
self.setShaderInput("tileOffset",ox,oy,0,0)
self.setShaderInput("tilePos",self.lastTile.x,self.lastTile.y,self.lastTile.scale,0)
self.setShaderInput("grassData",self.lastTile.renderMaps[self.geoClipMapper.specialMaps['grassData']].tex)
self.setShaderInput("grassData2",self.lastTile.renderMaps[self.geoClipMapper.specialMaps['grassData2']].tex)
m=min(mx,my)
if (not self.makingTile) and (self.lastTile is None or m<2):
self.makingTile=True
x=self.geoClipMapper.focus.getX(self.geoClipMapper)-self.tileScale/2
y=self.geoClipMapper.focus.getY(self.geoClipMapper)-self.tileScale/2
self.geoClipMapper.tileSource.asyncGetTile(x,y,self.tileScale,self.asyncTileDone)
def asyncTileDone(self,tile):
self.lastTile=tile
print "Tile Level: "+str(self.level)
self.makingTile=False
tex=self.lastTile.renderMaps[self.geoClipMapper.specialMaps['height']].tex
tex.setMinfilter(Texture.FTNearest)
tex.setMagfilter(Texture.FTNearest)
self.setShaderInput("height",tex)
|
Astonished by Manpower, Teamwork and Total Lab Automation (TLA) in SNUH!
The Korean Association of Medical Technologists (KAMT) invited four medical laboratory scientists from Malaysia, the Philippines and Taiwan to a seven-day laboratory educational training program at the Laboratory of Medicine, Seoul National University Hospital. KAMT has been designated as the educational committee of AAMLS for the laboratory training of medical laboratory scientists from developing countries, providing on-site training programs and visiting lecturers to local laboratories. Moreover, KAMT has maintained its relationship with the Taiwan Association of Medical Technologists (TAMT) for more than a decade under a Memorandum of Understanding (MOU) for better collaboration on field-related issues and academic exchanges.
Two participants, Mr. Yun Ted Siaw, Masterskill Global College Kuching, Malaysia, and Ms. Anna Karenina Malazo Del Rosario, UHBI-Paranaque Doctor’s Hospital, the Philippines, attended the general laboratory course on the second day of training. The program was prepared by the Quality Improvement (QI) team of the Laboratory of Medicine, Seoul National University Hospital, and the Seoul Chapter of KAMT after surveying the applicants' interests through the application form, which contains a survey box named “discipline applying for” used to customize the training course.
On the last day of the education, Mr. Noh Gyeong Woon, Vice President of KAMT, presented the certificate of training to the four honored trainees from Malaysia, the Philippines and Taiwan. All the expenses for the training were fully supported by KAMT. Airfare was reimbursed upon presentation of a receipt or invoice, according to the exchange rate on the day of compensation, and seven days of accommodation in a first-grade tourist hotel, including hotel buffet breakfast, was also provided by KAMT, along with an appropriate allowance for the cost of living in Korea for seven days.
The next laboratory educational training program will be held in one of the biggest commercial laboratories in Korea in September 2017, and the 2017 participants will be given free registration, accommodation and the opportunity to share their training experiences as speakers during the 5th Congress of AAMLS in Busan, Korea. KAMT would like to invite as many participants as our international department budget allows for this splendid unification and membership exchange among the Asian countries and member associations of AAMLS. We are looking forward to meeting you here in Busan, Korea, in 2017.
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemcmdpolicy(base_resource) :
""" Configuration for command policy resource. """
def __init__(self) :
self._policyname = ""
self._action = ""
self._cmdspec = ""
self._builtin = []
self.___count = 0
@property
def policyname(self) :
"""Name for a command policy. Must begin with a letter, number, or the underscore (_) character, and must contain only alphanumeric, hyphen (-), period (.), hash (#), space ( ), at (@), equal (=), colon (:), and underscore characters. Cannot be changed after the policy is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name for a command policy. Must begin with a letter, number, or the underscore (_) character, and must contain only alphanumeric, hyphen (-), period (.), hash (#), space ( ), at (@), equal (=), colon (:), and underscore characters. Cannot be changed after the policy is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def action(self) :
"""Action to perform when a request matches the policy.<br/>Possible values = ALLOW, DENY.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
"""Action to perform when a request matches the policy.<br/>Possible values = ALLOW, DENY
"""
try :
self._action = action
except Exception as e:
raise e
@property
def cmdspec(self) :
"""Regular expression specifying the data that matches the policy.<br/>Minimum length = 1.
"""
try :
return self._cmdspec
except Exception as e:
raise e
@cmdspec.setter
def cmdspec(self, cmdspec) :
"""Regular expression specifying the data that matches the policy.<br/>Minimum length = 1
"""
try :
self._cmdspec = cmdspec
except Exception as e:
raise e
@property
def builtin(self) :
""".<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(systemcmdpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.systemcmdpolicy
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.policyname) :
return str(self.policyname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add systemcmdpolicy.
"""
try :
if type(resource) is not list :
addresource = systemcmdpolicy()
addresource.policyname = resource.policyname
addresource.action = resource.action
addresource.cmdspec = resource.cmdspec
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].policyname = resource[i].policyname
addresources[i].action = resource[i].action
addresources[i].cmdspec = resource[i].cmdspec
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete systemcmdpolicy.
"""
try :
if type(resource) is not list :
deleteresource = systemcmdpolicy()
if type(resource) != type(deleteresource):
deleteresource.policyname = resource
else :
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update systemcmdpolicy.
"""
try :
if type(resource) is not list :
updateresource = systemcmdpolicy()
updateresource.policyname = resource.policyname
updateresource.action = resource.action
updateresource.cmdspec = resource.cmdspec
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ systemcmdpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].action = resource[i].action
updateresources[i].cmdspec = resource[i].cmdspec
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the systemcmdpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = systemcmdpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = systemcmdpolicy()
obj.policyname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [systemcmdpolicy() for _ in range(len(name))]
obj = [systemcmdpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = systemcmdpolicy()
obj[i].policyname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of systemcmdpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = systemcmdpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the systemcmdpolicy resources configured on NetScaler.
"""
try :
obj = systemcmdpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of systemcmdpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = systemcmdpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Action:
ALLOW = "ALLOW"
DENY = "DENY"
class systemcmdpolicy_response(base_response) :
def __init__(self, length=1) :
self.systemcmdpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.systemcmdpolicy = [systemcmdpolicy() for _ in range(length)]
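# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated SDK): a minimal, illustrative example
# of driving the classmethods above. It assumes the standard NITRO
# nitro_service client; the NSIP, credentials and policy values are placeholders.
from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

def _example_add_and_count():
    client = nitro_service("10.0.0.1", "http")          # hypothetical NSIP
    client.login("nsroot", "nsroot")
    policy = systemcmdpolicy()
    policy.policyname = "deny-shell"                    # illustrative values
    policy.action = systemcmdpolicy.Action.DENY
    policy.cmdspec = "shell.*"
    systemcmdpolicy.add(client, policy)                 # create the policy
    print(systemcmdpolicy.count(client))                # count configured policies
    client.logout()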
Thanks for a Great Birthday!
I'd just like to thank everyone who came out last night to help me celebrate turning 26, and everyone else who couldn't make it but sent me your best wishes. I had a great time last night seeing so many of my best friends (almost) all at once and seeing what great friends I have. It really means a lot to me and I hope you all had a good time, too. Since I was sick the week before, I've lost my voice from shouting over the music, but I don't have a smidge of hangover and feel hopeful and loved going into my late 20s.
Looks like you-all had a great time. Sorry I missed it.
Kristina, your outfit was so hot! Aaron is a lucky guy.
It was a fun party. I love cake.
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring, invalid-name
##############################################################################
#
# Copyright (c) 2011, Martín Raúl Villalba
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
from __future__ import division, absolute_import, print_function, unicode_literals
from threading import Lock
# USB1 driver uses a USB<->Serial bridge
from serial import Serial, SerialException, SerialTimeoutException
# USB2 driver uses direct USB connection. Requires PyUSB
from usb.control import get_interface
from usb.core import USBError, find as findDeviceUSB
from usb.util import (find_descriptor, claim_interface, release_interface,
endpoint_direction, ENDPOINT_OUT, ENDPOINT_IN)
from ant.core.exceptions import DriverError
class Driver(object):
def __init__(self, device, log=None, debug=False):
self.device = device
self.debug = debug
self.log = log
self._lock = Lock()
def open(self):
with self._lock:
if self._opened:
raise DriverError("Could not open device (already open).")
self._open()
if self.log:
self.log.logOpen()
@property
def opened(self):
with self._lock:
return self._opened
def close(self):
with self._lock:
if not self._opened:
raise DriverError("Could not close device (not open).")
self._close()
if self.log:
self.log.logClose()
def read(self, count):
if count <= 0:
raise DriverError("Could not read from device (zero request).")
if not self.opened:
raise DriverError("Could not read from device (not open).")
data = self._read(count)
with self._lock:
if self.log:
self.log.logRead(data)
if self.debug:
self._dump(data, 'READ')
return data
def write(self, data):
if len(data) <= 0:
raise DriverError("Could not write to device (no data).")
if not self.opened:
raise DriverError("Could not write to device (not open).")
ret = self._write(data.encode())
with self._lock:
if self.debug:
self._dump(data, 'WRITE')
if self.log:
self.log.logWrite(data[0:ret])
return ret
    @staticmethod
    def _dump(data, title):
        if len(data) == 0:
            return
        print("========== [{0}] ==========".format(title))
        length = 8
        line = 0
        while data:
            row = data[:length]
            data = data[length:]
            hex_data = ['%02X' % ord(byte) for byte in row]
            print('%04X' % line, ' '.join(hex_data))
            line += length  # advance the offset column (it was never incremented)
        print()
@property
def _opened(self):
raise NotImplementedError()
def _open(self):
raise NotImplementedError()
def _close(self):
raise NotImplementedError()
def _read(self, count):
raise NotImplementedError()
def _write(self, data):
raise NotImplementedError()
class USB1Driver(Driver):
    def __init__(self, device, baud_rate=115200, log=None, debug=False):
        # pass device through to the base class; the original call dropped it,
        # shifting log and debug into the wrong positional slots
        super(USB1Driver, self).__init__(device, log, debug)
        self.baud = baud_rate
        self._serial = None
def _open(self):
try:
dev = Serial(self.device, self.baud)
except SerialException as e:
raise DriverError(str(e))
if not dev.isOpen():
raise DriverError("Could not open device")
self._serial = dev
self._serial.timeout = 0.01
@property
def _opened(self):
return self._serial is not None
    def _close(self):
        self._serial.close()
        self._serial = None  # reset so _opened reports closed again
def _read(self, count):
return self._serial.read(count)
def _write(self, data):
try:
count = self._serial.write(data)
self._serial.flush()
except SerialTimeoutException as e:
raise DriverError(str(e))
return count
class USB2Driver(Driver):
    def __init__(self, log=None, debug=False):
        # USB2 devices are located by vendor/product ID, so there is no device
        # path; pass None through to the base class (the original call shifted
        # log and debug into the wrong positional slots)
        super(USB2Driver, self).__init__(None, log, debug)
        self._ep_out = None
        self._ep_in = None
        self._dev = None
        self._int = None
def _open(self):
# Most of this is straight from the PyUSB example documentation
dev = findDeviceUSB(idVendor=0x0fcf, idProduct=0x1008)
if dev is None:
raise DriverError("Could not open device (not found)")
# make sure the kernel driver is not active
if dev.is_kernel_driver_active(0):
try:
dev.detach_kernel_driver(0)
            except USBError as e:
                raise DriverError("Could not detach kernel driver: {0}".format(e))
dev.set_configuration()
cfg = dev.get_active_configuration()
interface_number = cfg[(0, 0)].bInterfaceNumber
intf = find_descriptor(cfg,
bInterfaceNumber=interface_number,
bAlternateSetting=get_interface(dev, interface_number)
)
claim_interface(dev, interface_number)
ep_out = find_descriptor(intf, custom_match= \
lambda e: endpoint_direction(e.bEndpointAddress) == ENDPOINT_OUT
)
assert ep_out is not None
ep_in = find_descriptor(intf, custom_match= \
lambda e: endpoint_direction(e.bEndpointAddress) == ENDPOINT_IN
)
assert ep_in is not None
self._ep_out = ep_out
self._ep_in = ep_in
self._dev = dev
self._int = interface_number
@property
def _opened(self):
return self._dev is not None
def _close(self):
release_interface(self._dev, self._int)
self._dev = None
def _read(self, count):
return self._ep_in.read(count).tostring()
def _write(self, data):
return self._ep_out.write(data)
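# ---------------------------------------------------------------------------
# Usage sketch (not part of the driver source): a minimal open/write/read/close
# cycle. The serial device path and the raw payload bytes are illustrative.
if __name__ == '__main__':
    driver = USB1Driver('/dev/ttyUSB0', baud_rate=115200, debug=True)
    driver.open()
    try:
        driver.write('\xa4\x01\x4a\x00')   # illustrative raw ANT frame
        print(driver.read(8))              # read back up to 8 bytes
    finally:
        driver.close()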
Width 30.3, height 9.8, depth 50.8 cm.
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# This code alerts on filesystem write events against auditd-monitored paths, aggregated by the original user who made them
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, PhraseMatch
class WriteAudit(AlertTask):
def main(self):
self.parse_config('write_audit.conf', ['skipprocess', 'expectedusers'])
search_query = SearchQuery(minutes=15)
search_query.add_must([
TermMatch('category', 'write'),
TermMatch('details.auditkey', 'audit'),
])
for processname in self.config.skipprocess.split():
search_query.add_must_not(PhraseMatch('details.processname', processname))
self.filtersManual(search_query)
self.searchEventsAggregated('details.originaluser', samplesLimit=10)
self.walkAggregations(threshold=2)
def onAggregation(self, aggreg):
category = 'write'
severity = 'WARNING'
tags = ['audit']
users = set()
paths = set()
for event in aggreg['events']:
users.add(event['_source']['details']['user'])
paths.add(event['_source']['summary'].split(' ')[1])
summary = '{0} Filesystem write(s) to an auditd path ({1}) by {2} ({3})'.format(
aggreg['count'],
', '.join(paths),
', '.join(users),
aggreg['value']
)
if aggreg['value'] in self.config.expectedusers.split(' '):
severity = 'NOTICE'
hostnames = self.mostCommon(aggreg['allevents'], '_source.hostname')
# did they modify more than one host?
# or just modify an existing configuration more than once?
if len(hostnames) > 1:
for i in hostnames[:5]:
summary += ' on {0} ({1} hosts)'.format(i[0], i[1])
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
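# A plausible write_audit.conf for the alert above (illustrative values only;
# assumption: MozDef alert configs are INI-style files whose options are read
# back as whitespace-separated lists by the split() calls in main() and
# onAggregation()):
#
#   [options]
#   skipprocess = udevd collectd
#   expectedusers = deploybot opsadmin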
Philip Levine/the narrator employs chalk as a metaphor for aging and death, and its blackboard tracings as the tracings of memory. He endows the Senegalese man--probably deranged--with dignity. Rather than avoiding him, as most people would, he stops to listen to him and to reflect on their shared humanity. A beautiful and unusual poem that could be discussed along with Lucille Clifton’s Miss Rosie (in this database annotated by Felice Aull and also by Lois Nixon).
The collection in which this poem appears won the Pulitzer Prize.
# -*- coding: utf-8 -*-
# Copyright 2011 Florian Ledermann <[email protected]>
#
# This file is part of OpenResources
# https://bitbucket.org/floledermann/openresources/
#
# OpenResources is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenResources is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenResources. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template
from openresources import views
from openresources.models import Tag
# commenting out autocomplete stuff for now, probably needs custom implementation
#from autocomplete.views import autocomplete
#autocomplete.register(
# id = 'keys',
# queryset = Tag.objects.values('key').distinct(),
# fields = ('key',),
# limit = 20,
# key = 'key',
# label = 'key',
#)
view_patterns = patterns('',
url(r'^$', views.view, name='openresources_view'),
url(r'^in/(?P<area>[0-9A-Za-z-_]+)/$', views.view, name='openresources_view'),
url(r'^json/$', views.view_json, name='geojson'),
url(r'^(?P<mode>[0-9A-Za-z-_]+)/$', views.view, name='openresources_view'),
url(r'^in/(?P<area>[0-9A-Za-z-_]+)/(?P<mode>[0-9A-Za-z-_]+)/$', views.view, name='openresources_view'),
)
urlpatterns = patterns('',
url(r'^views/$', views.views, name='openresources_views'),
url(r'^views/new/$', views.edit_view, name='openresources_new_view'),
url(r'^views/edit/(?P<name>[0-9A-Za-z-_]+)/$', views.edit_view, name='openresources_edit_view'),
url(r'^templates/$', views.templates, name='openresources_templates'),
url(r'^templates/new/$', views.edit_template, name='openresources_template_edit'),
url(r'^template/(?P<name>[0-9A-Za-z-_]+)/$', views.edit_template, name='openresources_template_edit'),
    # temporary, until the resource view supports an assigned template
url(r'^template-resource/(?P<template>[0-9A-Za-z-_]+)/(?P<resource>[0-9A-Za-z-_]+)/$', views.edit_with_template, name='openresources_edit_with_template'),
url(r'^template-resource/(?P<template>[0-9A-Za-z-_]+)/$', views.edit_with_template, name='openresources_edit_with_template'),
#url(r'^all/$', views.all_resources, name='openresources_all'),
url(r'^tags/$', views.tags, name='openresources_tags'),
# *? matches key non-greedy, matching only as few as possible characters if value has = sign in it
url(r'^tag/(?P<key>.*?)=(?P<value>.*)/$', views.tag, name='openresources_tag'),
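    # e.g. 'tag/cuisine=fast=food/' resolves with key='cuisine', value='fast=food'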
url(r'^tag/(?P<key>.*)/$', views.tag, name='openresources_tag_key'),
url(r'^tools/rename_tag/$', views.rename_tag, name='openresources_rename_tag'),
url(r'^icons/$', views.icons, name='openresources_icons'),
url(r'^icons/add/$', views.add_icon, name='openresources_new_icon'),
url(r'^choices.json$', views.resource_choices),
url(r'^tag/(?P<key>.*)/choices.json$', views.tag_choices),
# *? matches key non-greedy, matching only as few as possible characters if value has '=' sign in it
url(r'^with/tag/(?P<key>.*?)=(?P<value>.*)/$', views.resources_by_tag, name='openresources_with_tag'),
url(r'^with/tag/(?P<key>.*)/$', views.resources_by_tag, name='openresources_with_key'),
url(r'^resource/(?P<key>.*)/$', views.resource, name='openresources_resource'),
url(r'^new/$', views.edit_resource, name='openresources_new'),
url(r'^edit/(?P<key>.*)/$', views.edit_resource, name='openresources_edit'),
#url('^autocomplete/(\w+)/$', autocomplete, name='autocomplete'),
url(r'^context/set/$', views.set_context, name='openresources_set_context'),
url(r'^search/$', views.search, name='openresources_search'),
url(r'^credits/$', direct_to_template, {'template': 'openresources/credits.html'}, name='openresources_credits'),
url(r'^json/all/$', views.all_json, name='geojson_all'),
url(r'^', include(view_patterns)),
url(r'^view/(?P<name>[0-9A-Za-z-_]+)/', include(view_patterns)),
# this cannot be reached, as we show the default view as an index page
# however this is used for reversing the index page url in templates
url(r'^$', views.index, name='openresources_index'),
)
For the second game in a row, the St. Thomas Tommies pulled out a much-needed win, this time a 1 - 0 road victory over the Mount Allison Mounties. A late third-period goal by rookie Kenya Marcelline was the only puck to dent the twine, and Kristin Wolfe picked up her second shutout of the season versus the Mounties.
The first and second periods were scoreless affairs, thanks if not to the goalies then definitely to the goal posts, as STU and MTA combined for six doses of iron. Shots after two periods were 13 to 13.
In the final period Mount Allison and St. Thomas traded powerplay chances, one each. In what proved to be a wise decision, Coach Murphy called a late time out with 1:53 to play. He must have said something good, because 39 seconds later the Tommies potted the winner.
Kenya Marcelline would score her third goal of the year, none larger than this one, at 18:43 to lift the Tommies to the one goal victory. Kayla Blackmore was credited with the only assist.
Kristin Wolfe would make the goal stand up as the winner, stopping all 21 shots fired her way. STU shot 22 at MTA's Kate O'Brien, with number 22 (Marcelline's number) being the deadliest.
With the win STU clinches second position in the AUS and will return home for their final home game this Saturday night against Moncton. Come out to the game and help the Tommies support Mental Health Awareness.
#! /usr/bin/env python
import argparse
import sys
import tensorflow as tf
import netifaces
import dns.resolver
import pieshell
import multiprocessing
import click
FLAGS = None
def run_server(spec, job_name, task_index):
print "Starting server /job:%s/task:%s as %s..." % (job_name, task_index, spec[job_name][task_index])
tf.train.Server(
tf.train.ClusterSpec(spec),
job_name=job_name,
task_index=task_index
).join()
def generate_tasks(servers, base_port):
"""Input: {"server1": ncpus, "server2":ncpus...}
Output: (("server1", port1), ("server1", port2)...("serverN", "portM"))
"""
for server, ncpus in servers:
for cpuidx in xrange(0, ncpus):
yield (server, base_port + cpuidx)
def generate_cluster(servers, base_port, n_ps_tasks):
tasks = ["%s:%s" % (server, port) for server, port in generate_tasks(servers, base_port)]
ps_tasks = tasks[:n_ps_tasks]
worker_tasks = tasks[n_ps_tasks:]
return {'ps': ps_tasks, 'worker': worker_tasks}
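# Example (derived from the functions above): servers=[("node1", 2), ("node2", 2)],
# base_port=5600 and n_ps_tasks=1 yield
#   {'ps': ['node1:5600'],
#    'worker': ['node1:5601', 'node2:5600', 'node2:5601']}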
def find_local_server_idx(servers):
local_ips = set([netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
for iface in netifaces.interfaces()
if netifaces.AF_INET in netifaces.ifaddresses(iface)])
local_ips.add("127.0.1.1") # Hack for debian
task_ips = [server[0] for server in servers]
task_ips = [record.address
for ip in task_ips
for record in dns.resolver.query(ip, 'A')]
local_task_ip = iter(local_ips.intersection(set(task_ips))).next()
return task_ips.index(local_task_ip)
def generate_task_indexes(servers, server_idx, n_ps_tasks):
base_task_idx = sum(s[1] for s in servers[:server_idx])
server = servers[server_idx]
for n in xrange(0, server[1]):
task_idx = base_task_idx + n
if task_idx >= n_ps_tasks:
yield "worker", task_idx - n_ps_tasks
else:
yield "ps", task_idx
def servers_to_str(servers):
return ",".join("%s:%s" % s for s in servers)
def str_to_servers(spec):
    # 'spec' rather than 'str' to avoid shadowing the built-in
    return [(name, int(ncpus)) for name, ncpus in (s.split(":") for s in spec.split(","))]
def introspect_cluster(servernames):
return ",".join(pieshell.env.parallel("--no-notice", "--nonall", "--line-buffer", "-S", servernames,
'echo -n "$(hostname):"; cat /proc/cpuinfo | grep "processor" | wc -l'))
def start_cluster(servernames, base_port, n_ps_tasks):
servers = introspect_cluster(servernames)
print pieshell.env.parallel(
'--no-notice', '--nonall', '--line-buffer', '--tag',
'-S', servernames,
'nohup tfprism node run --base_port %s --ps_tasks %s %s < /dev/null > tfprism.log 2>&1 & echo "$!" > /var/run/tfprism.pid; sleep 2' % (
base_port, n_ps_tasks, servers))
def stop_cluster(servernames):
print pieshell.env.parallel(
'--no-notice', '--nonall', '--line-buffer', '--tag',
'-S', servernames,
"kill -KILL $(cat /var/run/tfprism.pid)" % servers)
def run_node(servers, base_port, n_ps_tasks):
servers = str_to_servers(servers)
cluster_spec = generate_cluster(servers, base_port, n_ps_tasks)
procs = [multiprocessing.Process(target=run_server, args=(cluster_spec, job_name, task_index))
for job_name, task_index in generate_task_indexes(servers, find_local_server_idx(servers), n_ps_tasks)]
for proc in procs:
proc.daemon = True
proc.start()
for proc in procs:
proc.join()
@click.group()
def main():
pass
@main.group()
def node():
pass
@node.command()
@click.argument("servers")
@click.option('--base_port', default=5600)
@click.option('--ps_tasks', default=1)
def run(servers, base_port, ps_tasks):
run_node(servers, base_port, ps_tasks)
@main.group()
def cluster():
pass
@cluster.command()
@click.argument("servers")
@click.option('--base_port', default=5600)
@click.option('--ps_tasks', default=1)
def start(servers, base_port, ps_tasks):
start_cluster(servers, base_port, ps_tasks)
@cluster.command()
@click.argument("servers")
def stop(servers):
stop_cluster(servers)
if __name__ == "__main__":
main()
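# Example CLI session (hostnames are illustrative; 'tfprism' is the entry point
# name this script is invoked as in start_cluster above):
#   tfprism cluster start node1,node2 --base_port 5600 --ps_tasks 1
#   tfprism node run node1:4,node2:4 --ps_tasks 1
#   tfprism cluster stop node1,node2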
Based on popular usage, it is 3.280 times more common for Brendo to be a boy's name.
I am not aware of any deceased famous people named Brendo.
When naming your baby Brendo, it's important to consider the gender of the name itself. When people look at the name Brendo, they might ask the question, "is Brendo a man or a woman?", or "what is the gender of the name Brendo?" Some names are more gender neutral than others, and some names are more strongly associated with either males or females. Some spelling variations of the name Brendo might be more popular than others. The Baby Name Guesser can answer all these questions about the name Brendo. To find out more about Brendo, Baby Name Guesser uses up-to-date data from across the Internet on how the name Brendo is actually used.
# -*- coding: UTF-8 -*-
"""
@author: [email protected]
@time: 8/10/17
@desc: virtual card route
"""
import json
from flask import Blueprint
from flask import jsonify
from flask import request
from peewee import DoesNotExist
from flask_jwt_extended import jwt_required, get_jwt_identity
from playhouse.shortcuts import model_to_dict
from server.service import virtual_card_service
from server.utility.json_utility import models_to_json, custom_models_to_json
from server.utility.exception import *
from server.service import wx_payment_service
from server.database.model import User
from server.database.model import VirtualCard
from server.utility.constant.basic_constant import \
WxPaymentBody, WxPaymentAttach
PREFIX = '/virtual_card'
virtual_card_app = Blueprint("virtual_card", __name__, url_prefix=PREFIX)
# ***************************** virtual card ***************************** #
# get the virtual card
@virtual_card_app.route('', methods=['GET'])
@jwt_required
def get_virtual_card():
username = get_jwt_identity()
# username = request.args.get("username")
try:
virtual_card = virtual_card_service.get_virtual_card(
card_no=username
)
virtual_card = model_to_dict(virtual_card, recurse=False)
return jsonify({'response': virtual_card}), 200
except DoesNotExist as e:
return jsonify({
'response': {
'error': e.args,
'message': '未开通虚拟消费卡'
}
}), 400
# ***************************** deposit ***************************** #
# # get the deposit amount
# @virtual_card_app.route('/deposit', methods=['GET'])
# def get_deposit():
# """
# check if the card deposited
# :param card_no: card number
# :return: True of False
# """
# username = request.args.get("username")
# try:
# deposit = virtual_card_service.get_deposit(
# card_no=username
# )
# return jsonify({'response': deposit}), 200
#
# except DoesNotExist as e:
# return jsonify({
# 'response': {
# 'error': e.args,
# 'message': '未开通虚拟消费卡'
# }
# })
# pay the deposit
@virtual_card_app.route('/deposit', methods=['POST'])
@jwt_required
def pay_deposit():
"""
pay deposit
eg = {
# "card_no": "bingwei",
# "deposit_fee": 199
}
:return:
"""
username = get_jwt_identity()
data = request.get_json()
openid = data.get("openid")
    # if no openid was passed in, fall back to the one stored on the user
if not openid:
user = User.get(username=username)
openid = user.we_chat_id
try:
deposit_fee = virtual_card_service.pre_pay_deposit(
card_no=username,
)
        # create the prepay order
result = wx_payment_service.get_prepay_id_json(
openid=openid,
body=WxPaymentBody.DEPOSIT,
total_fee=deposit_fee * 100,
attach={
"code": WxPaymentAttach.DEPOSIT
}
)
return jsonify({
'response': result
}), 200
except Error as e:
return jsonify({
'response': {
'error': e.args,
'message': '%s' % e.args
}
}), 400
# refund the deposit
@virtual_card_app.route('/deposit/return_deposit', methods=['POST'])
@jwt_required
def return_deposit():
"""
eg = {
# "comment": "test",
}
return deposit
:return:
"""
username = get_jwt_identity()
try:
result, record, refund_record = \
virtual_card_service.return_deposit(
card_no=username
)
return jsonify({
'response': {
"result": result,
"record": model_to_dict(record, recurse=False),
"refund_record": model_to_dict(refund_record, recurse=False)
}}), 200
except Error as e:
return jsonify({
'response': {
'error': e.args,
'message': '%s' % e.args
}
}), 400
# ***************************** balance ***************************** #
# # get the balance
# @virtual_card_app.route('/balance', methods=['GET'])
# def get_card_balance():
# """
# get card balance
#
# :return: balance
# """
# username = request.args.get("username")
# try:
# balance = virtual_card_service.get_card_balance(
# card_no=username
# )
# return jsonify({
# 'response': {
# 'balance': balance,
# }
# })
# except DoesNotExist as e:
# return jsonify({
# 'response': {
# 'error': e.args,
# 'message': '未开通虚拟消费卡'
# }
# })
# # consume balance
# @virtual_card.route('/balance/consume', methods=['POST'])
# def consume_virtual_card():
# """
# consume virtual card
#
# eg = {
# "username": "bingwei",
# "amount": 120
# }
# :return:
# """
# data = request.get_json()
# result, record = virtual_card_service.consume_virtual_card(
# card_no=data["username"],
# amount=data["amount"],
# )
# return jsonify({'response': {
# "result": result,
# "record": model_to_dict(record)
# }}), 200
# top up
@virtual_card_app.route('/balance/top_up', methods=['POST'])
@jwt_required
def pre_top_up():
"""
generate top up prepay
top up virtual card
eg = {
"top_up_fee": 120,
"openid": "",
}
:return:
:rtype:
"""
username = get_jwt_identity()
data = request.get_json()
openid = data.get("openid")
    # if no openid was passed in, fall back to the one stored on the user
try:
top_up_fee = float(data["top_up_fee"])
except ValueError as e:
return jsonify({
'response': {
'error': e.args,
'message': "金额不是数字"
}
}), 400
if not openid:
user = User.get(username=username)
openid = user.we_chat_id
try:
# check
virtual_card_service.pre_top_up(
card_no=username,
)
        # create the prepay order
result = wx_payment_service.get_prepay_id_json(
openid=openid,
body=WxPaymentBody.BALANCE,
total_fee=top_up_fee * 100,
attach={
"code": WxPaymentAttach.BALANCE
}
)
return jsonify({
'response': result
}), 200
except Error as e:
return jsonify({
'response': {
'error': e.args,
'message': '%s' % e.args
}
}), 400
# ***************************** consume records ***************************** #
# get consume records
@virtual_card_app.route('/consume_record', methods=['GET'])
@jwt_required
def get_consume_record():
"""
get consume records
:param card_no: card number
:return: consume records
"""
username = get_jwt_identity()
# username = request.args.get("username")
record = virtual_card_service.get_consume_record(
card_no=username
)
new_records = custom_models_to_json(record, [
"consume_date_time",
"consume_event",
"consume_fee",
"id"
])
if record:
return jsonify({'response': new_records}), 200
else:
return jsonify({'response': 'No record found'}), 404
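# ---------------------------------------------------------------------------
# Client-side sketch (not part of the service): illustrative calls against the
# routes above. The host, JWT token, openid and fee values are placeholders.
import requests

BASE = 'http://localhost:5000/virtual_card'
HEADERS = {'Authorization': 'Bearer <JWT_TOKEN>'}

print(requests.get(BASE, headers=HEADERS).json())        # fetch the card

resp = requests.post(BASE + '/balance/top_up', headers=HEADERS,
                     json={'top_up_fee': 120, 'openid': 'wx-openid-123'})
print(resp.status_code, resp.json())                     # wx prepay order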
Is your succulent looking different? Is it growing tall and thin, leggy, and all stretched out?
If this is the case, then your succulent suffers from etiolation. This means that your succulent is growing in insufficient light.
Sad to say, once the damage is done, it can’t be undone. But it can recover. And you can end up with more plants in the process. Win!
Let’s take a closer look at this stretched Crassula perforata. Find out why this happened, and what to do to fix it.
Why do Succulents Stretch out?
Succulents etiolate, or stretch, when they lack sunlight. This can happen to succulents kept outdoors in a shady spot, but it is most often seen in succulents kept inside our homes.
When a succulent does not have enough light, it will try to get closer to wherever the light comes from, changing the way it grows to reach as much light as possible.
All the plant's energy should go into being the best plant it can be, but instead it goes into searching for light and trying to survive.
Your succulent will show you when it doesn’t get enough light. Here’s what to look out for.
It will start subtly: the leaves will start to point down. Plants curl their leaves to give themselves a larger leaf area for the light to hit.
Keep an eye out for this. If you catch it early enough you can move the plant closer to the window, or maybe spoil it with a grow light. These special lights used to be quite large and expensive, but there are currently great smaller and more affordable grow lights available.
If the succulent is not moved towards the light, it will slowly start leaning toward the light. In its quest for light, the succulent isn’t using its energy to grow new leaves but uses its energy to grow faster.
What you will see is your succulent stretching and growing taller with wide gaps between the leaves. Again, this is another way the plant tries to get more light.
The unfortunate thing is that when your succulent is stretched, you can’t undo this.
When you notice the first signs of your succulent stretching, obviously provide it with more light. Find the brightest and sunniest window in your house for your succulent. The stretched part of the plant won’t un-stretch, but new growth will be more closely stacked together.
What we will do is give the plant a new start by cutting it down and propagating its parts. This takes some drastic measures, but in time will leave you with multiple new succulents.
Start by cutting off the top of your succulent, the crown. Make your cut on the bottom of the first etiolated stem part. This way you have the whole crown and enough stem on the cutting to plant it later.
Next, we’re taking stem cuttings. Cut off the stem with at least two leaf pairings on it. Gently strip off the bottom leaf. This will give you the stem cutting with at least one leaf on top, and a leaf without a stem. Both can be propagated.
Repeat taking stem cuttings. How many cuttings you take depends on how tall your succulent is, and how far down you want to prune.
I cut this crassula stem all the way down, making sure to have at least one leaf pair remaining. Placed in a sunny spot it will continue to grow.
Let the cuttings dry out for one or two days.
Place your cuttings on a layer of well-draining potting soil, or mix some sand and/or perlite in with the regular potting mix. Stick the stems of the crown and stem cuttings into the soil. The loose leaves can be placed right on top of the soil.
Now the only thing left to do is wait.
The stem parts will grow new roots and start to grow new crowns. Most likely more than one.
The loose leaf pairs will take a lot longer to root, but with patience, these too can produce new crowns and grow.
When your cuttings turn into new plants, take care of them as you normally would, just make sure to give them plenty of bright indirect light!
Want your own non-stretched Crassula Succulent to take care of? You can get them delivered through Amazon or Etsy!
About three weeks later the succulent crown has rooted nicely and is showing new growth. The stem cuttings have rooted as well and are all growing two new crowns. Still waiting for the leaves to root. Patience is a virtue.
What I do know is that what once was a leggy, stretched out succulent, is now transforming itself into a new full plant.
How to fix a stretched Echeveria?
Now, what about if you have a stretched out Echeveria succulent?
It’s simple and very similar to fixing the Crassula above. I got this Echeveria setosa that has stretched a bit and has some sunburnt bits.
It is not living its best life as it is. Let’s fix it.
Start by beheading the crown of your leggy Echeveria.
Don’t feel bad. You are not hurting your plant. You are helping it.
Make sure the crown has a piece of bare stem that you can plant in soil, so you might have to carefully remove a few bottom leaves.
Remove all the leaves still on the remaining stem.
Now you have the crown, a bunch of loose leaves, and a little stem stump. All these can be propagated into healthy new plants.
Let the crown cutting and leaves callous over. Next, you can plant the crown in the soil, and then lay the leaves out on well-draining soil.
The crown will root and continue to grow, the leaves will start growing roots and new plants can grow from the cut-off part of the leaf. The stump will take some time but will grow new little baby plants around the sides of the stump.
Let’s fix all those leggy, etiolated stretched out succulents and make more plants!
If you want to read more about Succulent care, read How to Grow Succulents Indoors.
Want your own Echeveria Succulent to take care of? You can get them delivered through Amazon or Etsy!
As soon as you see new growth, it means your propagated cuttings have rooted. From that moment you can water as normal. Water succulents only when the soil is very dry. You can read some more about how to water your succulents and care for them here.
Good luck with your succulents!
# encoding: UTF-8
'''
This module implements the CTA strategy engine. For CTA-style strategies it
abstracts away and simplifies part of the low-level interfaces.

Rules for closing today's vs. yesterday's positions:
1. An ordinary close (OFFSET_CLOSE) is equivalent to closing yesterday's
   position (OFFSET_CLOSEYESTERDAY).
2. Only SHFE (Shanghai Futures Exchange) products need to distinguish between
   closing today's and closing yesterday's positions.
3. When an SHFE future holds a position opened today, Sell and Cover use
   OFFSET_CLOSETODAY; otherwise they use OFFSET_CLOSE.
4. This design means that a Sell or Cover whose volume exceeds today's
   position will fail (i.e. when the user wants to close today's and
   yesterday's positions with a single instruction).
5. The rationale: vn.trader mainly targets users who find platforms such as
   TB, MC and Pyramid too limited (i.e. who want higher-frequency trading),
   and a trading strategy should never run into the situation described in 4.
6. Users who do need the behaviour in 4 must build a customised architecture
   that separates the strategy signal engine from the order execution engine
   (that's right, you have to write it yourself).
'''
from __future__ import division
import json
import os
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from vnpy.event import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.vtConstant import *
from vnpy.trader.vtObject import VtTickData, VtBarData
from vnpy.trader.vtGateway import VtSubscribeReq, VtOrderReq, VtCancelOrderReq, VtLogData
from vnpy.trader.vtFunction import todayDate
from vnpy.trader.app.ctaStrategy.ctaBase import *
from vnpy.trader.app.ctaStrategy.strategy import STRATEGY_CLASS
########################################################################
class CtaEngine(object):
"""CTA策略引擎"""
settingFileName = 'CTA_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
settingFileName = os.path.join(path, settingFileName)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # current date
self.today = todayDate()
        # dictionary of strategy instances
        # key: strategy name, value: strategy instance (names must be unique)
self.strategyDict = {}
        # mapping from vtSymbol to strategy instances (used to push tick data)
        # several strategies may trade the same vtSymbol, so the key is the
        # vtSymbol and the value is a list of all related strategy objects
self.tickStrategyDict = {}
        # mapping from vtOrderID to strategy object (used to push order/trade data)
        # key: vtOrderID, value: strategy object
self.orderStrategyDict = {}
        # counter for local stop order numbering
self.stopOrderCount = 0
# stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # local stop order dictionaries
        # key: stopOrderID, value: stopOrder object
        self.stopOrderDict = {}                 # cancelled stop orders stay in this dict
        self.workingStopOrderDict = {}          # cancelled stop orders are removed from this dict
        # position buffer dictionary
        # key: vtSymbol, value: PositionBuffer object
self.posBufferDict = {}
        # set of trade IDs, used to filter out duplicate trade pushes
self.tradeSet = set()
        # engine type: live trading
self.engineType = ENGINETYPE_TRADING
        # register event listeners
self.registerEvent()
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
contract = self.mainEngine.getContract(vtSymbol)
req = VtOrderReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
req.price = self.roundToPriceTick(contract.priceTick, price)
req.volume = volume
req.productClass = strategy.productClass
req.currency = strategy.currency
        # by design, orders sent by the CTA engine may only be limit orders
req.priceType = PRICETYPE_LIMITPRICE
        # map CTA order types
if orderType == CTAORDER_BUY:
req.direction = DIRECTION_LONG
req.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
req.direction = DIRECTION_SHORT
            # only SHFE distinguishes close-today from close-yesterday
if contract.exchange != EXCHANGE_SHFE:
req.offset = OFFSET_CLOSE
else:
                # get the cached position data
posBuffer = self.posBufferDict.get(vtSymbol, None)
                # if no position cache is available, default to closing yesterday
if not posBuffer:
req.offset = OFFSET_CLOSE
                # otherwise, if there is a long position opened today, close today
elif posBuffer.longToday:
req.offset= OFFSET_CLOSETODAY
                # in all other cases close yesterday
else:
req.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
req.direction = DIRECTION_SHORT
req.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
req.direction = DIRECTION_LONG
            # only SHFE distinguishes close-today from close-yesterday
if contract.exchange != EXCHANGE_SHFE:
req.offset = OFFSET_CLOSE
else:
                # get the cached position data
posBuffer = self.posBufferDict.get(vtSymbol, None)
                # if no position cache is available, default to closing yesterday
if not posBuffer:
req.offset = OFFSET_CLOSE
                # otherwise, if there is a short position opened today, close today
elif posBuffer.shortToday:
req.offset= OFFSET_CLOSETODAY
                # in all other cases close yesterday
else:
req.offset = OFFSET_CLOSE
        vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)    # send the order
        self.orderStrategyDict[vtOrderID] = strategy     # remember the vtOrderID -> strategy mapping
self.writeCtaLog(u'策略%s发送委托,%s,%s,%s@%s'
%(strategy.name, vtSymbol, req.direction, volume, price))
return vtOrderID
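        # Example (illustrative): for an SHFE contract whose position buffer
        # shows longToday > 0, CTAORDER_SELL maps to DIRECTION_SHORT plus
        # OFFSET_CLOSETODAY; on any other exchange it simply uses OFFSET_CLOSE.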
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
        # look up the order object
order = self.mainEngine.getOrder(vtOrderID)
        # if the lookup succeeded
if order:
            # only send the cancel request if the order is still active
orderFinished = (order.status==STATUS_ALLTRADED or order.status==STATUS_CANCELLED)
if not orderFinished:
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.orderType = orderType
so.price = price
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
        # store the stopOrder object in the dictionaries
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
        # check that the stop order exists
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def processStopOrder(self, tick):
"""收到行情后处理本地停止单(检查是否要立即发出)"""
vtSymbol = tick.vtSymbol
        # first check whether any strategy trades this contract
if vtSymbol in self.tickStrategyDict:
            # walk the working stop orders and check whether they are triggered
for so in self.workingStopOrderDict.values():
if so.vtSymbol == vtSymbol:
                    longTriggered = so.direction==DIRECTION_LONG and tick.lastPrice>=so.price      # long stop order triggered
                    shortTriggered = so.direction==DIRECTION_SHORT and tick.lastPrice<=so.price    # short stop order triggered
if longTriggered or shortTriggered:
                        # buys go out at the upper limit price and sells at the lower limit (emulating market orders)
if so.direction==DIRECTION_LONG:
price = tick.upperLimit
else:
price = tick.lowerLimit
so.status = STOPORDER_TRIGGERED
self.sendOrder(so.vtSymbol, so.orderType, price, so.volume, so.strategy)
del self.workingStopOrderDict[so.stopOrderID]
#----------------------------------------------------------------------
def processTickEvent(self, event):
"""处理行情推送"""
tick = event.dict_['data']
        # on every tick, first process local stop orders (check whether to fire them)
self.processStopOrder(tick)
        # push the tick to the matching strategy instances
if tick.vtSymbol in self.tickStrategyDict:
            # add the datetime field if missing
if not tick.datetime:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
            # push to each strategy instance in turn
l = self.tickStrategyDict[tick.vtSymbol]
for strategy in l:
self.callStrategyFunc(strategy, strategy.onTick, tick)
#----------------------------------------------------------------------
def processOrderEvent(self, event):
"""处理委托推送"""
order = event.dict_['data']
if order.vtOrderID in self.orderStrategyDict:
strategy = self.orderStrategyDict[order.vtOrderID]
self.callStrategyFunc(strategy, strategy.onOrder, order)
#----------------------------------------------------------------------
def processTradeEvent(self, event):
"""处理成交推送"""
trade = event.dict_['data']
        # filter out trade reports that were already received
if trade.vtTradeID in self.tradeSet:
return
self.tradeSet.add(trade.vtTradeID)
        # push the trade to the strategy object
if trade.vtOrderID in self.orderStrategyDict:
strategy = self.orderStrategyDict[trade.vtOrderID]
            # update the strategy position
if trade.direction == DIRECTION_LONG:
strategy.pos += trade.volume
else:
strategy.pos -= trade.volume
self.callStrategyFunc(strategy, strategy.onTrade, trade)
        # update the cached position data
if trade.vtSymbol in self.tickStrategyDict:
posBuffer = self.posBufferDict.get(trade.vtSymbol, None)
if not posBuffer:
posBuffer = PositionBuffer()
posBuffer.vtSymbol = trade.vtSymbol
self.posBufferDict[trade.vtSymbol] = posBuffer
posBuffer.updateTradeData(trade)
#----------------------------------------------------------------------
def processPositionEvent(self, event):
"""处理持仓推送"""
pos = event.dict_['data']
        # update the cached position data
if pos.vtSymbol in self.tickStrategyDict:
posBuffer = self.posBufferDict.get(pos.vtSymbol, None)
if not posBuffer:
posBuffer = PositionBuffer()
posBuffer.vtSymbol = pos.vtSymbol
self.posBufferDict[pos.vtSymbol] = posBuffer
posBuffer.updatePositionData(pos)
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.processTickEvent)
self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
self.eventEngine.register(EVENT_POSITION, self.processPositionEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是VtTickData或者VtBarData)"""
self.mainEngine.dbInsert(dbName, collectionName, data.__dict__)
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, days):
"""从数据库中读取Bar数据,startDate是datetime对象"""
startDate = self.today - timedelta(days)
d = {'datetime':{'$gte':startDate}}
barData = self.mainEngine.dbQuery(dbName, collectionName, d)
l = []
for d in barData:
bar = VtBarData()
bar.__dict__ = d
l.append(bar)
return l
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, days):
"""从数据库中读取Tick数据,startDate是datetime对象"""
startDate = self.today - timedelta(days)
d = {'datetime':{'$gte':startDate}}
tickData = self.mainEngine.dbQuery(dbName, collectionName, d)
l = []
for d in tickData:
tick = VtTickData()
tick.__dict__ = d
l.append(tick)
return l
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""快速发出CTA模块日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_CTA_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def loadStrategy(self, setting):
"""载入策略"""
try:
name = setting['name']
className = setting['className']
except Exception, e:
self.writeCtaLog(u'载入策略出错:%s' %e)
return
        # look up the strategy class
strategyClass = STRATEGY_CLASS.get(className, None)
if not strategyClass:
self.writeCtaLog(u'找不到策略类:%s' %className)
return
        # guard against duplicate strategy names
if name in self.strategyDict:
self.writeCtaLog(u'策略实例重名:%s' %name)
else:
            # create the strategy instance
strategy = strategyClass(self, setting)
self.strategyDict[name] = strategy
            # store the tick mapping
if strategy.vtSymbol in self.tickStrategyDict:
l = self.tickStrategyDict[strategy.vtSymbol]
else:
l = []
self.tickStrategyDict[strategy.vtSymbol] = l
l.append(strategy)
            # subscribe to the contract
contract = self.mainEngine.getContract(strategy.vtSymbol)
if contract:
req = VtSubscribeReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
                # currency and product class required when subscribing via the IB gateway are taken from the strategy attributes
req.currency = strategy.currency
req.productClass = strategy.productClass
self.mainEngine.subscribe(req, contract.gatewayName)
else:
self.writeCtaLog(u'%s的交易合约%s无法找到' %(name, strategy.vtSymbol))
#----------------------------------------------------------------------
def initStrategy(self, name):
"""初始化策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if not strategy.inited:
strategy.inited = True
self.callStrategyFunc(strategy, strategy.onInit)
else:
self.writeCtaLog(u'请勿重复初始化策略实例:%s' %name)
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#---------------------------------------------------------------------
def startStrategy(self, name):
"""启动策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.inited and not strategy.trading:
strategy.trading = True
self.callStrategyFunc(strategy, strategy.onStart)
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#----------------------------------------------------------------------
def stopStrategy(self, name):
"""停止策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.trading:
strategy.trading = False
self.callStrategyFunc(strategy, strategy.onStop)
                # cancel all limit orders sent by this strategy
for vtOrderID, s in self.orderStrategyDict.items():
if s is strategy:
self.cancelOrder(vtOrderID)
                # cancel all local stop orders sent by this strategy
for stopOrderID, so in self.workingStopOrderDict.items():
if so.strategy is strategy:
self.cancelStopOrder(stopOrderID)
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#----------------------------------------------------------------------
def saveSetting(self):
"""保存策略配置"""
with open(self.settingFileName, 'w') as f:
l = []
for strategy in self.strategyDict.values():
setting = {}
for param in strategy.paramList:
setting[param] = strategy.__getattribute__(param)
l.append(setting)
jsonL = json.dumps(l, indent=4)
f.write(jsonL)
#----------------------------------------------------------------------
def loadSetting(self):
"""读取策略配置"""
with open(self.settingFileName) as f:
l = json.load(f)
for setting in l:
self.loadStrategy(setting)
self.loadPosition()
#----------------------------------------------------------------------
def getStrategyVar(self, name):
"""获取策略当前的变量字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
varDict = OrderedDict()
for key in strategy.varList:
varDict[key] = strategy.__getattribute__(key)
return varDict
else:
self.writeCtaLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def getStrategyParam(self, name):
"""获取策略的参数字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
paramDict = OrderedDict()
for key in strategy.paramList:
paramDict[key] = strategy.__getattribute__(key)
return paramDict
else:
self.writeCtaLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""触发策略状态变化事件(通常用于通知GUI更新)"""
event = Event(EVENT_CTA_STRATEGY+name)
self.eventEngine.put(event)
#----------------------------------------------------------------------
def callStrategyFunc(self, strategy, func, params=None):
"""调用策略的函数,若触发异常则捕捉"""
try:
if params:
func(params)
else:
func()
except Exception:
            # stop the strategy and mark it as uninitialised
strategy.trading = False
strategy.inited = False
            # emit a log entry
content = '\n'.join([u'策略%s触发异常已停止' %strategy.name,
traceback.format_exc()])
self.writeCtaLog(content)
#----------------------------------------------------------------------
def savePosition(self):
"""保存所有策略的持仓情况到数据库"""
for strategy in self.strategyDict.values():
flt = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol}
d = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol,
'pos': strategy.pos}
self.mainEngine.dbUpdate(POSITION_DB_NAME, strategy.className,
d, flt, True)
content = '策略%s持仓保存成功' %strategy.name
self.writeCtaLog(content)
#----------------------------------------------------------------------
def loadPosition(self):
"""从数据库载入策略的持仓情况"""
for strategy in self.strategyDict.values():
flt = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol}
posData = self.mainEngine.dbQuery(POSITION_DB_NAME, strategy.className, flt)
for d in posData:
strategy.pos = d['pos']
#----------------------------------------------------------------------
def roundToPriceTick(self, priceTick, price):
"""取整价格到合约最小价格变动"""
if not priceTick:
return price
newPrice = round(price/priceTick, 0) * priceTick
return newPrice
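        # Example (illustrative): roundToPriceTick(0.2, 10.37) returns 10.4,
        # while roundToPriceTick(0, 10.37) returns 10.37 unchanged.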
#----------------------------------------------------------------------
def stop(self):
"""停止"""
pass
########################################################################
class PositionBuffer(object):
"""持仓缓存信息(本地维护的持仓数据)"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING
        # long side
self.longPosition = EMPTY_INT
self.longToday = EMPTY_INT
self.longYd = EMPTY_INT
        # short side
self.shortPosition = EMPTY_INT
self.shortToday = EMPTY_INT
self.shortYd = EMPTY_INT
#----------------------------------------------------------------------
def updatePositionData(self, pos):
"""更新持仓数据"""
if pos.direction == DIRECTION_LONG:
self.longPosition = pos.position
self.longYd = pos.ydPosition
self.longToday = self.longPosition - self.longYd
else:
self.shortPosition = pos.position
self.shortYd = pos.ydPosition
self.shortToday = self.shortPosition - self.shortYd
#----------------------------------------------------------------------
def updateTradeData(self, trade):
"""更新成交数据"""
if trade.direction == DIRECTION_LONG:
            # a long open increases the long position and today's long position
if trade.offset == OFFSET_OPEN:
self.longPosition += trade.volume
self.longToday += trade.volume
            # a long close-today reduces the short position and today's short position
elif trade.offset == OFFSET_CLOSETODAY:
self.shortPosition -= trade.volume
self.shortToday -= trade.volume
            # a long close-yesterday reduces the short position and yesterday's short position
else:
self.shortPosition -= trade.volume
self.shortYd -= trade.volume
else:
            # the short side mirrors the long side
if trade.offset == OFFSET_OPEN:
self.shortPosition += trade.volume
self.shortToday += trade.volume
elif trade.offset == OFFSET_CLOSETODAY:
self.longPosition -= trade.volume
self.longToday -= trade.volume
else:
self.longPosition -= trade.volume
self.longYd -= trade.volume
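        # Example (illustrative): a DIRECTION_LONG/OFFSET_OPEN trade of volume 3
        # adds 3 to longPosition and longToday; a later DIRECTION_SHORT/
        # OFFSET_CLOSETODAY trade of 3 removes both again, leaving longYd untouched.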
It was no surprise, after its tremendously successful first season, when Netflix's breakout hit Stranger Things was renewed for a second season--one set to come out on Halloween. The good news doesn't stop there, though: Show creators the Duffer brothers have now confirmed a third season, and say they're looking to end the show with a fourth (via io9).
In a new interview with Vulture, Matt and Ross Duffer said that the show has been approved for a third season, and added that the fourth season would be an ideal place to stop. “We’re thinking it will be a four-season thing and then out,” Ross said. Matt added that at that point, the gang will be approaching college age, so their exploits will have to be different: "We just have to keep adjusting the story. Though I don’t know if we can justify something bad happening to them once a year."
It's basically impossible to predict where the show is going to go in its fourth season considering the second season hasn't even premiered yet, but one thing seems certain: For the sake of the kids, they probably shouldn't stay in Hawkins, Indiana for all that much longer: "They’re going to have to get the f--k out of this town!" Ross said. "It’s ridiculous!"
What do you think's going to happen in the second season? What is Sean Astin's role going to be? Is Will going to be this season's villain? Let us know what you think in the comments below!
from __future__ import unicode_literals
import json
import os
import redis
import string
import time
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.db import connection
from django.test import LiveServerTestCase
from django.utils import timezone
from djorm_hstore.models import register_hstore_handler
from smartmin.tests import SmartminTest
from temba.contacts.models import Contact, ContactGroup, TEL_SCHEME, TWITTER_SCHEME
from temba.orgs.models import Org
from temba.channels.models import Channel
from temba.locations.models import AdminBoundary
from temba.flows.models import Flow
from temba.msgs.models import Msg, INCOMING
from temba.utils import dict_to_struct
def unix_time(dt):
    epoch = datetime.utcfromtimestamp(0)  # 'datetime' here is the class imported above, not the module
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
def add_testing_flag_to_context(*args):
return dict(testing=settings.TESTING)
def uuid(id):
return '00000000-00000000-00000000-%08d' % id
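# e.g. uuid(42) -> '00000000-00000000-00000000-00000042'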
class TembaTest(SmartminTest):
def setUp(self):
self.clear_cache()
self.superuser = User.objects.create_superuser(username="super", email="[email protected]", password="super")
# some users not tied to our org
self.non_org_user = self.create_user("NonOrg")
self.non_org_manager = self.create_user("NonOrgManager")
# our three user types inside our org
self.user = self.create_user("User")
self.root = self.create_user("Root")
self.root.groups.add(Group.objects.get(name="Alpha"))
self.admin = self.create_user("Administrator")
# setup admin boundaries for Rwanda
self.country = AdminBoundary.objects.create(osm_id='171496', name='Rwanda', level=0)
state1 = AdminBoundary.objects.create(osm_id='1708283', name='Kigali City', level=1, parent=self.country)
state2 = AdminBoundary.objects.create(osm_id='171591', name='Eastern Province', level=1, parent=self.country)
AdminBoundary.objects.create(osm_id='1711131', name='Gatsibo', level=2, parent=state2)
AdminBoundary.objects.create(osm_id='1711163', name='Kayonza', level=2, parent=state2)
AdminBoundary.objects.create(osm_id='60485579', name='Kigali', level=2, parent=state1)
AdminBoundary.objects.create(osm_id='1711142', name='Rwamagana', level=2, parent=state2)
self.org = Org.objects.create(name="Temba", timezone="Africa/Kigali", country=self.country,
created_by=self.user, modified_by=self.user)
# add users to the org
self.org.administrators.add(self.admin)
self.admin.set_org(self.org)
self.org.administrators.add(self.root)
self.root.set_org(self.org)
self.user.set_org(self.org)
self.superuser.set_org(self.org)
# welcome topup with 1000 credits
self.welcome_topup = self.org.create_welcome_topup(self.admin)
# a single Android channel
self.channel = Channel.objects.create(org=self.org, name="Test Channel",
address="+250785551212", country='RW', channel_type='A',
secret="12345", gcm_id="123",
created_by=self.user, modified_by=self.user)
# reset our simulation to False
Contact.set_simulation(False)
def clear_cache(self):
# we are extra paranoid here and actually hardcode redis to 'localhost' and '10'
# Redis 10 is our testing redis db
r = redis.StrictRedis(host='localhost', db=10)
r.flushdb()
def import_file(self, file, site='http://rapidpro.io'):
handle = open('%s/test_imports/%s.json' % (settings.MEDIA_ROOT, file), 'r+')
data = handle.read()
handle.close()
# import all our bits
self.org.import_app(json.loads(data), self.admin, site=site)
def create_secondary_org(self):
self.admin2 = self.create_user("Administrator2")
self.org2 = Org.objects.create(name="Trileet Inc.", timezone="Africa/Kigali", created_by=self.admin2, modified_by=self.admin2)
self.org2.administrators.add(self.admin2)
self.admin2.set_org(self.org)
def create_contact(self, name=None, number=None, twitter=None):
"""
Create a contact in the master test org
"""
urns = []
if number:
urns.append((TEL_SCHEME, number))
if twitter:
urns.append((TWITTER_SCHEME, twitter))
if not name and not urns:
raise ValueError("Need a name or URN to create a contact")
return Contact.get_or_create(self.org, self.user, name, urns=urns)
def create_group(self, name, contacts):
group = ContactGroup.create(self.org, self.user, name)
group.contacts.add(*contacts)
return group
def create_msg(self, **kwargs):
if not 'org' in kwargs:
kwargs['org'] = self.org
if not 'channel' in kwargs:
kwargs['channel'] = self.channel
if not 'contact_urn' in kwargs:
kwargs['contact_urn'] = kwargs['contact'].get_urn(TEL_SCHEME)
if not 'created_on' in kwargs:
kwargs['created_on'] = timezone.now()
if not kwargs['contact'].is_test:
kwargs['topup_id'] = kwargs['org'].decrement_credit()
return Msg.objects.create(**kwargs)
def create_flow(self):
start = int(time.time() * 1000) % 1000000
definition = dict(action_sets=[dict(uuid=uuid(start + 1), x=1, y=1, destination=uuid(start + 5),
actions=[dict(type='reply', msg='What is your favorite color?')]),
dict(uuid=uuid(start + 2), x=2, y=2, destination=None,
actions=[dict(type='reply', msg='I love orange too!')]),
dict(uuid=uuid(start + 3), x=3, y=3, destination=None,
actions=[dict(type='reply', msg='Blue is sad. :(')]),
dict(uuid=uuid(start + 4), x=4, y=4, destination=None,
actions=[dict(type='reply', msg='That is a funny color.')])
],
rule_sets=[dict(uuid=uuid(start + 5), x=5, y=5,
label='color',
response_type='C',
rules=[
dict(uuid=uuid(start + 12), destination=uuid(start + 2), test=dict(type='contains', test='orange'), category="Orange"),
dict(uuid=uuid(start + 13), destination=uuid(start + 3), test=dict(type='contains', test='blue'), category="Blue"),
dict(uuid=uuid(start + 14), destination=uuid(start + 4), test=dict(type='true'), category="Other"),
dict(uuid=uuid(start + 15), test=dict(type='true'), category="Nothing")]) # test case with no destination
],
entry=uuid(start + 1))
flow = Flow.create(self.org, self.admin, "Color Flow")
flow.update(definition)
return flow
class FlowFileTest(TembaTest):
def setUp(self):
super(FlowFileTest, self).setUp()
self.contact = self.create_contact('Ben Haggerty', '+12065552020')
register_hstore_handler(connection)
def assertLastResponse(self, message):
response = Msg.objects.filter(contact=self.contact).order_by('-created_on', '-pk').first()
self.assertTrue("Missing response from contact.", response)
self.assertEquals(message, response.text)
def send_message(self, flow, message, restart_participants=False, contact=None, initiate_flow=False, assert_reply=True):
"""
Starts the flow, sends the message, returns the reply
"""
if not contact:
contact = self.contact
try:
if contact.is_test:
Contact.set_simulation(True)
incoming = self.create_msg(direction=INCOMING, contact=contact, text=message)
# start the flow
if initiate_flow:
flow.start(groups=[], contacts=[contact], restart_participants=restart_participants, start_msg=incoming)
else:
flow.start(groups=[], contacts=[contact], restart_participants=restart_participants)
self.assertTrue(flow.find_and_handle(incoming))
# our message should have gotten a reply
if assert_reply:
reply = Msg.objects.get(response_to=incoming)
self.assertEquals(contact, reply.contact)
return reply.text
return None
finally:
Contact.set_simulation(False)
def get_flow(self, filename, substitutions=None):
flow = Flow.create(self.org, self.admin, name=filename)
self.update_flow(flow, filename, substitutions)
return flow
def update_flow(self, flow, filename, substitutions=None):
from django.conf import settings
with open('%s/test_flows/%s.json' % (settings.MEDIA_ROOT, filename), 'r') as handle:
contents = handle.read()
if substitutions:
for key in substitutions.keys():
contents = contents.replace(key, str(substitutions[key]))
flow.update(json.loads(contents))
return flow
from selenium.webdriver.firefox.webdriver import WebDriver
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
class BrowserTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
cls.driver = WebDriver()
try:
import os
os.mkdir('screenshots')
except OSError:
pass
super(BrowserTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
pass
#cls.driver.quit()
#super(BrowserTest, cls).tearDownClass()
def strip_tags(self, html):
s = MLStripper()
s.feed(html)
return s.get_data()
def save_screenshot(self):
time.sleep(1)
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in self.driver.current_url if c in valid_chars)
self.driver.get_screenshot_as_file("screenshots/%s.png" % filename)
def fetch_page(self, url=None):
if not url:
url = ''
if 'http://' not in url:
url = self.live_server_url + url
self.driver.get(url)
self.save_screenshot()
def get_elements(self, selector):
return self.driver.find_elements_by_css_selector(selector)
def get_element(self, selector):
if selector[0] == '#' or selector[0] == '.':
return self.driver.find_element_by_css_selector(selector)
else:
return self.driver.find_element_by_name(selector)
def keys(self, selector, value):
self.get_element(selector).send_keys(value)
def click(self, selector):
time.sleep(1)
self.get_element(selector).click()
self.save_screenshot()
def link(self, link_text):
self.driver.find_element_by_link_text(link_text).click()
time.sleep(2)
self.save_screenshot()
def submit(self, selector):
time.sleep(1)
self.get_element(selector).submit()
self.save_screenshot()
time.sleep(1)
def assertInElements(self, selector, text, strip_html=True):
for element in self.get_elements(selector):
if text in (self.strip_tags(element.text) if strip_html else element.text):
return
self.fail("Couldn't find '%s' in any element '%s'" % (text, selector))
def assertInElement(self, selector, text, strip_html=True):
element = self.get_element(selector)
if text not in (self.strip_tags(element.text) if strip_html else element.text):
self.fail("Couldn't find '%s' in '%s'" % (text, element.text))
#def flow_basics(self):
def browser(self):
self.driver.set_window_size(1024, 2000)
# view the homepage
self.fetch_page()
# go directly to our signup
self.fetch_page(reverse('orgs.org_signup'))
# create account
self.keys('email', '[email protected]')
self.keys('password', 'SuperSafe1')
self.keys('first_name', 'Joe')
self.keys('last_name', 'Blow')
self.click('#form-one-submit')
self.keys('name', 'Temba')
self.click('#form-two-submit')
# set up our channel for claiming
anon = User.objects.get(pk=settings.ANONYMOUS_USER_ID)
channel = Channel.objects.create(name="Test Channel", address="0785551212", country='RW',
created_by=anon, modified_by=anon, claim_code='AAABBBCCC',
secret="12345", gcm_id="123")
# and claim it
self.fetch_page(reverse('channels.channel_claim_android'))
self.keys('#id_claim_code', 'AAABBBCCC')
self.keys('#id_phone_number', '0785551212')
self.submit('.claim-form')
# get our freshly claimed channel
channel = Channel.objects.get(pk=channel.pk)
# now go to the contacts page
self.click('#menu-right .icon-contact')
self.click('#id_import_contacts')
# upload some contacts
directory = os.path.dirname(os.path.realpath(__file__))
self.keys('#csv_file', '%s/../media/test_imports/sample_contacts.xls' % directory)
self.submit('.smartmin-form')
# make sure they are there
self.click('#menu-right .icon-contact')
self.assertInElements('.value-phone', '+250788382382')
self.assertInElements('.value-text', 'Eric Newcomer')
self.assertInElements('.value-text', 'Sample Contacts')
class MockResponse(object):
def __init__(self, status_code, text, method='GET', url='http://foo.com/'):
self.text = text
self.status_code = status_code
# mock up a request object on our response as well
self.request = dict_to_struct('MockRequest', dict(method=method, url=url))
def json(self):
return json.loads(self.text)
def raise_for_status(self):
if self.status_code != 200:
raise Exception("Got HTTP error: %d" % self.status_code)
class AnonymousOrg(object):
"""
Makes the given org temporarily anonymous
"""
def __init__(self, org):
self.org = org
def __enter__(self):
self.org.is_anon = True
self.org.save()
def __exit__(self, exc_type, exc_val, exc_tb):
self.org.is_anon = False
self.org.save()
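# Usage sketch (added; illustrative): inside a TembaTest subclass, wrap any
# assertions that depend on anonymized output.
#
#     with AnonymousOrg(self.org):
#         self.assertTrue(Org.objects.get(pk=self.org.pk).is_anon)
#     self.assertFalse(Org.objects.get(pk=self.org.pk).is_anon)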
|
One person's rubbish is another's treasure: that is the motto of Drap-Art, the international recycling art festival of Catalonia, which offers exhibitions and a flea market at the CCCB with objects made from scrap material. Participants in the festival approach creative recycling from diverse disciplines with a common denominator: converting the abundant waste produced by our opulent society into expressions of contemporary creativity. The materials, always urban waste found on the street, can be chairs, metal, cans or cartons, caps and bottles, plastic waste, exhaust pipes, glass, newsprint, shoe lasts, dust, thread, light bulbs, vinyl records, corks, flip-flops: anything goes as raw material to be subjected to a process of transformation and creation. A luminous table made with a washing-machine drum, chairs built with bicycle wheels and safety belts, dolls made of patchwork and electrical wire, rings made with computer keys, handbags made with chewing-gum wrappers: in some cases the recycling is difficult to discover at first glance, as in a bodice woven from old music tapes or small robots made from disposable razors. "Recycling means not only to reuse and recover, but also to revalue, which helps to encourage more thoughtful and responsible consumption," says Tanja Grass, creator and director of Drap-Art, a non-profit association founded in 1995 which promotes creative recycling through festivals, exhibitions and workshops. Drap-Art has its own space in the Carbonari, the Espai Drap-Art, where you can find a permanent exhibition of works of art, design and craft objects made with recyclable materials, as well as programming of thematic and solo exhibitions since 2000. The annual festival began in 1996 with the marathon of creation and recycling of Barcelona and has not stopped growing in number of participants and public attendance.
The origin of recycling in art lies in the avant-garde movements of the early 20th century, but today this artistic movement emerges as a response to social and environmental needs. Drap-Art 09 will take place at the CCCB, Plaça Joan Coromines, Pati de les Dones and Plaça dels Àngels from Thursday, December 17, 2009 to Sunday, January 10, 2010. Around 20,000 visitors are expected to pass through Drap-Art 09, which will host more than 150 artists, musicians, workshop leaders, designers, lecturers and filmmakers.
|
"""
Author
--------
Best regards,
Sungjin (James) Kim, PhD
Postdoc, CCB in Harvard
[email protected]
[Web] http://aspuru.chem.harvard.edu/james-sungjin-kim/
[Linkedin] https://www.linkedin.com/in/jamessungjinkim
[Facebook] https://www.facebook.com/jamessungjin.kim
[alternative email] [email protected]
Licence
---------
MIT License
"""
from __future__ import print_function
# I started to use __future__ so as to be compatible with Python3
import numpy as np
from sklearn import linear_model
from sklearn import cross_validation
from sklearn import metrics
import pandas as pd
from collections import OrderedDict
# To improve the speed, I am using pyx (compiled Cython modules).
import jpyx
import jutil
from jsklearn import codes
def mld( r_l, mod_l = [-0.70710678, 0.70710678]):
"""
maximum likelihood detection
r_l: received signals after reception processing
mod_l: list of all modulation signals
BPSK: [-0.70710678, 0.70710678]
return the demodulated signals (0, 1, ...)
"""
sd_l = list() # store demodulated signal
for r in r_l:
dist = list() #Store distance
for m in mod_l:
d = np.power( np.abs( r - m), 2)
dist.append( d)
sd = np.argmin( dist)
sd_l.append( sd)
return np.array( sd_l)
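# Worked example (added; uses the default BPSK constellation): each received
# sample maps to the index of the nearest constellation point.
# >>> mld(np.array([0.9, -0.3]))
# array([1, 0])  # 0.9 is nearest +0.70710678 (index 1), -0.3 nearest -0.70710678 (index 0)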
def calc_BER( r_l, x_l):
"""
calculate bit error rate (BER)
r_l: demodulated signals (ndarray, 1D)
x_l: transmitted signals (ndarray, 1D)
"""
err_l = r_l - x_l
errs = np.where( err_l != 0)[0]
# print 'err_l =', err_l
# print 'errs =', errs
Nerr = len(np.where( err_l != 0)[0])
return float( Nerr) / len( err_l), Nerr
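# Worked example (added): one mismatch out of four symbols gives a BER of 0.25.
# >>> calc_BER(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))
# (0.25, 1)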
def db2var( SNRdB):
return np.power( 10.0, SNRdB / 10.0)
def gen_BPSK(Nx, Nt):
"""
Generate BPSK modulated signals
"""
BPSK = np.array( [1, -1]) / np.sqrt( 2.0)
s_a = np.random.randint( 0, 2, Nx * Nt)
x_flat_a = BPSK[ s_a]
x_a = np.reshape( x_flat_a, (Nx, Nt))
return BPSK, s_a, x_flat_a, x_a
def gen_H( Nr, Nt):
return np.random.randn( Nr, Nt)
def gen_Rx( Nr, Nx, SNR, H_a, x_a):
"""
The received signals are modeled.
"""
n_a = np.random.randn( Nr, Nx) / np.sqrt( SNR)
y_a = np.dot( H_a, x_a.T) + n_a
return y_a
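# Sketch of the model implemented above (added for clarity): y = H x.T + n,
# where H is (Nr x Nt), x is (Nx x Nt) and each noise entry is drawn from
# N(0, 1/SNR), so the linear `SNR` argument sets the signal-to-noise ratio
# up to the BPSK power normalization used in gen_BPSK().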
def normalize( W_a):
"Weight is normalized."
nW_a = np.linalg.norm( W_a, axis = 1)
for a0 in range( W_a.shape[0]):
W_a[a0,:] = np.divide( W_a[a0,:], nW_a[a0])
return W_a
class MIMO(object):
"""
Modeling for a MIMO wireless communication system.
"""
def __init__(self, Nt = 2, Nr = 4, Nx = 10, SNRdB = 10, model = "Ridge", Npilot = 10, Nloop = 10):
"""
The parameter of 'model' determines the regression method.
"""
self.set_param( (Nt, Nr, Nx, SNRdB))
self.model = model
self.Npilot = Npilot
self.Nloop = Nloop
# The function of test_ridge_all() uses 3 cases for testing.
# self.N_test_ridge_all = 3
def set_param( self, param_NtNrNxSNRdB):
Nt, Nr, Nx, SNRdB = param_NtNrNxSNRdB
# The antenna configuration is conducted.
self.Nt = Nt
self.Nr = Nr
# No of streams is fixed.
self.Nx = Nx
# Initial SNR is defined
self.SNRdB = SNRdB
self.SNR = db2var(SNRdB)
def _gen_BPSK_r0(self):
"""
Generate BPSK modulated signals
"""
self.BPSK = np.array( [1, -1]) / np.sqrt( 2.0)
self.s_a = np.random.randint( 0, 2, self.Nx * self.Nt)
self.x_flat_a = self.BPSK[ self.s_a]
self.x_a = np.reshape( self.x_flat_a, (self.Nx, self.Nt))
def gen_BPSK( self):
"""
Generate BPSK signals using global function gen_BPSK().
This function will be used to generate pilot signal as well.
"""
self.BPSK, self.s_a, self.x_flat_a, self.x_a = gen_BPSK( self.Nx, self.Nt)
def gen_H(self):
"""
The MIMO channel is generated.
"""
self.H_a = gen_H( self.Nr, self.Nt)
def _gen_Rx_r0(self):
"""
The received signals are modeled.
"""
self.n_a = np.random.randn( self.Nr, self.Nx) / np.sqrt( self.SNR)
self.y_a = np.dot( self.H_a, self.x_a.T) + self.n_a
def gen_Rx(self):
"""
The received signals are modeled.
"""
self.y_a = gen_Rx( self.Nr, self.Nx, self.SNR, self.H_a, self.x_a)
def gen_WR_ideal(self):
"""
The reception process with ideal channel estimation
is conducted.
each reception vector of W_a should be normalized to one.
"""
self.W_a = np.linalg.pinv( self.H_a)
# The reception signal vector is transposed.
self.gen_Decoding()
def gen_WR_pilot(self, pilot_SNRdB):
"""
The reception process with pilot channel estimation
is conducted.
The pilot is transmitted through a random information channel.
"""
pilot_SNR = db2var(pilot_SNRdB)
N_a = np.random.randn( *self.H_a.shape) / np.sqrt( pilot_SNR)
Hp_a = self.H_a + N_a
self.W_a = np.linalg.pinv( Hp_a)
self.gen_Decoding()
def gen_WR_pilot_channel(self, pilot_SNRdB):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = self.Npilot
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
lm = linear_model.LinearRegression()
lm.fit( yT_a, x_a)
"""
Power normalization should be considered
unless it is multiplied with both signal and noise.
In this case, the MMSE weight is calculated, while
pinv() obtains the ZF filter.
"""
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gs_pilot_reg_only(self, alpha_l):
"""
Grid search is applied over alpha_l.
Later, the best alpha will be selected and used to decode the data.
"""
pdo = pd.DataFrame()
for alpha in alpha_l:
pdi = self.cv_pilot_reg_only( alpha)
pdo = pdo.append( pdi, ignore_index = True)
return pdo
def gs_pilot_reg_full(self, alpha_l):
"""
Full means that data and pilot are both generated and processed, including data decoding.
"""
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
self.rx_pilot()
return self.gs_pilot_reg_only( alpha_l)
def gs_pilot_reg_best(self, alpha_l):
"""
Find the best alpha using Ridge regression.
Return
--------
The best alpha is returned.
"""
pdi = self.gs_pilot_reg_only( alpha_l)
# print( 'pdi["E[scores]"]', pdi["E[scores]"])
i_max = np.argmin( pdi["E[scores]"])
alpha_best = pdi["alpha"][i_max]
return alpha_best
def gs_pilot_reg_best_full(self, alpha_l):
"""
Full means that data and pilot are both generated and processed, including data decoding.
"""
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
self.rx_pilot()
return self.gs_pilot_reg_best( alpha_l)
def rx_pilot(self):
Npilot = self.Npilot
SNRpilot = self.SNR
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
self.rx_p = dict()
self.rx_p["yT_a"] = yT_a
self.rx_p["x_a"] = x_a
def cv_pilot_only(self):
"""
Cross-validation scores are evaluated using leave-one-out (LOO).
SNRpilot is equal to SNR, which is SNRdata.
"""
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
lm = linear_model.LinearRegression()
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
# Output is stored along with the experiment parameters.
pdi = pd.DataFrame()
pdi["model"] = ["LinearRegression"]
pdi["alpha"] = [0]
pdi["metric"] = ["mean_squared_error"]
pdi["E[scores]"] = [np.mean(scores)]
pdi["std[scores]"] = [np.std(scores)]
pdi["scores"] = [scores]
return pdi
def cv_pilot( self):
self.rx_pilot()
return self.cv_pilot_only()
def _cv_pilot_reg_only_r0(self, alpha = 0):
model = self.model
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
# kf = KFold()
# loo = cross_validation.LeaveOneOut( x_a.shape[0])
if alpha == 0:
lm = linear_model.LinearRegression()
else:
lm = getattr( linear_model, model)(alpha)
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
return scores
def cv_pilot_reg_only(self, alpha = 0):
model = self.model
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
# kf = KFold()
# loo = cross_validation.LeaveOneOut( x_a.shape[0])
if alpha == 0:
lm = linear_model.LinearRegression()
else:
lm = getattr( linear_model, model)(alpha)
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
# Output is stored along with the experiment parameters.
pdi = pd.DataFrame()
pdi["model"] = [model]
pdi["alpha"] = [alpha]
pdi["metric"] = ["mean_squared_error"]
pdi["E[scores]"] = [np.mean(np.power(scores,2))] # MSE
pdi["std[scores]"] = ["t.b.d."]
pdi["scores"] = [scores]
return pdi
def cv_pilot_reg( self, alpha = 0):
self.rx_pilot()
return self.cv_pilot_reg_only( alpha)
def _cv_pilot_reg_r0(self, alpha = 0):
"""
Cross-validation scores are evaluated using leave-one-out (LOO).
SNRpilot is equal to SNR, which is SNRdata.
"""
Npilot = self.Npilot
SNRpilot = self.SNR
model = self.model
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# kf = KFold()
# loo = cross_validation.LeaveOneOut( x_a.shape[0])
if alpha == 0:
lm = linear_model.LinearRegression()
else:
lm = getattr( linear_model, model)(alpha)
scores = codes.cross_val_score_loo( lm, yT_a, x_a)
return scores
def _gen_WR_pilot_ch_r0(self, pilot_SNRdB, alpha = 0):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = 10
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
lm = linear_model.Ridge( alpha)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def _gen_WR_pilot_ch_r1(self, pilot_SNRdB, alpha = 0, model = "Ridge"):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = 10
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
# Now you can use either Ridge or Lasso methods.
#lm = linear_model.Ridge( alpha)
lm = getattr( linear_model, model)(alpha)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gen_WR_pilot_ch(self, pilot_SNRdB, alpha_l1r = 0, model = "Ridge"):
"""
The reception process with pilot channel estimation
is conducted.
"""
Npilot = self.Npilot
SNRpilot = db2var( pilot_SNRdB)
BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
# H_a = gen_H( self.Nr, self.Nt)
# H_a = self.H_a
y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
yT_a = y_a.T
# print( x_a.shape, yT_a.shape)
# Now you can use either Ridge or Lasso methods.
#lm = linear_model.Ridge( alpha)
if model == "ElasticNet":
lm = linear_model.ElasticNet( alpha_l1r[0], alpha_l1r[1])
else:
lm = getattr( linear_model, model)(alpha_l1r)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gen_WR_pilot_only(self, alpha_l1r = 0):
"""
yT_a and x_a were prepared already.
Now, W_a is calculated using alpha and then
the data are decoded.
For linear regression, alpha_l1r should be left at 0.
"""
yT_a = self.rx_p["yT_a"]
x_a = self.rx_p["x_a"]
# for alpha == 0, model is changed to linear regression.
if alpha_l1r == 0:
model = "LinearRegression"
else:
model = self.model
if model == "LinearRegression":
lm = linear_model.LinearRegression()
elif model == "ElasticNet":
lm = linear_model.ElasticNet( alpha_l1r[0], alpha_l1r[1])
else: # This is either Ridge or Lasso
lm = getattr( linear_model, model)(alpha_l1r)
lm.fit( yT_a, x_a)
self.W_a = lm.coef_
# print( "np.dot( W_a, H_a) =", np.dot( self.W_a, self.H_a))
self.gen_Decoding()
def gen_WR( self, pilot_SNRdB = None):
if pilot_SNRdB is not None:
self.gen_WR_pilot( pilot_SNRdB)
else:
self.gen_WR_ideal()
def gen_Decoding(self):
"""
The reception process is conducted.
"""
self.W_a = normalize( self.W_a) # not important (useless at this moment)
self.rT_a = np.dot( self.W_a, self.y_a)
self.r_flat_a = self.rT_a.T.flatten()
#print( "type( self.r_flat_a), type( self.BPSK)")
#print( type( self.r_flat_a), type( self.BPSK))
# self.sd_a = jpyx.mld( self.r_flat_a, self.BPSK)
self.sd_a = jpyx.mld_fast( self.r_flat_a, self.BPSK)
self.BER, self.Nerr = calc_BER( self.s_a, self.sd_a)
def run_ideal( self, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
return self.run_pilot( param_NtNrNxSNRdB = param_NtNrNxSNRdB, Nloop = Nloop, disp = disp)
def run_pilot( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None:
self.gen_WR_pilot( pilot_SNRdB)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def run_pilot_channel( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None:
# self.gen_WR_pilot( pilot_SNRdB)
self.gen_WR_pilot_channel( pilot_SNRdB)
# self.gen_WR_pilot_ch( pilot_SNRdB, alpha)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def run_pilot_ch( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, alpha = 0, disp = False):
"""
A system is run from the transmitter to the receiver.
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None:
# self.gen_WR_pilot( pilot_SNRdB)
# self.gen_WR_pilot_channel( pilot_SNRdB)
self.gen_WR_pilot_ch( pilot_SNRdB, alpha)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def test_ridge_iter( self, alpha_l):
# Ideal ZF(H)
ID = 0
self.method = "Ideal ZF(H)"
self.model = "ZF"
self.alpha = 0
self.gen_WR_ideal()
yield ID
# Multiple Ridge regressions with alpha_l
for alpha in alpha_l:
ID += 1
self.method = "Ridge each"
self.model = "Ridge"
self.alpha = alpha
self.gen_WR_pilot_only( self.alpha)
yield ID
# Ridge regression with the best alpha among alpha_l
ID += 1
self.method = "Ridge best"
self.model = "Ridge"
self.alpha = self.gs_pilot_reg_best( alpha_l)
self.gen_WR_pilot_only( self.alpha)
yield ID
def test_ridge_all( self, pdi_d_prev, alpha_l):
"""
1. LinearRegression
2. multiple Ridge regression with each alpha in alpha_l
3. Ridge regression with the best alpha among alpha_l
"""
# pdi_d is generated only once.
if pdi_d_prev is None:
pdi_d = dict()
else:
pdi_d = pdi_d_prev
for ID in self.test_ridge_iter(alpha_l):
"""
If pdi_l is not defined yet,
it will be generated first and initial values are stored.
Otherwise, new data are added for the corresponding space.
"""
if pdi_d_prev is None:
pdi = pd.DataFrame()
pdi["Nerr_total"] = [0]
pdi["BER_l"] = [[self.BER]]
else:
pdi = pdi_d[ ID]
pdi["Nerr_total"] = [ pdi["Nerr_total"][0] + self.Nerr]
pdi["BER_l"] = [pdi["BER_l"][0] + [self.BER]]
pdi["method"] = [self.method]
pdi["model"] = [self.model]
pdi["alpha"] = [self.alpha]
# print( 'pdi["BER_l"]', pdi["BER_l"])
pdi["BER"] = [np.mean( pdi["BER_l"][0])]
pdi_d[ ID] = pdi
return pdi_d
def run_gs_pilot_Ridge( self, alpha_l):
"""
Search the best alpha using Ridge.
I focus on Ridge for simplicity at this moment.
Other regularization modes will be used later on.
"""
Nloop = self.Nloop
pdi_d = None
for nloop in range( Nloop):
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
# For a fair comparison, the pilot is also generated commonly for all methods.
self.rx_pilot()
pdi_d = self.test_ridge_all( pdi_d, alpha_l)
pdo = pd.DataFrame()
for pdi in pdi_d.values():
pdo = pdo.append( pdi, ignore_index = True)
return pdo
def run_pilot_ch_model( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, alpha = 0, disp = False):
"""
A system is run from the transmitter to the receiver.
self.model is used to determine the regression model such as Ridge and Lasso
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None: # 'is' needed for checking None
# self.gen_WR_pilot( pilot_SNRdB)
# self.gen_WR_pilot_channel( pilot_SNRdB)
self.gen_WR_pilot_ch( pilot_SNRdB, alpha, self.model)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def get_BER_pilot_ch_model_eqsnr(
self,
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_ch = False,
alpha = 0,
model = "Ridge"):
"""
Ridge regression is used to estimate the channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
Nt, Nr, Nx = param_NtNrNx
BER_pilot = list()
for SNRdB in SNRdB_l:
# if pilot channel is used, SNRdB is given
# Otherwise, ideal channel estimation is assumed.
if pilot_ch:
pilot_SNRdB = SNRdB
else:
pilot_SNRdB = None
if alpha > 0:
"""
Ridge or Lasso is used.
"""
self.model = model
ber = self.run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
ber = self.run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch_model( self,
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_SNRdB = None,
alpha = 0,
model = "Ridge"):
"""
Ridge regression is used to estimate the channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
This function becomes a member function of class MIMO.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
Ridge or Lasso is used.
"""
for SNRdB in SNRdB_l:
self.model = model
ber = self.run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = self.run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER( SNRdB_l = [5,6,7], param_NtNrNx = (2,4,100), Nloop = 1000, pilot_SNRdB = None):
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch( SNRdB_l = [5,6,7], param_NtNrNx = (2,4,100), Nloop = 1000, pilot_SNRdB = None, alpha = 0):
"""
Ridge regression is used to estimate the channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
Ridge is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot_ch( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch_model(
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_SNRdB = None,
alpha = 0,
model = "Ridge"):
"""
Ridge regression is used to estimate the channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
Ridge or Lasso is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO( model = model).run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = MIMO().run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def pd_gen_4_snr_pilot(Method, BER_l, alpha = None, Npilot = 10,
sim_task = "Fixed SNRpilot", pilot_SNRdB = 7,
param_NtNrNx = (2,10,100), SNRdB_l = range(-5, 5, 5)):
"""
This is a generalized pd_gen() which can be used for both
fixed_snr_pilot() and snr_snr_pilot().
"""
pdi = pd.DataFrame()
pdi["Simulation task"] = [ sim_task] * len( BER_l)
pdi["Method"] = [ Method] * len( BER_l)
if isinstance(pilot_SNRdB, list):
pdi["SNRpilot"] = pilot_SNRdB
else:
pdi["SNRpilot"] = [pilot_SNRdB] * len( BER_l)
pdi["#pilots"] = [Npilot] * len( BER_l)
pdi["Nt,Nr,Nx"] = [param_NtNrNx] * len( BER_l)
if alpha is None:
pdi["alpha"] = ["Not defined"] * len( BER_l)
else:
pdi["alpha"] = [alpha] * len( BER_l)
pdi["SNR"] = SNRdB_l
pdi["BER"] = BER_l
return pdi
def fixed_snr_pilot( SNRdB_l = range(-5, 5, 1), param_NtNrNx = (2,10,100), pilot_SNRdB = 7,
alpha_l = [0.01, 0.1, 1, 10, 100], Nloop = 5000):
"""
Simulate BER for the fixed-SNRpilot case;
the results are saved to a pandas DataFrame.
The basic parameters are given by the input arguments.
"""
def pd_gen(Method, BER_l, alpha = None, Npilot = 10):
"""
This is a meta-function of pd_gen_4_snr_pilot()
"""
return pd_gen_4_snr_pilot( Method = Method, BER_l = BER_l, Npilot = Npilot, alpha = alpha,
sim_task = "Fixed SNRpilot", pilot_SNRdB = pilot_SNRdB,
param_NtNrNx = param_NtNrNx, SNRdB_l = SNRdB_l)
pdi_l = list()
BER_l = get_BER( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop, pilot_SNRdB = None)
pdi_l.append( pd_gen( "Ideal, ZF Rx", BER_l))
BER_l = get_BER_pilot_ch( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop, pilot_SNRdB = pilot_SNRdB)
pdi_l.append( pd_gen( r"Pilot, $\alpha$=0 (MMSE)", BER_l, alpha = 0))
for alpha in alpha_l:
BER_l = get_BER_pilot_ch( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop,
pilot_SNRdB = pilot_SNRdB, alpha = alpha)
pdi_l.append( pd_gen( r"Pilot, $\alpha$={}".format(alpha),BER_l, alpha))
pdo = pd.concat( pdi_l, ignore_index = True)
return pdo
def snr_snr_pilot( SNRdB_l = range(-5, 5, 1), param_NtNrNx = (2,10,100),
alpha_l = [0.01, 0.1, 1, 10, 100], Npilot = 15, Nloop = 5000):
"""
Simulate BER for the SNRpilot = SNR case;
the results are saved to a pandas DataFrame.
The basic parameters are given by the input arguments.
"""
def pd_gen(Method, BER_l, alpha = None):
"""
This is a meta-function of pd_gen_4_snr_pilot()
"""
return pd_gen_4_snr_pilot( Method = Method, BER_l = BER_l, alpha = alpha,
Npilot = Npilot, sim_task = "SNRpilot = SNR", pilot_SNRdB = SNRdB_l,
param_NtNrNx = param_NtNrNx, SNRdB_l = SNRdB_l)
pdi_l = list()
mlm = MIMO( Npilot = Npilot)
print( "Ideal channel estimation without considering noise: ZF decoding with perfect H")
BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
Nloop = Nloop, pilot_ch = False)
pdi_l.append( pd_gen( "Ideal, ZF Rx", BER_l))
print( "General channel estimation: MMSE decoding with H and noise")
BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
Nloop = Nloop, pilot_ch = True)
pdi_l.append( pd_gen( r"Pilot, $\alpha$=0 (MMSE)", BER_l, alpha = 0))
print( "Ridge channel estimation: MMSE decoding with H and noise")
for alpha in alpha_l:
print( "Ridge with alpha =", alpha)
BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
Nloop = Nloop, pilot_ch = True, alpha = alpha, model = "Ridge")
pdi_l.append( pd_gen( r"Pilot, $\alpha$={}".format(alpha),BER_l, alpha))
pdo = pd.concat( pdi_l, ignore_index = True)
return pdo
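# --- Usage sketch (added; not part of the original module) ---
# Illustrative parameter values; running this requires the compiled jpyx/jutil
# helpers and the jsklearn package imported at the top of the module.
if __name__ == '__main__':
    mimo = MIMO(Nt=2, Nr=4, Nx=100, SNRdB=10, Npilot=15, Nloop=100)
    mimo.run_pilot_channel(pilot_SNRdB=10, Nloop=100, disp=True)
    pdo = snr_snr_pilot(SNRdB_l=range(-5, 5, 1), param_NtNrNx=(2, 4, 100),
                        alpha_l=[0.1, 1, 10], Npilot=15, Nloop=100)
    print(pdo)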
|
3. Add GX version info to the information page.
4. Add multi-language support for some character strings. |
from django.db import models
import ast
# from utils import get_config
class ListField(models.TextField):
__metaclass__ = models.SubfieldBase
description = "Stores a python list"
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
value = []
if isinstance(value, list):
return value
return ast.literal_eval(value)
def get_prep_value(self, value):
if value is None:
return value
return unicode(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value, None)
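# Round-trip sketch (added; Python 2 semantics, matching this module):
# >>> f = ListField()
# >>> f.get_prep_value(['a', 'b'])
# u"['a', 'b']"
# >>> f.to_python(u"['a', 'b']")
# ['a', 'b']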
class CertDestinations(models.Model):
name = models.CharField(max_length=255,
help_text="Name of the destination")
iam_role_arn = models.CharField(max_length=255,
help_text="ARN of the IAM Role to assume when accessing the AWS ELB"
"destination.")
zlb_hostname = models.CharField(max_length=253,
help_text="DNS name of the Zeus load balancer destination")
username = models.CharField(max_length=255,
help_text="Username to access the destination")
password = models.CharField(max_length=255,
help_text="Password to access the destination")
DESTINATION_TYPE_CHOICES = (
('', 'None'),
('elb', 'AWS ELB'),
('zlb', 'Zeus Load Balancer'),
('usr', 'The user')
)
type = models.CharField(max_length=3,
choices=DESTINATION_TYPE_CHOICES,
default='',
blank=True)
def __unicode__(self):
return self.name
class Certificate(models.Model):
common_name = models.CharField(max_length=255,
help_text="Primary DNS name for the certificate")
sans = ListField(blank=True,
help_text="List of alternative DNS names for the certificate")
validity = models.PositiveSmallIntegerField(default=1,
help_text="Number of years certificate is valid for")
server_type = models.SmallIntegerField(blank=True, default=-1,
help_text="2: Apache, 45: Nginx, -1: Other")
signature_hash = models.CharField(max_length=255,
blank=True, help_text="sha1 or sha256", default="sha256")
org_unit = models.CharField(max_length=255,
blank=True)
org_name = models.CharField(max_length=255,
help_text="Mozilla Foundation or Mozilla Corporation")
org_addr1 = models.CharField(max_length=255,
default="331 E Evelyn Ave")
org_addr2 = models.CharField(max_length=255, blank=True)
org_city = models.CharField(max_length=255,
default="Mountain View")
org_state = models.CharField(max_length=255,
default="CA")
org_zip = models.CharField(max_length=255,
default="94041")
org_country = models.CharField(max_length=255,
default="US")
# telephone = models.CharField(max_length=255)
# org_contact_job_title = models.CharField(max_length=255)
# org_contact_firstname = models.CharField(max_length=255)
# org_contact_lastname = models.CharField(max_length=255)
# org_contact_email = models.EmailField()
# org_contact_telephone = models.CharField(max_length=255)
# org_contact_telephone_ext = models.CharField(max_length=255)
ev = models.BooleanField(default=False)
destinations = models.ManyToManyField(CertDestinations)
private_key = models.TextField(max_length=16384, blank=True)
certificate_request = models.TextField(max_length=16384, blank=True)
request_id = models.IntegerField(null=True)
order_id = models.IntegerField(null=True)
serial = models.TextField(max_length=32, blank=True)
certificate = models.TextField(max_length=16384, blank=True)
intermediate_cert = models.TextField(max_length=2097152, blank=True)
root_cert = models.TextField(max_length=16384, blank=True)
pkcs7 = models.TextField(max_length=2097152, blank=True)
business_unit = models.CharField(max_length=255,
blank=True)
STATE_CHOICES = (
('', 'None'),
('req', 'Requested'),
('rej', 'Rejected'),
('app', 'Approved'),
('iss', 'Issued'),
('dep', 'Deployed')
)
REQUESTED = 'req'
REJECTED = 'rej'
APPROVED = 'app'
ISSUED = 'iss'
DEPLOYED = 'dep'
state = models.CharField(max_length=3,
choices=STATE_CHOICES,
default='',
blank=True)
# RFC 5280
openssl_arg_map = {'common_name': 'commonName',
'org_city': 'localityName',
'org_country': 'countryName',
'org_unit': 'organizationalUnitName',
'org_name': 'organizationName',
'org_state': 'stateOrProvinceName'}
def __unicode__(self):
return self.common_name
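# Usage sketch (added; hypothetical values, assumes a configured Django project):
# dest = CertDestinations.objects.create(
#     name="prod-zlb", type='zlb', zlb_hostname="lb.example.com",
#     iam_role_arn="", username="deploy", password="secret")
# cert = Certificate.objects.create(
#     common_name="www.example.com", sans=["example.com"],
#     org_name="Mozilla Corporation", state=Certificate.REQUESTED)
# cert.destinations.add(dest)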
|
Dress your lapel to impress with fun Westie Art Buttons. At CafePress you'll find many sizes and shapes of attention-getting & conversation-starting buttons. Find pins on every topic under the sun, including summer themes like a beach party. Plus, score cool badges celebrating political campaigns & presidential candidates, as well as holidays & occasions like the 4th of July and Father's Day. We have buttons with funny sayings that are sure to make people laugh and break the ice at a party. We also have everything from mini-buttons all the way up to 3.5" buttons, all available with bulk discounts, so you can buy 1 or 1,001 buttons and get a great deal. And don't forget that if you need buttons with promotional messages and your own logo, it's simple to make your own personalized buttons. |
"""Mock callback module to support device and state testing."""
import logging
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
# pylint: disable=too-many-instance-attributes
class MockCallbacks():
"""Mock callback class to support device and state testing."""
def __init__(self):
"""Init the MockCallbacks Class."""
self.callbackvalue1 = None
self.callbackvalue2 = None
self.callbackvalue3 = None
self.callbackvalue4 = None
self.callbackvalue5 = None
self.callbackvalue6 = None
self.callbackvalue7 = None
self.callbackvalue8 = None
self.callbackvalue9 = None
def callbackmethod1(self, addr, state, value):
"""Receive notice of callback method 1."""
self._report_callback(1, addr, state, value)
self.callbackvalue1 = value
def callbackmethod2(self, addr, state, value):
"""Receive notice of callback method 2."""
self._report_callback(2, addr, state, value)
self.callbackvalue2 = value
def callbackmethod3(self, addr, state, value):
"""Receive notice of callback method 3."""
self._report_callback(3, addr, state, value)
self.callbackvalue3 = value
def callbackmethod4(self, addr, state, value):
"""Receive notice of callback method 5."""
self._report_callback(4, addr, state, value)
self.callbackvalue4 = value
def callbackmethod5(self, addr, state, value):
"""Receive notice of callback method 5."""
self._report_callback(5, addr, state, value)
self.callbackvalue5 = value
def callbackmethod6(self, addr, state, value):
"""Receive notice of callback method 6."""
self._report_callback(6, addr, state, value)
self.callbackvalue6 = value
def callbackmethod7(self, addr, state, value):
"""Receive notice of callback method 7."""
self._report_callback(7, addr, state, value)
self.callbackvalue7 = value
def callbackmethod8(self, addr, state, value):
"""Receive notice of callback method 8."""
self._report_callback(8, addr, state, value)
self.callbackvalue8 = value
def callbackmethod9(self, addr, state, value):
"""Receive notice of callback method 9."""
_LOGGER.debug('Called method 9 callback')
self.callbackvalue9 = value
@staticmethod
def _report_callback(callback, addr, state, value):
_LOGGER.debug('Called method %d for address %s group %s value %s',
callback, addr, state, value)
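# Usage sketch (added; the subscription API shown is an assumption, not part
# of this module):
# callbacks = MockCallbacks()
# device.states[0x01].register(callbacks.callbackmethod1)
# ...after the device reports a new state...
# assert callbacks.callbackvalue1 == expected_value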
|
Gîte rated 3 épis, sleeps 8/10, 102 m². Access to a balcony-terrace with garden furniture.
Large living room (LCD TV and DVD).
Cellar / private ski room with a boot dryer for 10 pairs.
End-of-stay cleaning option: €100. Deposit: €500.
Pets not accepted. Non-smoking cottage.
Week: €1,480 to €2,300.
Week: €880 to €1,300. |
import numpy as np
from numpy.linalg import svd
from math import log
from scipy.optimize import leastsq
from skcv.multiview.util import normalize_points
def fundamental_matrix_from_two_cameras(camera1, camera2):
""" Computes the fundamental matrix from two projection
matrices
Parameters
----------
camera1: numpy array
Projection matrix of first camera
camera2: numpy array
Projection matrix of second camera
Returns
-------
Fundamental matrix
"""
Pp = np.linalg.pinv(camera1)
# camera center
u, d, vh = svd(camera1)
center = vh[3, :]
# epipole on the second image
e = np.dot(camera2, center)
se = np.array(((0, -e[2], e[1]),
(e[2], 0, -e[0]),
(-e[1], e[0], 0)))
f_matrix = np.dot(se, np.dot(camera2, Pp))
return f_matrix
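# Sketch of the construction above (added for clarity): with camera center C
# (satisfying camera1 @ C = 0, recovered here as the last right singular
# vector) and epipole e = camera2 @ C on the second image, the fundamental
# matrix is F = [e]_x camera2 pinv(camera1), where [e]_x is the skew-symmetric
# cross-product matrix (cf. Hartley & Zisserman, Multiple View Geometry).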
def eight_point_algorithm(x1, x2):
""" Computes the fundamental matrix from 8 (or more) projection
point pairs
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
Returns
-------
F, the fundamental matrix satisfying x2.T * F * x1 = 0
"""
n_points = x1.shape[1]
if x2.shape[1] != n_points: # pragma: no cover
raise ValueError("Shape must be the same")
# normalize points
x1n, t1 = normalize_points(x1, is_homogeneous=True)
x2n, t2 = normalize_points(x2, is_homogeneous=True)
# build the vector
a = np.vstack((x2n[0, :] * x1n,
x2n[1, :] * x1n,
x2n[2, :] * x1n))
# find F in the normalized coordinates and transform it
u, d, vh = svd(a.T, full_matrices=True)
f_matrix = np.reshape(vh[8, :], (3, 3))
# force the rank 2 constraint
u, d, vh = svd(f_matrix, full_matrices=True)
d[2] = 0
f_matrix = np.dot(u, np.dot(np.diag(d), vh))
# transform coordinates
f_matrix = np.dot(t2.T, np.dot(f_matrix, t1))
return f_matrix
def right_epipole(f_matrix):
"""
Computes the right epipole (first image) of the fundamental matrix;
the right epipole satisfies F e = 0
**Parameters**
f_matrix: numpy array
Fundamental matrix
**Returns**
the right epipole
"""
u, d, vh = svd(f_matrix)
return vh[2, :]
def left_epipole(f_matrix):
"""
Computes the left epipole (second image) of the fundamental matrix;
the left epipole satisfies F.T e' = 0
**Parameters**
f_matrix: numpy array
Fundamental matrix
**Returns**
the left epipole
"""
u, d, vh = svd(f_matrix)
return u[:, 2]
def canonical_cameras_from_f(f_matrix):
"""
Retrieves the two canonical cameras given a fundamental matrix
**Parameters**
f_matrix: numpy array
Fundamental matrix
**Returns**
one pair of canonical cameras
"""
# the first camera is the identity
camera1 = np.eye(3, 4)
e = left_epipole(f_matrix)
se = np.array(((0, -e[2], e[1]),
(e[2], 0, -e[0]),
(-e[1], e[0], 0)))
camera2 = np.hstack((np.dot(se, f_matrix), e[:, np.newaxis]))
return camera1, camera2
def sampson_error(x1, x2, f_matrix):
"""
Computes the sampson error for a set of point pairs
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
f_matrix: numpy_array
fundamental matrix
Returns
-------
sampson error of each point pair
"""
f_x1 = np.dot(f_matrix, x1)
f_x2 = np.dot(f_matrix.T, x2)
#get the denominator
den = np.sum(f_x1[:2, :] ** 2, axis=0) +\
np.sum(f_x2[:2, :] ** 2, axis=0)
#get the numerator
num = np.sum((x2 * f_x1), axis=0)**2
return num / den
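# Formula implemented above (added for clarity): for a correspondence (x1, x2)
# the Sampson error is
#   (x2.T F x1)**2 / ((F x1)_1**2 + (F x1)_2**2 + (F.T x2)_1**2 + (F.T x2)_2**2),
# a first-order approximation to the geometric (reprojection) error.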
def reprojection_error(x1, x2, f_matrix):
"""
Computes the reprojection error for a set of point pairs
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
f_matrix: numpy_array
fundamental matrix
Returns
-------
reprojection error of each point pair
"""
def __sampson_residual(f, x1, x2):
"""
computes the residual of the sampson error
"""
f_matrix = np.reshape(f, (3, 3))
f_x1 = np.dot(f_matrix, x1)
f_x2 = np.dot(f_matrix.T, x2)
#get the denominator
den = np.sum(f_x1[:2, :] ** 2, axis=0) +\
np.sum(f_x2[:2, :] ** 2, axis=0)
#get the numerator
num = np.sum((x2 * f_x1), axis=0)
return num / np.sqrt(den)
def robust_f_estimation(x1, x2,
max_iter=1000,
distance='sampson',
n_samples=8,
prob = 0.99,
refine_result=True,
inlier_threshold=2):
""" Computes the fundamental matrix using the eight point algorithm
(Hartley 1997)
Parameters
----------
x1: numpy array
projections of points in the first image, in homogeneous coordinates
x2: numpy array
projections of points in the second image, in homogeneous coordinates
max_iter: int, optional
maximum number of iterations of the ransac algorithm
distance: string, optional
distance to use to find inliers/outliers
n_samples: int, optional
number of points to samples at each RANSAC iteration
prob: float, optional
probability of drawing an outlier-free sample
refine_result: bool, optional
whether after RANSAC a non linear estimation is performed
inlier_threshold: float, optional
maximum distance to consider a point pair inlier
Returns
-------
F, the fundamental matrix satisfying x2.T * F * x1 = 0
"""
iteration = 0
n_points = x1.shape[1]
is_inlier = np.zeros(n_points, dtype=bool)
# variables to store the best result found
best_inliers = is_inlier
best_n_inliers = 0
while iteration < max_iter:
#select 8 points at random
idx = np.random.choice(n_points, n_samples, replace=False)
selected_x1 = x1[:, idx]
selected_x2 = x2[:, idx]
#get inliers
f_matrix = eight_point_algorithm(selected_x1,
selected_x2)
# find the error distance
if distance == 'sampson':
e = sampson_error(x1, x2, f_matrix)
else: # pragma : no cover
raise ValueError()
is_inlier = e < inlier_threshold
n_inliers = np.count_nonzero(is_inlier)
if n_inliers > best_n_inliers:
best_inliers = is_inlier
best_n_inliers = n_inliers
#update max_iterations if estimation is improved
# the epsilon (1e-10) is added in case of all inliers
eps = 1 - n_inliers / n_points
new_iter = log(1 - prob) / log(1e-10 + 1 - (1-eps)**n_samples)
if new_iter < max_iter:
max_iter = new_iter
iteration += 1
#refine the estimate using all inliers
best_x1 = x1[:, best_inliers]
best_x2 = x2[:, best_inliers]
f_matrix = eight_point_algorithm(best_x1, best_x2)
if refine_result:
if distance == 'sampson':
f = np.reshape(f_matrix, 9)
f_matrix, jac = leastsq(__sampson_residual, f, args=(best_x1, best_x2))
f_matrix = np.reshape(f_matrix, (3, 3))
return f_matrix
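# Usage sketch (added; not part of the original module): x1 and x2 are matched
# image points in homogeneous coordinates, shape (3, N), e.g. from a feature
# matcher.
# F = robust_f_estimation(x1, x2, max_iter=1000, inlier_threshold=2)
|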
Hotel pick ups available. Please call to reconfirm time and location.
This evening dine under a million stars at the award-winning Sounds of Silence Dinner. Enjoy a gourmet buffet of Australian delicacies accompanied by fine Australian wines, then sit back and relax as a star talker takes you on a tour of the night skies (conditions permitting).
Rise early to watch the first rays of the sun set the Red Centre and Uluru alight. Later, join your guide for a drive around the base of Uluru, then travel to the Mutitjulu Waterhole. Here, view Aboriginal rock art and learn about the area as your Driver Guide points out native flora and fauna and explains the history of Uluru. Visit the Uluru – Kata Tjuta Cultural Centre to see locally made arts and crafts.
This afternoon travel to the mystical domes of Kata Tjuta (the Olgas). Visit Walpa Gorge where the walking trail through the gorge follows the natural creek bed between two of the tallest domes of Kata Tjuta. Enjoy nibbles and complimentary wine while watching striking colour changes of Uluru at sunset.
This morning time is available for an optional scenic helicopter flight or perhaps a Harley Davidson Ride around Uluru.
This afternoon you will be transferred from your hotel to Ayers Rock Airport for your onward flight. |
#!/usr/bin/python2.7
# sweeplinux v0.1: a simple script to look for signs of HackingTeam RCS Linux agent
# [email protected]
#
# based on: https://github.com/0xPoly/Hacking-Team-Sweeper/blob/master/signatures/linux.md
import glob
import sys
from platform import platform,architecture
from os.path import expanduser
whoopsie=expanduser('~/.whoopsie*')
crashreports='/var/crash/.reports-*-*'
tmpreports='/var/tmp/.reports-*-*'
#print(sys.version,platform(),architecture())
ok=True
if glob.glob(whoopsie)!=[]:
print('WARNING: Detected HT whoopsie file in home directory, Your computer may be infected with a version of HackingTeam RCS Agent!')
ok=False
if glob.glob(crashreports)!=[]:
print('WARNING: Detected HT crash reports, Your computer may be infected with a version of HackingTeam RCS Agent!')
ok=False
if glob.glob(tmpreports)!=[]:
print('WARNING: Detected HT tmp reports, Your computer may be infected with a version of HackingTeam RCS Agent!')
ok=False
if ok:
print('OK: Nothing strange to report.')
else:
print('Please shutdown your network connection NOW!')
|
ADDY, Wash. — The search for a 19-year-old Mead woman ended Friday in rural Stevens County, where detectives apparently located the body of Jamie Lynn Drake under the floorboards of a tiny cabin.
The suspect, 20-year-old Kevin Wayne Newland, allegedly led detectives to the site in a late-night drive through the forested back roads northwest of Addy, Wash.
“They said, ‘OK, why are we here?’ He walked over and pulled open the hatch and walked away,” said Sgt. Dave Reagan of the Spokane County Sheriff’s Department.
The discovery of the body, wrapped in a blue plastic tarp, ended the statewide search for Drake, who disappeared on June 23. Reagan said detectives believe that Drake was killed at her apartment and her body moved to the cabin.
“We should all be reminded that life is very precious and we should let all of those around us know how special they are to us,” the family said.
After a statewide alert, Newland was arrested Tuesday in King County when he was found driving the Mustang.
According to a search warrant filed in Spokane County District Court, Newland told one acquaintance that he bought the car in Kennewick, Wash., for $3,200 from a guy who had just gotten out of prison; he told a second friend that he bought it in a bar.
The court documents said Newland later told a detective that he bought the car from a man in Spokane named Jamie, who did not have the car’s title.
Arrested on suspicion of possessing stolen property, Newland was transferred to Spokane on Thursday afternoon. Within hours, he led detectives to the cabin, Reagan said.
“They just kept talking to him and chipping away,” Reagan said.
Stevens County sheriff’s personnel guarded the cabin overnight, as Spokane County officers worked to secure a search warrant.
By mid-afternoon on Friday, sheriff’s deputies had finished the recovery at the cabin, which they believe is owned by Newland’s mother. The small red cabin, ringed with white Christmas lights, is on Marble Valley Basin Road.
In Stevens County, Newland has prior convictions for second-degree theft, vehicle prowling and theft of a firearm, according to a bench warrant issued on Thursday.
Reagan declined to discuss a possible motive or the cause of death, pending an autopsy.
The Sheriff’s Office said additional charges are expected to be filed against Newland in the coming days.
On Friday, about 70 friends gathered at MEAD Alternative High School, where Drake had graduated in the spring.
After graduation, Drake moved in with her friend Jordan Sheffield at Deer Run Apartments and was looking toward the future.
Newland can now be added to the MySpace Rogues Gallery.
My thoughts and prayers go out to the Drake family. |
from camera import CameraViewport
from OpenGL import GL
import mceutils
class ChunkViewport(CameraViewport):
defaultScale = 1.0 # pixels per block
def __init__(self, *a, **kw):
CameraViewport.__init__(self, *a, **kw)
def setup_projection(self):
w, h = (0.5 * s / self.defaultScale
for s in self.size)
minx, maxx = - w, w
miny, maxy = - h, h
minz, maxz = -4000, 4000
GL.glOrtho(minx, maxx, miny, maxy, minz, maxz)
def setup_modelview(self):
x, y, z = self.cameraPosition
GL.glRotate(90.0, 1.0, 0.0, 0.0)
GL.glTranslate(-x, 0, -z)
def zoom(self, f):
x, y, z = self.cameraPosition
mx, my, mz = self.blockFaceUnderCursor[0]
dx, dz = mx - x, mz - z
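# (comment added for clarity) clamp the new pixels-per-block scale to
# [1/16, 4]; when it changes, move the camera toward the block under the
# cursor so the zoom stays centered on that point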
s = min(4.0, max(1 / 16., self.defaultScale / f))
if s != self.defaultScale:
self.defaultScale = s
f = 1.0 - f
self.cameraPosition = x + dx * f, self.editor.level.Height, z + dz * f
self.editor.renderer.loadNearbyChunks()
incrementFactor = 1.4
def zoomIn(self):
self.zoom(1.0 / self.incrementFactor)
def zoomOut(self):
self.zoom(self.incrementFactor)
def mouse_down(self, evt):
if evt.button == 4: # wheel up - zoom in
# if self.defaultScale == 4.0:
# self.editor.swapViewports()
# else:
self.zoomIn()
elif evt.button == 5: # wheel down - zoom out
self.zoomOut()
else:
super(ChunkViewport, self).mouse_down(evt)
def rightClickDown(self, evt):
pass
def rightClickUp(self, evt):
pass
def mouse_move(self, evt):
pass
@mceutils.alertException
def mouse_drag(self, evt):
if evt.buttons[2]:
x, y, z = self.cameraPosition
dx, dz = evt.rel
self.cameraPosition = (
x - dx / self.defaultScale,
y,
z - dz / self.defaultScale)
else:
super(ChunkViewport, self).mouse_drag(evt)
def render(self):
super(ChunkViewport, self).render()
@property
def tooltipText(self):
text = super(ChunkViewport, self).tooltipText
if text == "1 W x 1 L x 1 H":
return None
return text
def drawCeiling(self):
pass
|
Mentor teacher Lisa Giblin at Winton Woods Intermediate School in Cincinnati, OH worked with TCLP Chinese teacher Gong Hongling on a Chinese New Year Festival and a Chinese Language Club project. The Chinese Festival promoted many aspects of Chinese culture and language to the community, and included traditional games, dances, Chinese riddles, calligraphy practice, food sampling, and information about Chinese language education and opportunities. The Winton Woods community was invited to participate, and over 200 students, parents, and community members attended. The After School Chinese Club aimed to increase interest and future enrollment in Chinese classes, and enrichment activities included lessons about making Chinese dumplings and Chinese masks. Plans for next year’s Chinese Club include beginning the meetings earlier in the year in order to allow even more students the opportunity to participate. |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from django.core.urlresolvers import reverse_lazy
from django.views.generic import DeleteView
from django.contrib import messages
from groups.models import Group
def groups_list(request):
groups = Group.objects.all()
# order groups list
order_by = request.GET.get('order_by', '')
# ordering groups by title by default
groups = groups.order_by('title')
if order_by in ('title', 'leader', 'id'):
groups = groups.order_by(order_by)
if request.GET.get('reverse', '') == '1':
groups = groups.reverse()
return render(request, 'groups_list.html', {'groups': groups})
def groups_add(request):
return HttpResponse('<h1>Group Add Form</h1>')
def groups_edit(request, gid):
return HttpResponse('<h1>Edit Group %s</h1>' % gid)
class GroupDeleteView(DeleteView):
model = Group
template_name = 'groups_confirm_delete.html'
success_url = reverse_lazy('home')
success_message = u"Група видалена успішно."
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(GroupDeleteView, self).delete(
request,
*args,
**kwargs)
|
"set in the heart of the beautiful East Hampshire countryside"
Hopefully you're looking at our site as you may be interested in taking up bowls, or finding a new club. If you only want to know who does what and where we are, please visit our contacts page, otherwise, read on.
Liphook Bowls Club welcomes players of all ages and abilities. Whether you are an experienced player having just moved into the area and looking for a club to join, or a recent retiree looking to remain active, having finished your working career, or a young person who would like to be involved in a competitive sport but would rather not be kicked around on a football or rugby field.
We know of one young man who started bowling when he was 8, not from our club, but one of those we play against. He is now in his late teens and has played for England under 18's.
Believe it or not Lawn Green Bowls is pretty active. For starters you are likely to be on your feet for around two and a half hours. You will walk up and down the green 18 to 21 times, or more, depending on the particular match you are involved in. You will bend up and down anywhere between 36 to 84 times, during which, you will deliver your bowl.
We have 3 teams playing in the 3 Counties local league, and some members participating in County competitions. You would be surprised how competitive bowls can get.
During the summer season we have club singles, doubles, triples and other competitions with a level to suite all. During the winter season we have an active short mat community playing on two rinks. We play this format during the winter on a Monday evening starting at 6.30pm and afternoons on Tuesday, Wednesday and Thursday, starting at 2 p.m.
Please just come along to the club on any of the days mentioned above during the winter, or Weekends and possibly mid week, during the summer and chat with some of our members. Also feel free to contact any of the people listed on our contacts page for more information.. |
import random
from battlePy.default_config import BOARD_HEIGHT, BOARD_WIDTH
from battlePy.player import Player
from battlePy.ship import RIGHT, UP
class RandomPlayer(Player):
def initPlayer(self):
self.name = 'RandomPlayer'
def placeShips(self):
for ship in self.ships:
isValid = False
while not isValid:
orientation = random.choice([UP, RIGHT])
if orientation == UP:
location = (
random.randint(0, BOARD_WIDTH - 1),
random.randint(0, BOARD_HEIGHT - 1 - ship.size),
)
else:
location = (
random.randint(0, BOARD_WIDTH - 1 - ship.size),
random.randint(0, BOARD_HEIGHT - 1),
)
ship.placeShip(location, orientation)
if self.isShipPlacedLegally(ship):
isValid = True
def fireShot(self):
return (random.randint(0, BOARD_WIDTH - 1), random.randint(0, BOARD_HEIGHT - 1))
|
Wooden box bikes! In collaboration with Uula and the Peoples Utility Bike Project we have been developing a wooden box bike – the first prototypes are running well. Also on the way is a wooden long john / bakfiets.
These are prototype bicycles which can be made using only the most basic hand tools and craftsmanship. To reduce dependence on mass-produced imported products, the wooden bike project intends to create locally produced bicycles from recycled materials and components, while also empowering people with new skills and the experience of building something for yourself. |
# Twisted lazy computations
# (from rmo-sketchbook/cyst/cyst.py)
import mimetypes
import os
from twisted.web.static import File
from twisted.web.resource import Resource
from twisted.web.server import Site, NOT_DONE_YET
from twisted.internet import reactor
class Insist(Resource):
isLeaf = True
def __init__(self, cacheloc):
self.cacheloc = cacheloc
self.cachefile = None
if os.path.exists(cacheloc):
self.cachefile = File(cacheloc)
self.reqs_waiting = []
self.started = False
Resource.__init__(self)
def render_GET(self, req):
# Check if someone else has created the file somehow
if self.cachefile is None and os.path.exists(self.cacheloc):
self.cachefile = File(self.cacheloc)
# Check if someone else has *deleted* the file
elif self.cachefile is not None and not os.path.exists(self.cacheloc):
self.cachefile = None
if self.cachefile is not None:
return self.cachefile.render_GET(req)
else:
self.reqs_waiting.append(req)
req.notifyFinish().addErrback(
self._nevermind, req)
if not self.started:
self.started = True
reactor.callInThread(self.desist)
return NOT_DONE_YET
def _nevermind(self, _err, req):
self.reqs_waiting.remove(req)
def desist(self):
self.serialize_computation(self.cacheloc)
reactor.callFromThread(self.resist)
def _get_mime(self):
return mimetypes.guess_type(self.cacheloc)[0]
def resist(self):
if not os.path.exists(self.cacheloc):
# Error!
print("%s does not exist - rendering fail!" % (self.cacheloc))
for req in self.reqs_waiting:
req.headers[b"Content-Type"] = b"text/plain"
req.write(b"cyst error")
req.finish()
return
self.cachefile = File(self.cacheloc)
# Send content to all interested parties
for req in self.reqs_waiting:
self.cachefile.render(req)
def serialize_computation(self, outpath):
        raise NotImplementedError  # NotImplemented is not an exception; subclasses must override this
class HelloCyst(Insist):
def serialize_computation(self, outpath):
import time
time.sleep(10)
open(outpath, "w").write("Hello, World")
if __name__=='__main__':
import sys
c = HelloCyst(sys.argv[1])
site = Site(c)
port = 7984
reactor.listenTCP(port, site)
print("http://localhost:%d" % (port))
reactor.run()
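A quick manual test of the demo, assuming the server above is running on port 7984; the first request blocks roughly ten seconds while HelloCyst computes, and later requests return the cached file immediately:

import urllib2
print(urllib2.urlopen('http://localhost:7984/').read())  # -> 'Hello, World'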
|
#ActiveSalford The University of Salford Sports Centre is proud to announce a new aquatic partnership with Fast Lane Swim School Ltd, beginning in January 2018. Fast Lane Swim School will become the sole provider of our swimming lesson provision. Fast Lane cater for all ages and abilities, and will deliver a wide range of aquatic provision, from adult lessons to parent-and-baby lessons. For more information, or to book an initial assessment, please contact Hannah and Scarlett at [email protected]. |
import pytest
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.test.client import RequestFactory
from pontoon.base.admin import UserAdmin
from pontoon.base.models import PermissionChangelog
from pontoon.test.factories import (
GroupFactory,
LocaleFactory,
)
@pytest.fixture
def locale_c():
translators_group = GroupFactory.create(name="locale translators",)
managers_group = GroupFactory.create(name="locale managers",)
return LocaleFactory.create(
code="nv",
name="Na'vi",
translators_group=translators_group,
managers_group=managers_group,
)
@pytest.fixture
def user_form_request():
"""
Mock for a request object which is passed to every django admin form.
"""
def _get_user_form_request(request_user, user, **override_fields):
rf = RequestFactory()
fields = (
"username",
"email",
"first_name",
"last_name",
)
form_request = {f: (getattr(user, f, "") or "") for f in fields}
form_request["date_joined_0"] = "2018-01-01"
form_request["date_joined_1"] = "00:00:00"
form_request.update(override_fields)
request = rf.post("/dummy/", form_request,)
request.user = request_user
return request
return _get_user_form_request
@pytest.fixture
def get_useradmin_form():
"""
Get a UserAdmin form instance.
"""
def _get_user_admin_form(request, user):
useradmin = UserAdmin(User, AdminSite(),)
form = useradmin.get_form(request=request, obj=user,)
return (
useradmin,
form(request.POST, instance=user, initial={"password": "password"},),
)
return _get_user_admin_form
@pytest.mark.django_db
def test_user_admin_form_log_no_changes(
user_a, user_b, user_form_request, get_useradmin_form,
):
_, form = get_useradmin_form(user_form_request(user_a, user_b), user_b,)
assert form.is_valid()
form.save()
assert list(PermissionChangelog.objects.all()) == []
@pytest.mark.django_db
def test_user_admin_form_log_add_groups(
locale_c,
user_a,
user_b,
user_form_request,
get_useradmin_form,
assert_permissionchangelog,
):
request = user_form_request(user_a, user_b, groups=[locale_c.managers_group.pk],)
useradmin, form = get_useradmin_form(request, user_b,)
assert form.is_valid()
useradmin.save_model(request, user_b, form, True)
(changelog_entry0,) = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0, "added", user_a, user_b, locale_c.managers_group,
)
@pytest.mark.django_db
def test_user_admin_form_log_removed_groups(
locale_c,
user_a,
user_b,
user_form_request,
get_useradmin_form,
assert_permissionchangelog,
):
user_b.groups.add(locale_c.managers_group)
request = user_form_request(user_a, user_b, groups=[],)
useradmin, form = get_useradmin_form(request, user_b,)
assert form.is_valid()
useradmin.save_model(request, user_b, form, True)
(changelog_entry0,) = PermissionChangelog.objects.all()
assert_permissionchangelog(
changelog_entry0, "removed", user_a, user_b, locale_c.managers_group,
)
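These tests also depend on an assert_permissionchangelog fixture defined in pontoon's conftest, which is not shown here. A plausible sketch of its shape, for readability (the attribute names are assumptions about the PermissionChangelog model, not code taken from pontoon):

@pytest.fixture
def assert_permissionchangelog():
    def _assert(entry, action, performed_by, performed_on, group):
        # One changelog row should record who changed whose membership in which group.
        assert entry.action_type == action
        assert entry.performed_by == performed_by
        assert entry.performed_on == performed_on
        assert entry.group == group
    return _assert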
|
Far-left Democrat Kyrsten Sinema was sworn into Congress this week, but something was a little different about her ceremony. Instead of placing her hand on the Bible, Sinema chose to hold a copy of a book containing both the United States and Arizona Constitutions.
Kyrsten always gets sworn in on a Constitution simply because of her love for the Constitution.
I assume that means she will uphold the Constitution, right? Protect the Second Amendment at all costs? Boldly uphold the Tenth against her party’s assaults? Yeah, I doubt it, too. Why do I doubt that she loves the Constitution? Well, she wants to further reduce Second Amendment rights, and she wants to do all kinds of things that don’t fall under the purview of the Constitution, thereby violating the Tenth Amendment. Her take on national security? Well, that depends on what you think about the wall.
Meanwhile, the other senator from Arizona, Martha McSally, was sworn in using a Bible – and not just any Bible. For her swearing-in, the University of Arizona loaned McSally a Bible recovered from the wreckage of the USS Arizona after its destruction at Pearl Harbor.
California has clearly been seeping into Arizona, and this is what happens. |
#!/usr/bin/env python3
from nettest.sockets import TcpSocket
from gettext import gettext as _  # the messages below are wrapped in _() for translation
import argparse
import time
import select
import socket
class TcpReceiver(object):
def __init__(self):
self._setup_args()
def _setup_args(self):
parser = argparse.ArgumentParser(description=_("accept tcp connection and receive tcp message"))
parser.add_argument('--ip', type=str, help=_("Specifies the ip to bind"), default='0.0.0.0')
parser.add_argument('port', type=int, help=_("Specifies the port to bind"))
parser.add_argument('-q', '--quiet', action='store_true', help=_("Quiet mode, don't print the message received"))
self._args = parser.parse_args()
def run(self):
sock = TcpSocket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self._args.ip, self._args.port))
sock.listen(10)
sockets = [sock,]
while True:
infds, outfds, errfds = select.select(sockets, [], [])
for fd in infds:
if fd == sock:
client, client_addr = sock.accept()
sockets.append(client)
if not self._args.quiet:
print(_("accept connection from {0}".format(client_addr)))
else:
buffer = fd.recv(1024)
if len(buffer) != 0:
if not self._args.quiet:
                            print(fd.getpeername(), buffer)
else:
client_addr = fd.getpeername()
fd.close()
if not self._args.quiet:
print(_("close connection from {0}".format(client_addr)))
sockets.remove(fd)
if __name__ == '__main__':
try:
tool = TcpReceiver()
tool.run()
except KeyboardInterrupt:
print()
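For manual testing, a minimal stdlib-only sender to pair with the receiver (the address and port are whatever you passed on the command line; 12345 here is just an example):

import socket

s = socket.create_connection(('127.0.0.1', 12345))
s.sendall(b'hello')
s.close()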
|
Your experiences of dying, death and bereavement could help others. Would you be willing to speak to us?
St Nicholas Hospice Care is listening to people with experiences of life-limiting conditions in order to understand their needs.
In particular we would like to speak to those who have gone through this experience with no connection to hospice care.
Your story could help inspire the development of new services that will help us reach more people.
Please fill out your details below so that we can get in touch with you.
What is Listen Learn Adapt about?
The Hospice is listening to the needs of people who have experiences in dying, death and bereavement. If we truly understand what’s important to you, we can ensure our role supports the very best experiences in the final chapters of life.
How can I find out more about the Listen Learn Adapt process?
Please click here for further information and project updates.
Why is my experience important to you?
By listening to your story, we can understand what’s important and focus our services on needs.
The Hospice’s services are stretched, they cost more than we can fundraise and we currently do not reach everyone who needs support.
The Hospice must work differently, but to develop services which are right for the community we need to listen before coming up with solutions.
Why might I be approached?
You may be approached because you have experience with a life-limiting illness, dying, death or bereavement which will provide us with invaluable learning.
Your experience is not necessarily connected with St Nicholas Hospice Care.
What will happen if I take part?
By agreeing to take part in this interview you will be sharing your experiences with us; this will help us shape our services in the future.
We can arrange to meet you at the Hospice or we can come to your home, whichever is most convenient for you and where you might feel most at ease and able to talk without interruption.
We will ask you to sign a consent form of which you will have a copy. The consent form will enable you to express preferences on how we document the interview such as using video and audio recording.
The interview may take up to one hour.
Do I have to take part?
No. It is your choice to take part.
Your participation in this interview is entirely voluntary.
You are free to refuse to answer any question at any time.
You are free to refuse to take part in any activity at any time.
You are free to withdraw from the interview at any time.
Can I keep my story confidential?
You have the choice to keep your identity anonymous. We encourage you to share your identity to ensure a full understanding of your experience.
Do you have to film me?
Where will my story be kept?
The material recorded from your interview will be stored securely on St Nicholas Hospice Care’s servers which are located at its premises in Bury St Edmunds.
What will I be asked to provide consent for?
We will ask if we can share your experiences with service design specialists Livework, who are working with the Hospice on the service development.
You will be asked to provide consent for the Hospice to use information and quotations from your interview in reports and materials to communicate the project’s progress.
You will be asked to provide consent for files resulting from this study to be used for transcripts, use of quotes, and images to be disseminated in the form of published or public material on the web or paper release.
How will my story be used?
Your interview will be shared with Livework, a service design company which specialises in developing services that respond to the needs of the community.
Livework have a proven track record and their knowledge and expertise will ensure what’s important to you inspires how our services are developed.
What do I do if I am interested in taking part and hearing more about the Hospice’s Listen Learn Adapt process?
What if I need to speak to someone about support? |
from django.shortcuts import render, redirect
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.db.models import Q
from django.views.generic.edit import (
CreateView,
UpdateView
)
from django.contrib.auth.models import User
from .models import Room, Message
from .forms import MessageSendForm
class ListUsers(ListView):
model = User
context_object_name = 'user_list'
queryset = User.objects.filter(is_active=True, is_superuser=False)
template_name = 'chat/user_list.html'
def list_chats(request, user_id):
if user_id is not None:
user = User.objects.get(id=user_id)
users = User.objects.filter(~Q(id=user_id), is_active=True, is_superuser=False)
return render(request, 'chat/room_list.html',
{'list_users': users,
'usuario': user.id})
else:
return render(request, 'chat/room_list.html')
def messages(request, user_id, room_id=None):
user = User.objects.get(id=user_id)
form = MessageSendForm()
if request.method == 'POST':
form = MessageSendForm(request.POST)
if form.is_valid():
#import pdb; pdb.set_trace()
room_chat = Room.objects.get(id=room_id)
message = form.save(commit=False)
message.message = request.POST['message']
message.room = room_chat
message.user = user
message.save()
if room_id:
room_chat, created = Room.objects.get_or_create(user=user_id)
#messages = Message.objects.filter(room=room_chat[0], user=)
messages = reversed(room_chat.messages.order_by('-time')[:50])
users = User.objects.filter(~Q(id=user_id), is_active=True, is_superuser=False)
return render(request, 'chat/chat.html',
{'messages': messages,
'users': users,
'user_chat': user.username,
'usuario': user.id,
'user_name': '%s %s' % (user.first_name, user.last_name),
'form': form})
else:
return render(request, 'chat/room_list.html')
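A hedged sketch of the routes these chat views imply (URL patterns and names are assumptions, in the same pre-2.0 Django style):

from django.conf.urls import url
from chat import views

urlpatterns = [
    url(r'^users/$', views.ListUsers.as_view(), name='user_list'),
    url(r'^chats/(?P<user_id>\d+)/$', views.list_chats, name='room_list'),
    url(r'^chats/(?P<user_id>\d+)/room/(?P<room_id>\d+)/$', views.messages, name='messages'),
]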
|
Photo shoot styling – we use selected props and flower decorations as our strongest storytelling tools. The stylistic design of the set comprises the creation of an ideal scene for various occasions and events. It matches the thematic requests given by our clients – large companies, photographers, magazines and private persons. Such shootings require knowledge, experience, expertise and a trained eye for the perfect harmony of colours, textures and composition. Our aim is to convey the story, concept, feeling or message to the end customer, regardless of the media, whether it be an editorial, a commercial or a corporative magazine.
Lela Design’s studio has 20 years of experience in the creative industry and prides itself in collaboration with a wide team of professionals required for these types of shootings, including top photographers, lighting experts, designers, stylists, make-up artists and hair stylists, but also in our choice of interesting locations.
By using carefully selected private inventory, we simplify the complete process and offer different stylings for the various needs of stylistic design. To reach the perfect styling solution, we collaborate with art directors, PR experts, marketing experts, designers or private persons who hire us to bring their products and locations to the level of style perfection. |
"""
Utilities for PyMDP module
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
def BarycentricInterpolation(bins, pnts):
"""
barycentricinterpolation for given points,
return the barycentric coordinates for points within the grids
INPUT
bins - grids for discretization,
m-length array where bins[i] indicates the mesh along dimension i
pnts - an array of pnts, each points is an m-length indicates the Cartesian coordinates
can be n pnts in total
RETURN
indices - an n-length list of indices, each indices is d-length (d=m+1) for interpolating points invovled
coeffs - an n-length list of coefficients, each coefficients is d-length for reconstructing points n
A pythonic version barycentricinterpolation from Russ' drake utility function
does not support dcoefs currently...
"""
#note here the layout of input and output is different from the C++ version of drake
m = pnts.shape[1]
n = pnts.shape[0]
d = m+1
if len(bins) != m:
print 'The number of bins must equal to the dimension of the points.' #validation
return None, None
binsize = [len(bins[i]) for i in range(m)]
nskip = np.concatenate([[1], np.cumprod([binsize[i] for i in range(m-1)])])
#a list of bary points for future sorting...
b = [{'dim':0, 'fracway':0.0, 'dfracway':0.0} for i in range(d)]
indices = np.zeros((n, d))
coeffs = np.zeros((n, d))
for j in range(n):
sidx = 0 # 0-index in our case...
for i in range(m):
pt = pnts[j, i]
curr_bin = bins[i]
curr_bin_size = binsize[i]
b[i]['dim'] = i
if curr_bin_size == 1: #singleton dimensions
#sidx is unchanged
b[i]['fracway'] = 1.0
elif pt > curr_bin[curr_bin_size-1]:
#larger than max bound of bin
sidx += nskip[i] * (curr_bin_size-1)
b[i]['fracway'] = 1.0
b[i]['dfracway'] = 0.0
elif pt < curr_bin[0]:
#less than min bound of bin
sidx += nskip[i]
b[i]['fracway'] = 0.0
b[i]['dfracway'] = 0.0
else:
#Russ commented that smarter search can be done here...
#i guess we can do it in a pythonic way...
next_bin_index = np.argmax(curr_bin>pt)
sidx += nskip[i]*next_bin_index
b[i]['fracway'] = (pt - curr_bin[next_bin_index-1])/(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
b[i]['dfracway'] = 1./(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
#sort dimension based on fracway (lowest to highest)
b_sorted = sorted(b[:-1], key=lambda b_elem: b_elem['fracway'])
# final element of b_sorted,
b_sorted.append({'dim':m-1,'fracway':1.0, 'dfracway':0.0})
# top right corner
indices[j, 0] = sidx
coeffs[j, 0] = b_sorted[0]['fracway']
for i in range(m):
if binsize[b_sorted[i]['dim']] > 1:
#support singletone dimension
sidx -= nskip[b_sorted[i]['dim']]
indices[j, i+1] = sidx
coeffs[j, i+1] = b_sorted[i+1]['fracway'] - b_sorted[i]['fracway']
return indices, coeffs
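# Usage sketch, worked through the code above (illustrative values):
#   bins = [np.array([0., 1., 2.]), np.array([0., 1., 2.])]
#   pnts = np.array([[0.5, 0.25]])
#   indices, coeffs = BarycentricInterpolation(bins, pnts)
#   -> indices[0] == [4., 1., 0.] and coeffs[0] == [0.25, 0.25, 0.5]
# The coefficients are nonnegative and sum to one for each point.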
def add_arrow_to_line2D(
axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8],
arrowstyle='-|>', arrowsize=1, transform=None):
"""
Add arrows to a matplotlib.lines.Line2D at selected locations.
Parameters:
-----------
axes:
line: list of 1 Line2D obbject as returned by plot command
arrow_locs: list of locations where to insert arrows, % of total length
arrowstyle: style of the arrow
arrowsize: size of the arrow
transform: a matplotlib transform instance, default to data coordinates
Returns:
--------
arrows: list of arrows
"""
if (not(isinstance(line, list)) or not(isinstance(line[0],
mlines.Line2D))):
raise ValueError("expected a matplotlib.lines.Line2D object")
x, y = line[0].get_xdata(), line[0].get_ydata()
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
color = line[0].get_color()
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
raise NotImplementedError("multicolor lines not supported")
else:
arrow_kw['color'] = color
linewidth = line[0].get_linewidth()
if isinstance(linewidth, np.ndarray):
raise NotImplementedError("multiwidth lines not supported")
else:
arrow_kw['linewidth'] = linewidth
if transform is None:
transform = axes.transData
arrows = []
for loc in arrow_locs:
s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
n = np.searchsorted(s, s[-1] * loc)
arrow_tail = (x[n], y[n])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
p = mpatches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
return arrows
def draw_err_bar_with_filled_shape(ax, x_data, y_data, err=None, color=(0, 0, 1), transp=0.2):
'''
wrapped function to draw curve with filled shape to indicate error bar
'''
line = None
shade = None
#validate the length of err
if err is not None:
if len(x_data) != len(y_data) or len(x_data) != len(err):
print 'The length of data and err must be consistent.'
return line, shade
line, = ax.plot(x_data, y_data, color=color)
ax.hold(True)
#<hyin/Jan-22nd-2016> linewidth=0 does not work on matplotlib 1.4.2, it is fixed on 1.4.3 though...
shade = plt.fill_between(x_data, y_data-err, y_data+err, alpha=transp, edgecolor=color, facecolor=color, linewidth=3.0)
return line, shade
else:
#just draw curve...
line, = ax.plot(x_data, y_data, color=color)
return line, shade |
1) The powerhouse should be sited in a position safe from immersion during rising floods.
2) The hydropower turbine and generator set must be firmly installed; the powerhouse should be dry and well ventilated, with smooth drainage.
A micro hydro turbine is usually small in size, with a small water flow, a modest head and limited capacity, and the equipment in its powerhouse is simple. For a home micro hydropower generator under 1000 W, the powerhouse construction can be simple: as long as it accommodates the equipment and protects it from rain and other bad weather, that is enough. For power plants above 1500 W, you can size the powerhouse according to the dimensions of the unit and the space needed for convenient maintenance. For units above 30 kW without an automatic control device, you should also allow for a personnel-on-duty room.
The tailrace channel is the drainage channel after the hydro turbine's draft tube. For a micro Pelton turbine or micro Turgo turbine, the tailrace only needs to drain water away from the turbine; there are no other requirements. For reaction-type micro hydro turbines (Francis, axial-flow/propeller, Kaplan and tubular turbines), the tailrace has specific requirements, and it must be built strictly in accordance with the requirements of HS Dynamic Energy or your micro hydropower turbine manufacturer and supplier; otherwise it will greatly reduce the unit's output, and in serious cases the unit cannot generate electricity at all. |
#!/usr/bin/env python
# This file was taken from Riverbank's examples,
# which was an adaptation of the original C++ Qt's examples.
from PySide import QtCore, QtGui
import animatedtiles_rc
# PyQt doesn't support deriving from more than one wrapped class so we use
# composition and delegate the property.
class Pixmap(QtCore.QObject):
def __init__(self, pix):
super(Pixmap, self).__init__()
self.pixmap_item = QtGui.QGraphicsPixmapItem(pix)
self.pixmap_item.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
def set_pos(self, pos):
self.pixmap_item.setPos(pos)
def get_pos(self):
return self.pixmap_item.pos()
pos = QtCore.Property(QtCore.QPointF, get_pos, set_pos)
class Button(QtGui.QGraphicsWidget):
pressed = QtCore.Signal()
def __init__(self, pixmap, parent=None):
super(Button, self).__init__(parent)
self._pix = pixmap
self.setAcceptHoverEvents(True)
self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
def boundingRect(self):
return QtCore.QRectF(-65, -65, 130, 130)
def shape(self):
path = QtGui.QPainterPath()
path.addEllipse(self.boundingRect())
return path
def paint(self, painter, option, widget):
down = option.state & QtGui.QStyle.State_Sunken
r = self.boundingRect()
grad = QtGui.QLinearGradient(r.topLeft(), r.bottomRight())
if option.state & QtGui.QStyle.State_MouseOver:
color_0 = QtCore.Qt.white
else:
color_0 = QtCore.Qt.lightGray
color_1 = QtCore.Qt.darkGray
if down:
color_0, color_1 = color_1, color_0
grad.setColorAt(0, color_0)
grad.setColorAt(1, color_1)
painter.setPen(QtCore.Qt.darkGray)
painter.setBrush(grad)
painter.drawEllipse(r)
color_0 = QtCore.Qt.darkGray
color_1 = QtCore.Qt.lightGray
if down:
color_0, color_1 = color_1, color_0
grad.setColorAt(0, color_0)
grad.setColorAt(1, color_1)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(grad)
if down:
painter.translate(2, 2)
painter.drawEllipse(r.adjusted(5, 5, -5, -5))
painter.drawPixmap(-self._pix.width() / 2, -self._pix.height() / 2,
self._pix)
def mousePressEvent(self, ev):
self.pressed.emit()
self.update()
def mouseReleaseEvent(self, ev):
self.update()
class View(QtGui.QGraphicsView):
def resizeEvent(self, event):
super(View, self).resizeEvent(event)
self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)
if __name__ == '__main__':
import sys
import math
app = QtGui.QApplication(sys.argv)
kineticPix = QtGui.QPixmap(':/images/kinetic.png')
bgPix = QtGui.QPixmap(':/images/Time-For-Lunch-2.jpg')
scene = QtGui.QGraphicsScene(-350, -350, 700, 700)
items = []
for i in range(64):
item = Pixmap(kineticPix)
item.pixmap_item.setOffset(-kineticPix.width() / 2,
-kineticPix.height() / 2)
item.pixmap_item.setZValue(i)
items.append(item)
scene.addItem(item.pixmap_item)
# Buttons.
buttonParent = QtGui.QGraphicsRectItem()
ellipseButton = Button(QtGui.QPixmap(':/images/ellipse.png'), buttonParent)
figure8Button = Button(QtGui.QPixmap(':/images/figure8.png'), buttonParent)
randomButton = Button(QtGui.QPixmap(':/images/random.png'), buttonParent)
tiledButton = Button(QtGui.QPixmap(':/images/tile.png'), buttonParent)
centeredButton = Button(QtGui.QPixmap(':/images/centered.png'), buttonParent)
ellipseButton.setPos(-100, -100)
figure8Button.setPos(100, -100)
randomButton.setPos(0, 0)
tiledButton.setPos(-100, 100)
centeredButton.setPos(100, 100)
scene.addItem(buttonParent)
buttonParent.scale(0.75, 0.75)
buttonParent.setPos(200, 200)
buttonParent.setZValue(65)
# States.
rootState = QtCore.QState()
ellipseState = QtCore.QState(rootState)
figure8State = QtCore.QState(rootState)
randomState = QtCore.QState(rootState)
tiledState = QtCore.QState(rootState)
centeredState = QtCore.QState(rootState)
# Values.
for i, item in enumerate(items):
# Ellipse.
ellipseState.assignProperty(item, 'pos',
QtCore.QPointF(math.cos((i / 63.0) * 6.28) * 250,
math.sin((i / 63.0) * 6.28) * 250))
# Figure 8.
figure8State.assignProperty(item, 'pos',
QtCore.QPointF(math.sin((i / 63.0) * 6.28) * 250,
math.sin(((i * 2)/63.0) * 6.28) * 250))
# Random.
randomState.assignProperty(item, 'pos',
QtCore.QPointF(-250 + QtCore.qrand() % 500,
-250 + QtCore.qrand() % 500))
# Tiled.
tiledState.assignProperty(item, 'pos',
QtCore.QPointF(((i % 8) - 4) * kineticPix.width() + kineticPix.width() / 2,
((i // 8) - 4) * kineticPix.height() + kineticPix.height() / 2))
# Centered.
centeredState.assignProperty(item, 'pos', QtCore.QPointF())
# Ui.
view = View(scene)
view.setWindowTitle("Animated Tiles")
view.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
view.setBackgroundBrush(QtGui.QBrush(bgPix))
view.setCacheMode(QtGui.QGraphicsView.CacheBackground)
view.setRenderHints(
QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
view.show()
states = QtCore.QStateMachine()
states.addState(rootState)
states.setInitialState(rootState)
rootState.setInitialState(centeredState)
group = QtCore.QParallelAnimationGroup()
for i, item in enumerate(items):
anim = QtCore.QPropertyAnimation(item, 'pos')
anim.setDuration(750 + i * 25)
anim.setEasingCurve(QtCore.QEasingCurve.InOutBack)
group.addAnimation(anim)
trans = rootState.addTransition(ellipseButton.pressed, ellipseState)
trans.addAnimation(group)
trans = rootState.addTransition(figure8Button.pressed, figure8State)
trans.addAnimation(group)
trans = rootState.addTransition(randomButton.pressed, randomState)
trans.addAnimation(group)
trans = rootState.addTransition(tiledButton.pressed, tiledState)
trans.addAnimation(group)
trans = rootState.addTransition(centeredButton.pressed, centeredState)
trans.addAnimation(group)
timer = QtCore.QTimer()
timer.start(125)
timer.setSingleShot(True)
trans = rootState.addTransition(timer.timeout, ellipseState)
trans.addAnimation(group)
states.start()
sys.exit(app.exec_())
|
How to get free WiFi in Serignac-sur-Garonne?
Very simple. Select a WiFi hotspot on the map and view the password, or find the nearest open hotspots. The site will also be useful to people who like to travel and to those visiting Serignac-sur-Garonne for the first time.
Install our mobile app for quick access to free internet everywhere. The app contains free WiFi hotspots in Serignac-sur-Garonne.
Using your smartphone, even in offline mode, you can easily find the nearest airport WiFi spots in Serignac-sur-Garonne as soon as you arrive. That lets you tell your close ones about a successful flight, chat with friends and check the latest news. Naturally, a free and reliable internet connection also gives you complete tourist information about transport in Serignac-sur-Garonne, the famous sights to see, your hotel and the way to get there.
Going out to eat, you can easily find the restaurants with free WiFi in Serignac-sur-Garonne and pick the ones with the best, high-speed signal, so you can enjoy smooth communication while waiting for your favourite dishes. It lets you share your impressions and photos immediately, and find the best sights and routes to plan a perfect trip.
Naturally, while flying you won't be able to use the net or speak on the phone; virtually all airlines prohibit it. Meanwhile, you can check the available places with free WiFi in Serignac-sur-Garonne on the interactive map in flight or offline mode, read reviews of the hotspots and schedule your time. So enjoy your trip with our mobile app, and stay informed about free WiFi places.
Wifispc.com – a handy travel assistant that displays places with free WiFi hotspots. |
# -*- coding: utf-8 -*-
# !/usr/bin/python
__author__ = 'Ray'
from flask import g, render_template, send_from_directory, Blueprint, current_app, url_for, jsonify
from os import path
from urlparse import urljoin
from werkzeug.contrib.atom import AtomFeed
from fnmatch import fnmatch
from datetime import datetime
from werkzeug.exceptions import abort
from flask_babel import gettext, refresh
class SimplePage(object):
title = ''
path = ''
_locale = ''
_is_default = False
def __init__(self, title=''):
self.title = title
def set_lang(locale_name):
if locale_name != '':
g.lang = locale_name.split('-')[0]
refresh()
def create_views(name, app):
main = Blueprint(name, name,
template_folder='templates',
static_url_path='/static',
static_folder='static')
try:
if app.config['TESTING'] is False:
pkg_file = path.join(app.path, '__init__.py')
if path.exists(pkg_file):
import imp
ext_module = imp.load_source(name, pkg_file)
routes_func_name = 'register'
if hasattr(ext_module, routes_func_name) and callable(getattr(ext_module, routes_func_name)):
ext_module.register(main)
finally:
__init_views(main, app)
app.register_blueprint(main)
return main
def __init_views(main, app):
@main.route('/')
@main.route('/<path:page_path>/')
def index(page_path='index'):
if fnmatch(page_path, '*.*'):
_abs_path = path.abspath(path.join('pages', path.dirname(page_path)))
return send_from_directory(_abs_path,
path.basename(page_path))
page = current_app.pages.get_or_404(page_path)
default_layout = 'page'
if page._is_post:
default_layout = 'post'
set_lang(page._locale)
template = 'layouts/%s.html' % page.meta.get('layout', default_layout)
return render_template(template, page=page, locale=page._locale, site=current_app.site)
@main.route('/api/pages/<path:search_path>.json')
def data_pages(search_path):
if search_path == 'all':
return jsonify(pages=[p.to_json for p in current_app.site.pages])
else:
_page = current_app.pages.get_or_404(search_path)
json_page = _page.to_json
json_page.update(pages=[p.to_json for p in current_app.site.query(search_path)])
return jsonify(json_page)
@main.route('/api/posts.json')
def data_posts():
return jsonify(posts=[p.to_json for p in current_app.site.posts])
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/tags/')
@main.route('/tags/')
def tags(locale_name=''):
set_lang(locale_name)
return render_template('layouts/tags.html',
page=SimplePage('All tags'),
site=current_app.site,
locale=locale_name)
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/tags/<name>/')
@main.route('/tags/<name>/')
def tag(name, locale_name=''):
set_lang(locale_name)
if (name is None) or name == '':
abort(404)
return render_template('layouts/tagged.html',
page=SimplePage(gettext(u'Articles tagged with:%(value)s', value=name)),
tag=name,
locale=locale_name,
site=current_app.site)
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/archives/')
@main.route('/archives/')
def archives(locale_name=''):
set_lang(locale_name)
return render_template('layouts/archives.html',
page=SimplePage(gettext(u'All archives')),
locale=locale_name,
site=current_app.site)
@main.route('/<regex("[a-z]{2}-[A-Z]{2}"):locale_name>/archives/<name>/')
@main.route('/archives/<name>/')
def archive(name, locale_name=''):
set_lang(locale_name)
results = [a for a in current_app.site.archives if a.title == name]
if len(results) == 0:
abort(404)
return render_template('layouts/archived.html',
page=SimplePage(gettext(u'Archive:%(value)s', value=name)),
locale=locale_name,
archive=results[0],
site=current_app.site)
def render_404():
"""Render the not found page
"""
return render_template('404.html', page={'title': gettext(u'Page not found'), 'path': '404'},
locale='',
site=current_app.site)
@app.errorhandler(404)
def page_not_found(e):
return render_404(), 404
@main.route('/404.html')
def static_404():
return render_404()
@app.route('/sitemap.xml')
def sitemap():
        locations = [(post.url, post.last_updated) for post in current_app.pages]
sites = [(current_app.site.url + l[0], l[1]) for l in locations]
return render_template('sitemap.xml', sites=sites), 200, {'Content-Type': 'application/xml; charset=utf-8'}
@app.route('/feeds/<path:name>.atom')
def feed(name='recent'):
_feed_url = url_for('.feed', name=name, _external=True)
_posts = current_app.site.posts
if name != 'recent':
_posts = current_app.site.query(name, all=True)
feed = AtomFeed(gettext('Recent posts'),
feed_url=_feed_url,
url=current_app.site.url)
        # if len(_posts) > 20:
        #     _posts = _posts[:20]
for post in _posts:
feed.add(post.meta.get('title', ''),
unicode(post.html),
content_type='html',
subtitle=post.meta.get('summary', ''),
author=post.meta.get('author'),
url=urljoin(current_app.site.url, post.url),
updated=post.last_updated,
published=post.published)
return feed.get_response()
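Note that the locale routes above use a regex URL converter, which is not built into Flask; the app must register one on its url_map. A common sketch of such a converter (an assumption about how this project wires it up, not taken from this file):

from werkzeug.routing import BaseConverter

class RegexConverter(BaseConverter):
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        self.regex = items[0]

# registered once at app setup time:
# app.url_map.converters['regex'] = RegexConverter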
|
Need Outsourcing, Hybrid or In-house Solutions?
"Boost organisational productivity with machine learning training"
Enterprise Intelligence is on track to become the most productive technology, recovering 6.2 billion hours of productivity by 2021 and generating $2.9 trillion in business value globally when paired with human intelligence. The biggest pain point emerging from world-leading research firms is the lack of specialised skills, with 47% of Chief Information Officers reporting that they need new skills for AI projects.
ACTIONX offers clients commercial training packages, using state-of-the-art technologies and the greatest minds in post-doctorate research. We offer both out-of-the-box and custom-developed classes, delivered onsite or remotely by a qualified practitioner.
Classes include interactive and practical lessons on data science, machine learning, deep learning & artificial intelligence for your public or private organisation.
“The greatest barrier to success is the fear of failure"
ACTIONX has identified the top reasons why data science, machine learning, deep learning and artificial intelligence adoption plans and proofs of concept ‘fail’.
They include: lack of clear project definitions, missions and goals; wrong or incorrectly applied use cases; lack of specialised skills; limited or failed funding approval; no sponsorship; security and technology risks; lack of data management and governance; system integration challenges; inability to measure potential and future value correctly; poor governance; little to no strategy and project alignment; and more.
With these reasons for failure in mind, we offer services that share our depth of skills and experience with our clients. From technical to strategic consulting, we ensure your success.
“An investment in knowledge always pays back ten-fold"
Data science, machine learning, deep learning and artificial intelligence hype is challenging business and technical decisions around the world. With new advancements in old technology, the speed of change, and disruptive technology, there is no room for ‘failure or lessons learnt’.
ACTIONX advisory services address your most pressing concerns, helping guide you and your team.
We act as a sounding board and help validate your strategic and technical direction on use cases, ethics, regulation, the latest implementations of AI, the evolution of human and machine interaction, the rise and impact of AI assistants and chat bots, when to adopt AI, and much more.
With advisory services tailored to you, only the best decisions are made, laying the foundation for your success. |
import MySQLdb
def consultaRecuperar(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
    # cnx.close() is not called here because closing would trigger an exception (a live cursor is returned).
return cursor
def consultaSQL(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
    respuesta = cursor.fetchone()
    # fetchone() returns a row tuple (or None), never the string '1';
    # compare against the first column of the row instead.
    if respuesta is not None and str(respuesta[0]) == '1':
        respuesta = True
cnx.close()
return respuesta
def consultaId(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
respuesta=cursor.fetchone()
#cnx.close()
return respuesta
def consultaPromociones(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
        # cnx.close() is not called here because closing would trigger an exception.
except Exception, e:
print ' '
return cursor
def registroSQL(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
cnx.commit()
cnx.close()
respuesta=True
except Exception, e:
        print 'The action could not be performed'
return respuesta
def consultaCodigo(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='Manuel5897-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
cnx.commit()
respuesta=cursor.fetchone()
        # cnx.close() is not called here because closing would trigger an exception.
except Exception, e:
print ' '
return respuesta
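A hedged refactoring sketch: the connection boilerplate above is repeated in every function, so a single helper plus contextlib.closing keeps the credentials in one place; fetching the rows before the connection closes also avoids the exception the comments mention (connection parameters elided here):

import contextlib

def abrir_conexion():
    return MySQLdb.connect(host='...', user='...', passwd='...', db='micupongt')

def ejecutar(consulta):
    # Open, execute, fetch and close in one place; return materialized rows.
    with contextlib.closing(abrir_conexion()) as cnx:
        cursor = cnx.cursor()
        cursor.execute(consulta)
        return cursor.fetchall()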
|
Riaz Aumeeruddy of Nature Seychelles invited me to provide a bird ringing training course on Cousin Island, Seychelles, from 18-23 September 2011. The training involved mist-netting of seabirds and land birds. A total of 478 birds were caught (including recaptures).
The participants were Annesa Rath, Dorothy Jeannie, Jessica Moumou, Jakawan Hoareau, Mary Ledlie, and Riaz Aumeeruddy. They were all awarded certificates at the end of the course. Birds caught are listed in a table below, after some photos of birds caught.
This species is easy to catch! A mistnet, even in the open, will catch this species. They are also easy to extract from the net and easy to handle - the bill is not too powerful as they catch small fish.
This species is the sparrow of the Seychelles, and is endemic to several small islands in the Seychelles. The species occurs throughout the island, and is very tame around the staff houses.
Several recaptures were made of birds ringed by PhD student L Vega in 2002, making these birds at least 9 years old - see here.
In the past, two fodies occurred on the island: the Seychelles Fody and the introduced Red Fody - the latter now only occasionally visits Cousin Island.
Many males, and some females, have some white primary coverts.
This species is easy to identify - any brown bird on Cousin with 3 colour rings is a warbler!
Nature Seychelles is thanked for funding my visit.
Riaz Aumeeruddy, Nature Seychelles, is thanked for organising all the logistics of my visit.
The warbler team (Martijn, Sjouke, Dave) is thanked for their help. |
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
from section_cmab.agent import Agent
from section_cmab.algorithms.lp_solvers import primal_dual_recover
from section_cmab.display.rewards import display_single_
def simulate_with_(algorithm, config=None, circles=200, dump=True,
algorithm_type='original', fixed_theta=False, prefix='', **kwargs):
"""
Simulate with parameters.
Args:
algorithm(function): algorithm
config(string): config path
circles(int): circles
dump(boolean): whether to dump result to file
algorithm_type(string): original, optimal, comparison
fixed_theta(boolean): fixed theta
prefix(string): prefix
"""
config = config or '../cfg/default.cfg'
agent = Agent.from_(config)
if algorithm_type == 'optimal':
algorithm_rewards = agent.find_optimal_with_bnd_(primal_dual_recover, circles=circles, dump=dump,
fixed_theta=fixed_theta,
prefix=prefix)
elif algorithm_type == 'original':
algorithm_rewards = agent.iter_with_(algorithm, circles=circles, dump=dump, prefix=prefix)
elif algorithm_type == 'greedy':
algorithm_rewards = agent.iter_with_greedy_(algorithm, circles=circles, dump=dump, prefix=prefix)
else:
algorithm_rewards = agent.comparison_(algorithm, circles=circles, dump=dump, prefix=prefix)
return algorithm_rewards
if __name__ == '__main__':
config_path = '../cfg/myron.cfg'
current_algorithm = primal_dual_recover
# current_algorithm = branch_and_bound
rewards = simulate_with_(current_algorithm, config=config_path, circles=30,
dump=False)
display_single_(rewards, all_curves=False, display_length=500, line_width=1.8,
title_size=20, label_size=16, color='#1E90FF')
|
Disclaimer: All information deemed reliable but not guaranteed. All properties are subject to prior sale, change or withdrawal. Neither listing broker(s) or information provider(s) shall be responsible for any typographical errors, misinformation, misprints and shall be held totally harmless. Listing(s) information is provided for consumers personal, non-commercial use and may not be used for any purpose other than to identify prospective properties consumers may be interested in purchasing. Information on this site was last updated 4/20/19. The listing information on this page last changed on 4/18/19. The data relating to real estate for sale on this website comes in part from the Internet Data Exchange program of CBRMLS (last updated Fri 04/19/2019 10:44:42 PM EDT) or Multiple Listing Service of Greater Cincinnati (last updated Fri 04/19/2019 05:10:44 PM EDT) or NKY MLS (last updated Sat 04/20/2019 03:33:20 AM EDT) or NEOHREX (last updated Sat 04/20/2019 12:34:47 AM EDT). Real estate listings held by brokerage firms other than Cutler Real Estate may be marked with the Internet Data Exchange logo and detailed information about those properties will include the name of the listing broker(s) when required by the MLS. All rights reserved. |
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from urllib2 import HTTPError
from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
from webkitpy.common.system.logtesting import LoggingTestCase
class NetworkTransactionTest(LoggingTestCase):
exception = Exception("Test exception")
def test_success(self):
transaction = NetworkTransaction()
self.assertEqual(transaction.run(lambda: 42), 42)
def _raise_exception(self):
raise self.exception
def test_exception(self):
transaction = NetworkTransaction()
did_process_exception = False
did_throw_exception = True
try:
transaction.run(lambda: self._raise_exception())
did_throw_exception = False
except Exception, e:
did_process_exception = True
self.assertEqual(e, self.exception)
self.assertTrue(did_throw_exception)
self.assertTrue(did_process_exception)
def _raise_500_error(self):
self._run_count += 1
if self._run_count < 3:
raise HTTPError("http://example.com/", 500, "internal server error", None, None)
return 42
def _raise_404_error(self):
raise HTTPError("http://foo.com/", 404, "not found", None, None)
def test_retry(self):
self._run_count = 0
transaction = NetworkTransaction(initial_backoff_seconds=0)
self.assertEqual(transaction.run(lambda: self._raise_500_error()), 42)
self.assertEqual(self._run_count, 3)
self.assertLog(['WARNING: Received HTTP status 500 loading "http://example.com/". '
'Retrying in 0 seconds...\n',
'WARNING: Received HTTP status 500 loading "http://example.com/". '
'Retrying in 0.0 seconds...\n'])
def test_convert_404_to_None(self):
transaction = NetworkTransaction(convert_404_to_None=True)
self.assertEqual(transaction.run(lambda: self._raise_404_error()), None)
def test_timeout(self):
self._run_count = 0
transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60)
did_process_exception = False
did_throw_exception = True
try:
transaction.run(lambda: self._raise_500_error())
did_throw_exception = False
except NetworkTimeout, e:
did_process_exception = True
self.assertTrue(did_throw_exception)
self.assertTrue(did_process_exception)
|
Your order will ship in 14-16 Business Days.
We expect payment within 5 business days, so please process this invoice within that timeframe.
Invoice updated by Alex Mahn. |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name' : 'Fleet Management',
'version' : '0.1',
'sequence': 165,
'category': 'Human Resources',
'website' : 'https://www.odoo.com/page/fleet',
'summary' : 'Vehicle, leasing, insurances, costs',
'description' : """
Vehicle, leasing, insurances, cost
==================================
With this module, Odoo helps you manage all your vehicles, the
contracts associated with those vehicles, as well as services, fuel log
entries, costs and many other features necessary to the management
of your fleet of vehicle(s).
Main Features
-------------
* Add vehicles to your fleet
* Manage contracts for vehicles
* Reminder when a contract reaches its expiration date
* Add services, fuel log entry, odometer values for all vehicles
* Show all costs associated to a vehicle or to a type of service
* Analysis graph for costs
""",
'depends': [
'base',
'mail',
],
'data': [
'security/fleet_security.xml',
'security/ir.model.access.csv',
'views/fleet_view.xml',
'views/fleet_board_view.xml',
'data/fleet_cars_data.xml',
'data/fleet_data.xml',
],
'demo': ['data/fleet_demo.xml'],
'installable': True,
'application': True,
}
|
All along we’ve been wondering how Letty could possibly be back in Fast and Furious, when she was very clearly killed in an earlier episode.
Now this new featurette has been released.. and the story is in here. It’s the only new spoiler in the video; otherwise the previous trailers (trailer #1 and trailer #2) have revealed several elements of the storyline. If you don’t mind the spoiler, or can’t help yourself, watch the video.
Set your system to full screen, turn up the volume – and enjoy! |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
Are the lat/lon coordinates using the WGS84 datum?
Yes, all lat/lon we are mentioning should use the WGS84 Geodetic Datum.
Well, web clients like Google Maps project those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on the sphere are treated as if
they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually noticeable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of these tools support -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is defined as EPSG:3785. The WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human-readable WKT format of EPSG:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution( zoom )
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution( zoom )
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels( mx, my, zoom)
return self.PixelsToTile( px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters( tx*self.tileSize, ty*self.tileSize, zoom )
maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom )
return ( minx, miny, maxx, maxy )
def TileLatLonBounds(self, tx, ty, zoom ):
"Returns bounds of the given tile in latutude/longitude using WGS84 datum"
bounds = self.TileBounds( tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return ( minLat, minLon, maxLat, maxLon )
def Resolution(self, zoom ):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > self.Resolution(i):
return i-1 if i!=0 else 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom ):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
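# A minimal usage sketch (hypothetical coordinates, not part of the original
# module): converting a WGS84 point to the TMS tile containing it at zoom 10,
# then to the Google and Microsoft naming conventions described above.
#
#   mercator = GlobalMercator()
#   mx, my = mercator.LatLonToMeters(47.0, 8.0)   # lat/lon -> EPSG:900913 meters
#   tx, ty = mercator.MetersToTile(mx, my, 10)    # meters -> TMS tile coordinates
#   gx, gy = mercator.GoogleTile(tx, ty, 10)      # TMS -> Google XYZ (y flipped)
#   quadkey = mercator.QuadTree(tx, ty, 10)       # TMS -> Microsoft quadkey string
#   print mercator.Resolution(10)                 # meters/pixel at zoom 10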
#---------------------
class GlobalGeodetic(object):
"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
The pyramid has two tiles at its top level, so it is not square but rectangular.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tileSize = 256):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180 / 256.0 / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def Resolution(self, zoom ):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180 / 256.0 / 2**zoom
#return 180 / float( 1 << (8+zoom) )
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = 180 / 256.0 / 2**zoom
return (
tx*256*res - 180,
ty*256*res - 90,
(tx+1)*256*res - 180,
(ty+1)*256*res - 90
)
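# A minimal usage sketch (hypothetical coordinates, not part of the original
# module): scaling a WGS84 point into the EPSG:4326 pixel pyramid and finding
# its TMS tile, as described in the class docstring above.
#
#   geodetic = GlobalGeodetic()
#   px, py = geodetic.LatLonToPixels(47.0, 8.0, 10)  # lat/lon -> pyramid pixels
#   tx, ty = geodetic.PixelsToTile(px, py)           # pixels -> TMS tile
#   print geodetic.TileBounds(tx, ty, 10)            # tile extent in degrees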
if __name__ == "__main__":
import sys, os
def Usage(s = ""):
print "Usage: globalmaptiles.py [-profile 'mercator'|'geodetic'] zoomlevel lat lon [latmax lonmax]"
print
if s:
print s
print
print "This utility prints for given WGS84 lat/lon coordinates (or bounding box) the list of tiles"
print "covering specified area. Tiles are in the given 'profile' (default is Google Maps 'mercator')"
print "and in the given pyramid 'zoomlevel'."
print "For each tile several information is printed including bonding box in EPSG:900913 and WGS84."
sys.exit(1)
profile = 'mercator'
zoomlevel = None
lat, lon, latmax, lonmax = None, None, None, None
boundingbox = False
argv = sys.argv
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-profile':
i = i + 1
profile = argv[i]
elif zoomlevel is None:
zoomlevel = int(argv[i])
elif lat is None:
lat = float(argv[i])
elif lon is None:
lon = float(argv[i])
elif latmax is None:
latmax = float(argv[i])
elif lonmax is None:
lonmax = float(argv[i])
else:
Usage("ERROR: Too many parameters")
i = i + 1
if profile != 'mercator':
Usage("ERROR: Sorry, given profile is not implemented yet.")
if zoomlevel == None or lat == None or lon == None:
Usage("ERROR: Specify at least 'zoomlevel', 'lat' and 'lon'.")
if latmax is not None and lonmax is None:
Usage("ERROR: Both 'latmax' and 'lonmax' must be given.")
if latmax != None and lonmax != None:
if latmax < lat:
Usage("ERROR: 'latmax' must be bigger than 'lat'")
if lonmax < lon:
Usage("ERROR: 'lonmax' must be bigger than 'lon'")
boundingbox = (lon, lat, lonmax, latmax)
tz = zoomlevel
mercator = GlobalMercator()
mx, my = mercator.LatLonToMeters( lat, lon )
print "Spherical Mercator (ESPG:900913) coordinates for lat/lon: "
print (mx, my)
tminx, tminy = mercator.MetersToTile( mx, my, tz )
if boundingbox:
mx, my = mercator.LatLonToMeters( latmax, lonmax )
print "Spherical Mercator (ESPG:900913) cooridnate for maxlat/maxlon: "
print (mx, my)
tmaxx, tmaxy = mercator.MetersToTile( mx, my, tz )
else:
tmaxx, tmaxy = tminx, tminy
for ty in range(tminy, tmaxy+1):
for tx in range(tminx, tmaxx+1):
tilefilename = "%s/%s/%s" % (tz, tx, ty)
print tilefilename, "( TileMapService: z / x / y )"
gx, gy = mercator.GoogleTile(tx, ty, tz)
print "\tGoogle:", gx, gy
quadkey = mercator.QuadTree(tx, ty, tz)
print "\tQuadkey:", quadkey, '(',int(quadkey, 4),')'
bounds = mercator.TileBounds( tx, ty, tz)
print
print "\tEPSG:900913 Extent: ", bounds
wgsbounds = mercator.TileLatLonBounds( tx, ty, tz)
print "\tWGS84 Extent:", wgsbounds
print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % (
bounds[0], bounds[1], bounds[2], bounds[3], "<your-raster-file-in-epsg900913.ext>", tz, tx, ty)
print
|
"So I caught the fabled Listening Club frisbee What to pick?
Been listening to a bit of Drill, the newish sound of disaffected youth, which has emerged from Brixton in the last couple of years. If you don’t know it, it’s the more trap-influenced child of Grime, with the angry electronic innovation of youth making music with whatever they can get hold of, like the PlayStation’s Music 2000, replaced by a less interesting heavy influence of US trap beats, and bars glorifying the behavior that manifests in things like the wave of stabbings and shootings by London youth. Fear not, I didn’t pick an example of this genre. Not out of Daily Mail-esque moral panic, but because I don’t find the genre that interesting, probably because I’m too old.
A couple of weeks ago I badgered, in a fanboy way, Lawrence from Felt/Denim/Go Kart Mozart on the 55 bus as it slinked westwards from Old Street roundabout. It was slightly depressing, especially as I love his work, to see a man seemingly shrunken by the disconnection between his talent and how much fame and success he actually got. He repeatedly asked if I had bought any of his records, and told me a number of times to buy his new record (he was on his way back from recording in Stokie). As a consequence I’ve been adding to his Spotify play count whilst at work. But I presume everyone’s familiar with his work, so I didn’t pick any of that.
I did think about picking an album by Scott Bradlee’s Post Modern Juke Box, but whilst I love their cover of Bizarre Love Triangle, the rest of their stuff in all of its late-night talkshow knowing cleverness grates after a few songs.
So I went for this, released last year. It’s sort of post-dubstep Techno by the ex-flatmate of Bristol’s Pinch (whose track Qawwali is one of my favourite tracks from the ‘00s), who has veered away from Dubstep to something more Techno. I picked it because, in parts, it has the feel of music that should be experienced against the backdrop of a still stimulated, but yawning, dawn of a sunny summer day. Which, err, might or might not work at 8pm on a Sunday evening, but in parts it’s very good."
# coding: utf-8
# Copyright (c) 2011 Lukas Martini, Phillip Thelen.
# This file may be used and distributed under the terms found in the
# file COPYING, which you should have received along with this
# program. If you haven't, please refer to [email protected].
from django.template import Library, Node
from homepage.navigation.models import Entry
register = Library()
class SimpleMenuNode(Node):
def __init__(self, menu=None):
if menu != None:
self.menu = menu
else:
self.menu = 1
def addmenu(self, parentid = None):
entrylist = Entry.objects.all().filter(menu__menuname = self.menu, parent__id = parentid)
self.menuhtml += '<ul>'
for entry in entrylist:
self.menuhtml += '<li><a href="{0}">{1}</a></li>'.format(entry.target, entry.name)
if entry.children.count() != 0:
self.addmenu(entry.id)
self.menuhtml += '</ul>'
def render(self, context):
self.menuhtml = ''
self.addmenu()
return self.menuhtml
class SimpleMenuOneNode(Node):
def __init__(self, menu=None, parent=None):
if menu != None:
self.menu = menu
else:
self.menu = 1
if parent != None:
self.parent = parent
else:
self.parent = None
def render(self, context):
entrylist = Entry.objects.all().filter(menu__menuname = self.menu, parent__id = self.parent)
menuhtml = '<ul>'
for entry in entrylist:
menuhtml += '<li><a href="{0}">{1}</a></li>'.format(entry.target, entry.name)
menuhtml += '</ul>'
return menuhtml
class CheckmenuNode(Node):
def render(self, context):
return ''
def simpleMenu(parser, token):
# split_contents() raises ValueError if the tag has no argument (or too many)
try:
tag_name, menu = token.split_contents()
except ValueError:
menu = None
return SimpleMenuNode(menu)
def simpleMenuOne(parser, token):
parent = None
menu = None
# fall back to an empty token list so 'content' is always bound
try:
content = token.split_contents()
except ValueError:
content = []
if len(content) > 2:
menu = content[1]
parent = content[2]
elif len(content) > 1:
menu = content[1]
return SimpleMenuOneNode(menu, parent)
def checkmenu(parser, token):
try:
tag_name, menuname = token.split_contents()
except ValueError:
parser.skip_past('endcheckmenu')
return CheckmenuNode()
# skip the enclosed block if the named menu has no entries
if not Entry.objects.filter(menu__menuname = menuname):
parser.skip_past('endcheckmenu')
return CheckmenuNode()
def endcheckmenu(parser, token):
return CheckmenuNode()
simpleMenu = register.tag(simpleMenu)
simpleMenuOne = register.tag(simpleMenuOne)
checkmenu = register.tag(checkmenu)
endcheckmenu = register.tag(endcheckmenu)
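# A minimal usage sketch in a template (hypothetical library and menu names;
# assumes this file sits in a templatetags/ package of an installed app so
# Django can load it):
#
#   {% load navigation_tags %}
#   {% simpleMenu mainmenu %}
#   {% checkmenu footermenu %}
#     {% simpleMenuOne footermenu %}
#   {% endcheckmenu %}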
|
The symbol of Chengdu, Tianfu Square, is now home to a symbol of luxury. The JW Marriott Hotel Chengdu brings refined style, culturally inspired cuisine and holistic wellbeing to southwest China’s largest city square. Guest rooms and suites are approachably elegant with warm, modern decor and spacious bathrooms. Dining celebrates local flavors, offering an elevated take on the traditional food found along Chunxi Road, Chengdu’s busiest street. Eight meeting rooms include the 10,398-square-foot Grand Ballroom, featuring its own pre-function foyer, roof garden and space for up to 800 guests. Wellness takes a mind-body approach with a state-of-the-art fitness center, indoor swimming pool and Spa. While the hotel is firmly planted in the present, opportunities to explore the past abound at the nearby Wuhou Temple, Kuan (Wide) and Zhai (Narrow) alleys, Wenshu Yuan Monastery, Jinli Street, Chengdu Museum and Sichuan Provincial Library.
Eight meeting spaces, including a Grand Ballroom with its own foyer and roof garden, a Junior Ballroom and a VIP Room for midsize events.
Traditional Cantonese cuisine at Man Ho, international buffets at Table and signature cocktails and snacks at The Lounge.
Holistic approach to wellness with a state-of-the-art fitness center, indoor swimming pool and array of treatments at Spa. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Charts with two subcharts.
"""
from decimal import Decimal
import swisseph as swe
from oroboros.core.charts import Chart
from oroboros.core.planets import all_planets
from oroboros.core.aspects import all_aspects
from oroboros.core.results import PlanetDataList
from oroboros.core.aspectsresults import AspectDataList, MidPointAspectDataList, InterMidPointAspectDataList
__all__ = ['BiChart']
class BiChart(list):
"""Chart object with comparisons functions for two subcharts."""
__slots__ = ('_interaspects', '_intermidp1', '_intermidp2',
'_intermidpoints', '_switched')
def _get_interaspects(self):
"""Get inter-aspects.
:rtype: AspectDataList
"""
if self._interaspects == None:
self._calc_interaspects()
return self._interaspects
def _get_intermidp1(self):
"""Get aspects between chart 1 midpoints and chart 2 planets.
:rtype: MidPointAspectDataList
"""
if self._intermidp1 == None:
self._calc_intermidp(0)
return self._intermidp1
def _get_intermidp2(self):
"""Get aspects between chart 2 midpoints and chart 1 planets.
:rtype: MidPointAspectDataList
"""
if self._intermidp2 == None:
self._calc_intermidp(1)
return self._intermidp2
def _get_intermidpoints(self):
"""Get aspects between midpoints.
:rtype: InterMidPointAspectDataList
"""
if self._intermidpoints == None:
self._calc_intermidpoints()
return self._intermidpoints
def _get_switched(self):
"""Get switch state flag.
:rtype: bool
"""
return self._switched
def _set_switched(self, boolean):
"""Set switched state flag.
:type boolean: bool
"""
self._switched = bool(boolean)
interaspects = property(_get_interaspects,
doc='Inter-aspects.')
intermidp1 = property(_get_intermidp1,
doc='Aspects to chart 1 midpoints.')
intermidp2 = property(_get_intermidp2,
doc='Aspects to chart 2 midpoints.')
intermidpoints = property(_get_intermidpoints,
doc='Aspects between midpoints.')
switched = property(_get_switched, _set_switched,
doc='Bichart switched state (bool).')
def __init__(self, cht1=None, cht2=None):
"""Init bi-chart.
:type cht1: Chart, str, int or None
:type cht2: Chart, str, int or None
"""
self._switched = False
if cht1 != None:
self.append(cht1)
if cht2 != None:
self.append(cht2)
self.calc()
def append(self, cht):
"""Append a chart.
:type cht: Chart, str or int
:raise TypeError: invalid chart
"""
if not isinstance(cht, Chart):
try:
cht = Chart(cht)
except:
raise TypeError('Invalid chart %s.' % cht)
list.append(self, cht)
self.calc()
def insert(self, idx, cht):
"""Insert a chart.
:type idx: int
:type cht: Chart, str or int
:raise IndexError: invalid index
:raise TypeError: invalid chart
"""
if idx > 1 or idx < -2:
raise IndexError('Invalid index %s.' % idx)
if not isinstance(cht, Chart):
try:
cht = Chart(cht)
except:
raise TypeError('Invalid chart %s.' % cht)
list.insert(self, idx, cht)
self.calc()
def __setitem__(self, idx, cht):
if idx > 1 or idx < -2:
raise IndexError('Invalid index %s.' % idx)
if not isinstance(cht, Chart):
try:
cht = Chart(cht)
except:
raise TypeError('Invalid chart %s.' % cht)
list.__setitem__(self, idx, cht)
self.calc()
def __delitem__(self, idx):
self._switched = False
list.__delitem__(self, idx)
def set(self, idx, **kwargs):
"""Set charts properties."""
self[idx].set(**kwargs)
if any((x for x in kwargs if x in ('datetime', 'calendar', 'location',
'latitude', 'longitude', 'altitude', 'zoneinfo', 'timezone', 'dst',
'utcoffset', 'filter'))):
self.reset_calc()
def reset_calc(self):
"""Trigger recalculation of aspects."""
self._interaspects = None
self._intermidp1 = None
self._intermidp2 = None
self._intermidpoints = None
# calculations
def _calc_interaspects(self):
"""Calculate inter-aspects of planets between charts 1 and 2."""
res = AspectDataList()
if len(self) != 2:
self._interaspects = res
return
f1 = self[0]._filter
f2 = self[1]._filter
all_asp = all_aspects()
for pos1 in self[0]._planets:
p1, lon1, lonsp1 = pos1._planet, pos1._longitude, pos1._lonspeed
for pos2 in self[1]._planets:
p2, lon2, lonsp2 = pos2._planet, pos2._longitude, pos2._lonspeed
for asp, doasp in f1._aspects.items():
if not doasp:
continue
if not f2._aspects[asp]:
continue
if not f1._asprestr[p1._name] or not f2._asprestr[p1._name]:
continue
if not f1._asprestr[p2._name] or not f2._asprestr[p2._name]:
continue
asp = all_asp[asp]
orb = (f1._orbs[asp._name]+f2._orbs[asp._name])/Decimal('2')
orbmod1 = f1.orbrestr[p1._name].get_absolute(orb)
orbmod2 = f2.orbrestr[p2._name].get_absolute(orb)
orb += (orbmod1 + orbmod2) / Decimal('2')
if orb < 0:
continue
diff, apply, factor = swe._match_aspect2(
lon1, lonsp1, lon2, lonsp2,
float(asp._angle), float(orb))
if diff != None:
res.feed(pos1, pos2, asp, diff, apply, factor)
self._interaspects = res
def _calc_intermidp(self, idx):
"""Calculate aspects between one midpoints and other planets."""
res = MidPointAspectDataList()
try:
if len(self) != 2 or not self[idx]._filter._calc_midp:
if idx == 0:
self._intermidp1 = res
else:
self._intermidp2 = res
return
except IndexError:
if idx == 0:
self._intermidp1 = res
else:
self._intermidp2 = res
return
# ok do calc
oth = 1 if idx in (0, -2) else 0 # other's idx
midpres = self[idx]._midpoints
jd = self[oth].julday
flag = self[oth]._filter.get_calcflag()
self[oth]._setup_swisseph()
f = self[idx]._filter._midpoints
all_pl = all_planets()
all_asp = all_aspects()
# get all concerned planets, if not already calculated
plres = PlanetDataList()
for pl in [x for x in f._planets if f._planets[x] and f._asprestr[x]]:
try:
plres.append(self[oth]._planets.get_data(pl))
except KeyError:
p = all_pl[pl]
plres.feed(p, p.calc_ut(jd, flag, self[oth]))
# get midp aspects
plres.sort_by_ranking()
for i, midp in enumerate(midpres):
##p1, p2 = midp._planet, midp._planet2
lon1, lonsp1 = midp._longitude, midp._lonspeed
for pos in plres:
pl, lon2, lonsp2 = pos._planet, pos._longitude, pos._lonspeed
for asp, doasp in f._aspects.items():
if not doasp: # dont use this aspect
continue
asp = all_asp[asp]
# modify orb
orb = f._orbs[asp._name]
#orbmod1 = plorbfilt[p1._name].get_absolute(orb)
orbmod1 = 0 # todo?: midp obrestr
orbmod2 = f._orbrestr[pl._name].get_absolute(orb)
orb += (orbmod1 + orbmod2) / Decimal('2')
if orb < 0: # we'll never get such a precision
continue
# check aspect match
diff, apply, factor = swe._match_aspect2(
lon1, lonsp1, lon2, lonsp2,
float(asp._angle), float(orb))
if diff != None:
res.feed(midp, pos, asp, diff, apply, factor)
if idx == 0:
self._intermidp1 = res
else:
self._intermidp2 = res
def _calc_intermidpoints(self):
"""Calculate aspects between midpoints."""
res = InterMidPointAspectDataList()
if len(self) != 2:
self._intermidpoints = res
return
elif not self[0]._filter._calc_midp or not self[1]._filter._calc_midp:
self._intermidpoints = res
return
f1 = self[0]._filter._midpoints
f2 = self[1]._filter._midpoints
all_asp = all_aspects()
# begin calc
for i, pos1 in enumerate(self[0]._midpoints):
p1, lon1, lonsp1 = pos1._data2, pos1._longitude, pos1._lonspeed
for pos2 in self[1]._midpoints:
p2, lon2, lonsp2 = pos2._data2, pos2._longitude, pos2._lonspeed
for asp, doasp in f1._aspects.items():
if not doasp: # dont use this aspect
continue
if not f2._aspects[asp]:
continue
# no asp restr
asp = all_asp[asp]
# modify orb
orb1 = f1._orbs[asp._name]
orb2 = f2._orbs[asp._name]
orb = (orb1 + orb2) / Decimal('2')
# nor orb restr
# check aspect match
diff, apply, factor = swe._match_aspect2(
lon1, lonsp1, lon2, lonsp2,
float(asp._angle), float(orb))
if diff != None:
res.feed(pos1, pos2, asp, diff, apply, factor)
self._intermidpoints = res
def calc(self):
"""Do all calculations."""
self._calc_interaspects()
self._calc_intermidp(0)
self._calc_intermidp(1)
self._calc_intermidpoints()
def _all_draw_aspects(self):
"""Return a list of all drawable aspects (incl. activated midpoints).
:rtype: AspectDataList
"""
ret = AspectDataList()
ret.extend(self._interaspects)
try:
if self[0]._filter._draw_midp:
ret.extend(self._intermidp1)
except IndexError: # none chart
pass
try:
if self[1]._filter._draw_midp:
ret.extend(self._intermidp2)
except IndexError: # none chart
pass
# try:
# if self[0]._filter._draw_midp and self[1]._filter._draw_midp:
# ret.extend(self._intermidpoints)
# except IndexError: # none chart
# pass
return ret
def _all_draw_planets(self, idx=0):
"""Get all planets and midpoints to draw when comparing charts.
:type idx: int
:rtype: PlanetDataList
"""
ret = PlanetDataList()
if idx == 0:
ret.extend(self[0]._planets)
ret.extend(self._intermidp1.get_midpoints())
else:
ret.extend(self[1]._planets)
ret.extend(self._intermidp2.get_midpoints())
return ret
def switch(self):
"""Switch chart 1 and 2."""
self.reverse()
self._switched = not self._switched
self.calc()
def synastry_mode(self):
"""Set comparison mode transit/synastry."""
for i, cht in enumerate(self):
self[i].calc()
self.calc()
def progression_of(self, idx=0):
"""Set comparison mode progression.
:type idx: int
:raise IndexError: missing chart
"""
if len(self) != 2:
raise IndexError('Missing chart(s).')
if idx == 0:
cht1 = 0
cht2 = 1
elif idx == 1:
cht1 = 1
cht2 = 0
self[cht2].progression_of(self[cht1].julday)
self.calc()
def direction_of(self, idx=0):
"""Set comparison mode direction.
:type idx: int
:raise IndexError: missing chart
"""
if len(self) != 2:
raise IndexError('Missing chart(s)')
if idx == 0:
cht1 = 0
cht2 = 1
elif idx == 1:
cht1 = 1
cht2 = 0
self[cht2].direction_of(self[cht1].julday)
self.calc()
def multiply_pos(self, value, idx):
"""Multiply positions by value.
:type value: numeric
:type idx: int
"""
self[idx].multiply_pos(value)
self.calc()
def add_pos(self, value, idx):
"""Add value to positions.
:type value: numeric
:type idx: int
"""
self[idx].add_pos(value)
self.calc()
def profection_of(self, op, value, unit, idx=0):
"""Profection.
:type op: str
:type value: numeric
:type unit: str
:type idx: int
:raise IndexError: missing chart
"""
if len(self) != 2:
raise IndexError('Missing chart(s)')
if idx == 0:
cht1 = 0
cht2 = 1
elif idx == 1:
cht1 = 1
cht2 = 0
self[cht2].profection_of(op, value, unit, self[cht1].julday)
self.calc()
def __repr__(self):
return "BiChart(%s)" % ', '.join([repr(x) for x in self])
# End.
|
Psychopathic Writings: Update - Things To Come (May 8-14).
Update - Things To Come (May 8-14).
Good Day, Everybody....A little Update Before I Continue Writing About the Psychopathy Research Tests that I have Participated in.
I cannot publish as soon as I otherwise would be able to, because I have to think about every word I put to paper and choose each topic wisely and cautiously. Not being a very good writer, this means I am bound to spend more time than usual on completing this article.
Furthermore, I am not going to publish anything without having my lawyer read it first so I can avoid possible pitfalls before it is too late to change it.
Hopefully I may also be able to write better texts when I spend more time thinking over what I will write and going through what I have written (time will tell if this is the case).
I plan to switch between posting about the various tests and posting about other topics relating to psychopathy. I have a few in the works already, and there are also plenty of subjects to choose from in Reader comments and emails.
I have an upcoming article in two parts, consisting of an email written by a Reader and my reply to him. It just needs transposing from email correspondence into a legible, friendly article form: proofreading, labels, and links.
Another article addresses the question about whether psychopaths might be well fit for a career in the army/special forces.
I have been meaning for some time to write about Psychopaths and Religion, about whether it is possible or likely that a psychopath can be genuinely religious, and if so, how such a psychopath-religion combination or relationship might turn out.
They said I'm a psychopath. I didn't believe them.
Later they said I'm a Psychopath again, and then they said it again.
The World of Official Society seems to insist on having me Destined to Be a Psychopath and will not take No for an Answer.
So I say: If The World will have me be a Psychopath, then a Psychopath I will be.
But I will turn Psychopath Destiny into Something Personal, something you Cannot Box or Confine.
I still receive questions about my upbringing, my childhood and youth, and I promise I'll get to that eventually, but it does happen to be one of those areas that I find painfully boring. I can see that from the perspective of my readers it probably isn't quite as dull, since for you it will be new, but for me it's a more than 40-year-old story, and I've told it to a lot of people (read: psychologists, psychiatrists, social workers, etc.) a lot of times over. But I ask you to please bear with me; it is not that I don't care or that I won't do it, it is simply VERY DIFFICULT. No, really! When I don't have any interest in a subject whatsoever I have great problems keeping my thoughts on the matter at hand, in this case writing about things that happened in my life a looong time ago.
When you write about a topic or a subject you need to have at least some emotional attachment, if nothing else fascination or self-interest, to drive the project. Your level of interest in what you write about will also be reflected in the outcome. Even in the case of bad writers and dyslexics, the material you produce can be interesting or engaging to read if it is written with enthusiasm or a sense of necessity.
A note about the layout of this website: I intend to give it a facelift eventually, but I do have a lot of things going on in my life, so I won't be able to get it done just now. Hopefully within a reasonable period I'll get the time.
I know some have problems finding the way around between articles, and no wonder. The 'labels' system doesn't work after a certain point. For now I have changed 'Labels' so that now they are called 'Topics' and I'm trying out the 'Cloud' system instead. Let me know what you think.
Please, also let me know about ideas about how to improve the functionality of this website/blog. My computer experience is limited and I therefore cannot always foresee what works best or what my Readers would like me to implement, remove or change. Nor is it always certain that I know how to do such things. But please let me know even so. We have to start some place, and this is the place I have to start from.
Article about Psychopaths & The Army coming up tomorrow Fri. May 9.th (or early morning Saturday, May 10.th).
Like the poetry, keep it up.
What level of subjectivity is useful and what level makes you act like a fool could be a new topic.
The topics take longer to load now; you see a flash of white screen.
The longer I live the harder it is for me to concentrate on things that don't interest me; is this the same for you?
1) you think he is c/u or a "young psychopath"
hey Zhawq, how old are you?
The Western military is controlled & operated by the Elite Psychopaths. Just look in detail and you will see the same names/same families behind every war in the last 100+ years.
Zhawq is exactly correct in saying that psychopaths would not suffer psychological problems from killing, but would find the regimentation of the military difficult/impossible.
They have been used in special forces for decades already and it hasn't done the world any good.
Btw, your introspection is more akin to that of an Asperger's sufferer than a psychopath.
You present as most definitely not a psychopath in your writings; neither is Fallon... he was genuinely concerned he was one, which basically means he isn't.
If you do discover a psychopath, for fuck's sake don't call him a psychopath. Warn everyone you can, but don't tip them off, because they can get pretty nasty when they are cornered and exposed.
I'd put money on whoever taught your doctors to confuse Asperger's with psychopathy being most definitely a psychopath.
The depth of your insight and your literal moralising, despite your lack of emotional affect, mean you are most definitely not a psychopath... unless it was your plan to have us think that you're not; only you know the answer to this.
If you respond openly and honestly to a series of questions I ask, I will be able to tell you.
I'm somewhat of an expert in the subject, in that I learned independently, and you have nothing to lose or gain from your answers, as my analysis will be deliberately vague.
The very fact that you state you are a psychopath and would like to find a prosocial role for yourself means that you are not.
You are in all likelihood a rather misled Asperger's sufferer... the ultimate victim of a psychopath... as a literal mind with no emotional compass but a soul that cares and seeks insight is the perfect distraction to hoodwink people into thinking all emotionally challenged people are evil... when they aren't... it's a subset, and we call them psychopaths.
They don't introspect... not ever... they might pretend, but you can't tell the truth with a brain wired for nothing but lies.
If you were a real one... you would be dead... the recidivism rate goes down and down with every single one that gets murdered by another one.
Or just keep lying for no reason... if you're a psychopath, then that is what you will do.
I wonder if anyone else on here used to be an Antisocial Personality. I had, in my mind, an excuse for every lie I ever told, everything I ever stole, every time I selfishly disregarded others. I felt guilt quite often, but I wouldn't admit it to anyone, not even to myself. I suffered for the guilt of vandalizing the homes and cars of others. I needed excitement, and for others to feel bad, because I thought about killing myself every single day. I was not socially adept, though. I couldn't read others' feelings or get along with them. Then when I was 15, I read an article on this BBS named Avatar OnLine. It claimed that once a psychopath was aged 15, "the clay has hardened." I learned to actually avoid lying, and consciously feel bad, although sometimes only when I got caught. My associates stayed psychopaths... they never learned anything. I like this blog, it's unlike any other I've ever seen!
He's already explained the differences between Asperger's and psychopathy. Do a little more study before you diagnose people online. This guy is what he says he is; he's studied his own condition for three years, so he should know. And besides, you don't get diagnosed with psychopathy by experts in the prison system if you have Asperger's. Come on already. They could PERHAPS make that mistake the first time, but not three times with years in between.
Currently an antisocial personality, and it will end soon enough. I am coming out the other side of something, but no, I am not going to develop normalness; yes, though, a few feelings are freeing up.
import uuid
import datetime
import re
import os
import sqlite3
import time
import itertools
import pytz
import lockfile
import memdam
import memdam.common.field
import memdam.common.event
import memdam.common.blob
import memdam.eventstore.api
@memdam.vtrace()
def execute_sql(cur, sql, args=()):
'''Just for debugging'''
return cur.execute(sql, args)
@memdam.vtrace()
def execute_many(cur, sql, values=()):
'''Just for debugging'''
cur.executemany(sql, values)
#TODO: validate the various bits of data--should not start or end with _, should not contain __, should only contain numbers and digits
#also have to validate all of the things that we are inserting in a raw way
class Eventstore(memdam.eventstore.api.Eventstore):
"""
An archive for all events that uses Sqlite as the backing store.
Stores all tables in their own file for the following reasons:
- Lower contention (read and write) when working with multiple data types at once
- Smaller files (easier to back up, encrypt, decrypt, etc)
- Safety. Reduces chances of corrupting all data.
Note: pass in a folder called :memory: to keep everything in memory for testing
When inserting new events, automatically creates new columns if necessary.
All columns are given appropriate indices (usually ASC, except in the case of TEXT, which is
given an FTS virtual table, and the column in the main table becomes an INTEGER that refers
to the document id in the FTS table)
Columns are created with exactly the same name as the variables.
Variable names uniquely define the type of the column, as well as the type of any index.
TEXT attributes will create a column that contains docid integer references in the main table,
AS WELL AS a second (virtual, fts4) table (name__text__docs)
Indices are named "name__type__secondary__indextype"
"""
EXTENSION = '.sql'
LOCK_EXTENSION = '.lock'
CREATE_TABLE_EXTENSION = '.creating_sql'
def __init__(self, folder):
self.folder = folder
self.memory_connection = None
def save(self, events):
memdam.log().debug("Saving events")
sorted_events = sorted(events, key=lambda x: x.namespace)
for namespace, grouped_events in itertools.groupby(sorted_events, lambda x: x.namespace):
table_name = namespace_to_table_name(namespace)
self._save_events(list(grouped_events), table_name)
def get(self, event_id):
for table_name in self._all_table_names():
conn = self._connect(table_name, read_only=True)
namespace = table_name_to_namespace(table_name)
cur = conn.cursor()
sql = "SELECT * FROM %s WHERE id__id = ?;" % (table_name)
execute_sql(cur, sql, (buffer(event_id.bytes),))
names = [x[0] for x in cur.description]
for row in cur.fetchall():
return _create_event_from_row(row, names, namespace, conn)
raise Exception("event with id %s not found" % (event_id))
def find(self, query):
events = []
for table_name in self._all_table_names():
if _matches_namespace_filters(table_name, query):
events += self._find_matching_events_in_table(table_name, query)
return events
def delete(self, event_id):
for table_name in self._all_table_names():
conn = self._connect(table_name, read_only=False)
cur = conn.cursor()
cur.execute("BEGIN EXCLUSIVE")
sql = "SELECT _id FROM %s WHERE id__id = ?;" % (table_name)
execute_sql(cur, sql, (buffer(event_id.bytes),))
for row in cur.fetchall():
rowid = row[0]
names = [x[0] for x in cur.description]
for i in range(0, len(names)):
name = names[i]
if name == '_id':
continue
if memdam.common.event.Event.field_type(name) == memdam.common.field.FieldType.TEXT:
execute_sql(cur, "DELETE FROM %s__%s__docs WHERE docid = ?;" % (table_name, name), (rowid))
execute_sql(cur, "DELETE FROM %s WHERE _id = %s" % (table_name, rowid), ())
conn.commit()
def _find_matching_events_in_table(self, table_name, query):
conn = self._connect(table_name, read_only=True)
namespace = table_name_to_namespace(table_name)
cur = conn.cursor()
args = ()
sql = "SELECT * FROM %s" % (table_name)
field_filters, _ = _separate_filters(query.filters)
if field_filters:
filter_string, new_args = _get_field_filter_string(field_filters)
args = args + new_args
sql += " WHERE " + filter_string
if query.order:
order_string = self._get_order_string(query.order)
sql += " ORDER BY " + order_string
if query.limit:
sql += " LIMIT " + str(long(query.limit))
sql += ';'
execute_sql(cur, sql, args)
events = []
names = list(map(lambda x: x[0], cur.description))
for row in cur.fetchall():
events.append(_create_event_from_row(row, names, namespace, conn))
return events
def _get_order_string(self, order):
sql_order_elems = []
for elem in order:
order_type = 'ASC'
if elem[1] == False:
order_type = 'DESC'
safe_column_name = elem[0].lower()
assert SqliteColumn.SQL_NAME_REGEX.match(safe_column_name), "Invalid name for column: %s" % (safe_column_name)
assert memdam.common.event.Event.field_type(safe_column_name) != memdam.common.field.FieldType.TEXT, "text keys are currently unsupported for ordering. Doesn't make a lot of sense."
sql_order_elems.append("%s %s" % (safe_column_name, order_type))
return ", ".join(sql_order_elems)
def _all_table_names(self):
"""
:returns: the names of all tables
:rtype: list(unicode)
"""
if self.folder == ":memory:":
#list all tables that are not "__docs"
conn = self._get_or_create_memory_connection()
cur = conn.cursor()
execute_sql(cur, "SELECT * FROM sqlite_master WHERE type='table';")
tables = []
for row in cur.fetchall():
table_name = row[1]
if not "__docs" in table_name:
tables.append(table_name)
else:
tables = [r[:-1*len(Eventstore.EXTENSION)] for r in list(os.listdir(self.folder)) if r.endswith(Eventstore.EXTENSION)]
return [unicode(r) for r in tables]
def _get_or_create_memory_connection(self):
assert self.folder == ":memory:"
#TODO: when all tests are passing again, do we need memory_connection at all? I don't think so...
if self.memory_connection == None:
self.memory_connection = sqlite3.connect(self.folder, isolation_level="EXCLUSIVE")
return self.memory_connection
def _connect(self, table_name, read_only=True):
"""
Connect to the database with this namespace in it.
"""
if self.folder == ":memory:":
return self._get_or_create_memory_connection()
db_file = os.path.join(self.folder, table_name + Eventstore.EXTENSION)
if read_only:
conn = sqlite3.connect(db_file, isolation_level="DEFERRED")
#TODO: set PRAGMA read_uncommitted = TRUE;
#otherwise can't read while writing
return conn
else:
return sqlite3.connect(db_file, isolation_level="EXCLUSIVE")
def _save_events(self, events, table_name):
"""
Save all events of the same type to the database at once
"""
memdam.log().debug("Saving %s events to %s" % (len(events), table_name))
if len(events) <= 0:
return
assert SqliteColumn.SQL_NAME_REGEX.match(table_name), "Invalid name for table: %s" % (table_name)
key_names = set()
for event in events:
for key in event.keys:
key_names.add(key)
#certain key names are ignored because they are stored implicity in the location of
#this database (user, namespace)
for reserved_name in ("type__namespace", "user__id"):
if reserved_name in key_names:
key_names.remove(reserved_name)
should_update_columns = True
if self.folder != ":memory:":
#does table not exist?
db_file = os.path.join(self.folder, table_name + Eventstore.EXTENSION)
if not os.path.exists(db_file):
#try to acquire lock
lock_file = os.path.join(self.folder, table_name + Eventstore.LOCK_EXTENSION)
lock = lockfile.LockFile(lock_file)
with lock:
#two possible scenarios:
#1. we got the lock AFTER someone else, who already made the table:
if os.path.exists(db_file):
#TODO: move this somewhere more sensible
try:
os.remove(lock_file)
except:
pass
#2. we got the lock BEFORE anyone else, so we're responsible for making the table:
else:
should_update_columns = False
#make the table and create the columns
temp_db_file = os.path.join(self.folder, table_name + Eventstore.CREATE_TABLE_EXTENSION)
self._create_database(table_name, key_names, temp_db_file)
#move the file back to it's regular location
os.rename(temp_db_file, db_file)
#TODO: move this somewhere more sensible
try:
os.remove(lock_file)
except:
pass
conn = self._connect(table_name, read_only=False)
if should_update_columns:
def update_columns():
cur = conn.cursor()
existing_columns = self._query_existing_columns(cur, table_name)
required_columns = self._generate_columns(cur, key_names, table_name)
self._update_columns(cur, existing_columns, required_columns)
#TODO: use the locking approach for updating as well as creating?
execute_with_retries(update_columns, 5)
cur = conn.cursor()
cur.execute("BEGIN EXCLUSIVE")
self._insert_events(cur, events, key_names, table_name)
conn.commit()
def _create_database(self, table_name, key_names, db_file):
assert self.folder != ":memory:", 'because we don\'t have to do this with memory'
conn = sqlite3.connect(db_file, isolation_level="EXCLUSIVE")
cur = conn.cursor()
#TODO: this should NOT have the side-effect of creating the table, that is just weird
existing_columns = self._query_existing_columns(cur, table_name)
required_columns = self._generate_columns(cur, key_names, table_name)
self._update_columns(cur, existing_columns, required_columns)
def _query_existing_columns(self, cur, table_name):
"""
:param cur: the current writable database cursor
:type cur: sqlite3.Cursor
:returns: a list of SqliteColumn's
"""
columns = {}
execute_sql(cur, "PRAGMA table_info(%s);" % (table_name,))
allrows = cur.fetchall()
if len(allrows) == 0:
self._create_table(cur, table_name)
execute_sql(cur, "PRAGMA table_info(%s);" % (table_name,))
allrows = cur.fetchall()
for row in allrows:
#ignore our unique id row
if row[1] == '_id':
continue
col = SqliteColumn.from_row(row, table_name)
columns[col.name] = col
return columns
def _create_table(self, cur, table_name):
"""
Create a table with the default column (sample_time)
"""
execute_sql(cur, "PRAGMA encoding = 'UTF-8';")
execute_sql(cur, "CREATE TABLE %s(_id INTEGER PRIMARY KEY, time__time INTEGER, id__id STRING);" % (table_name,))
execute_sql(cur, "CREATE INDEX %s__time__time__asc ON %s (time__time ASC);" % (table_name, table_name))
execute_sql(cur, "CREATE INDEX %s__id__id__asc ON %s (id__id ASC);" % (table_name, table_name))
def _generate_columns(self, cur, key_names, table_name):
"""
Make a bunch of SqliteColumn's based on the key names of all of the events
:param cur: the current writable database cursor
:type cur: sqlite3.Cursor
:param key_names: the superset of all key field names
:type key_names: set(string)
:returns: a list of SqliteColumn's
"""
return [SqliteColumn(key, table_name) for key in key_names]
def _update_columns(self, cur, existing_column_map, required_columns):
"""
Modify the schema of the table to include new columns or indices if necessary
"""
for required_column in required_columns:
if required_column.name in existing_column_map:
existing_column = existing_column_map[required_column.name]
assert required_column.sql_type == existing_column.sql_type
else:
required_column.create(cur)
def _insert_events(self, cur, events, key_names, table_name):
"""
Insert all events at once.
Assumes that the schema is correct.
"""
#required because of stupid text fields.
#we need to explicitly set the ids of everything inserted, or iteratively insert and check for lastrowid (which is slow and pathological and will end up doing this effectively anyway I think)
#figure out what the next id to insert should be
cur.execute("SELECT _id FROM %s ORDER BY _id DESC LIMIT 1" % (table_name))
next_row_id = 1
results = cur.fetchall()
if len(results) > 0:
next_row_id = results[0][0] + 1
#need to insert text documents into separate docs tables
for key in key_names:
if memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.TEXT:
sql = "INSERT INTO %s__%s__docs (docid,data) VALUES (?,?);" % (table_name, key)
values = [(next_row_id + i, getattr(events[i], key, None)) for i in range(0, len(events))]
execute_many(cur, sql, values)
#finally, insert the actual events into the main table
column_names = list(key_names)
column_name_string = ", ".join(column_names)
value_tuple_string = "(" + ", ".join(['?'] * (len(column_names)+1)) + ")"
sql = "INSERT INTO %s (_id, %s) VALUES %s;" % (table_name, column_name_string, value_tuple_string)
values = [make_value_tuple(events[i], key_names, next_row_id + i) for i in range(0, len(events))]
execute_many(cur, sql, values)
#TODO: this whole notion of filters needs to be better thought out
@memdam.vtrace()
def _separate_filters(filters):
field_filters = []
namespaces = []
for f in filters:
if f.rhs == 'namespace__namespace':
assert f.operator == '='
namespaces.append(f.lhs)
elif f.lhs == 'namespace__namespace':
assert f.operator == '='
namespaces.append(f.rhs)
else:
field_filters.append(f)
return field_filters, namespaces
@memdam.vtrace()
def _matches_namespace_filters(table_name, query):
_, namespaces = _separate_filters(query.filters)
if len(namespaces) <= 0:
return True
return table_name_to_namespace(table_name) in namespaces
@memdam.vtrace()
def _get_field_filter_string(field_filters):
#TODO (security): lol so bad.
filter_string = ' AND '.join(('%s %s %s' % (f.lhs, f.operator, f.rhs) for f in field_filters))
return filter_string, ()
@memdam.vtrace()
def make_value_tuple(event, key_names, event_id):
"""Turns an event into a sql value tuple"""
values = [event_id]
for key in key_names:
value = getattr(event, key, None)
if value != None:
#convert time to long for more efficient storage (and so it can be used as a primary key)
if isinstance(value, datetime.datetime):
value = convert_time_to_long(value)
#convert text tuple entries into references to the actual text data
elif memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.TEXT:
value = event_id
#convert UUIDs to byte representation
elif memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.ID:
value = buffer(value.bytes)
elif memdam.common.event.Event.field_type(key) == memdam.common.field.FieldType.FILE:
value = value.name
values.append(value)
return values
@memdam.vtrace()
def convert_time_to_long(value):
"""turns a datetime.datetime into a long"""
return long(round(1000000.0 * (value - EPOCH_BEGIN).total_seconds()))
@memdam.vtrace()
def convert_long_to_time(value):
"""turns a long into a datetime.datetime"""
return EPOCH_BEGIN + datetime.timedelta(microseconds=value)
@memdam.vtrace()
def table_name_to_namespace(table_name):
return table_name.replace(u'_', u'.')
@memdam.vtrace()
def namespace_to_table_name(namespace):
return namespace.replace(u'.', u'_')
@memdam.vtrace()
def _create_event_from_row(row, names, namespace, conn):
"""returns a memdam.common.event.Event, generated from the row"""
data = {}
table_name = namespace_to_table_name(namespace)
for i in range(0, len(names)):
name = names[i]
if name == '_id':
continue
value = row[i]
if value != None:
field_type = memdam.common.event.Event.field_type(name)
if field_type == memdam.common.field.FieldType.TIME:
value = convert_long_to_time(value)
elif field_type == memdam.common.field.FieldType.TEXT:
cur = conn.cursor()
execute_sql(cur, "SELECT data FROM %s__%s__docs WHERE docid = '%s';" % (table_name, name, value))
value = cur.fetchall()[0][0]
elif field_type == memdam.common.field.FieldType.ID:
value = uuid.UUID(bytes=value)
elif field_type == memdam.common.field.FieldType.BOOL:
value = value == 1
elif field_type == memdam.common.field.FieldType.FILE:
parsed_data = value.split('.')
value = memdam.common.blob.BlobReference(uuid.UUID(parsed_data[0]), parsed_data[1])
data[name] = value
data['type__namespace'] = namespace
return memdam.common.event.Event(**data)
EPOCH_BEGIN = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
class SqliteColumn(memdam.Base):
"""
Represents a column in sqlite.
Note that the name here is the raw key name (eg, without the data type or index)
:attr name: the name of the column. No type, no index, none of that nonsense.
:type name: string
:attr data_type: the type of data
:type data_type: memdam.common.field.FieldType
:attr table_name: the name of the table. The namespace for the events
:type table_name: string
"""
SQL_NAME_REGEX = re.compile(r"[a-z][a-z0-9_]*")
data_type_to_sql_type = {
memdam.common.field.FieldType.NUMBER: 'FLOAT',
memdam.common.field.FieldType.STRING: 'TEXT',
#this might seems strange, but it's because we store an index to a document in another table
memdam.common.field.FieldType.TEXT: 'INTEGER',
memdam.common.field.FieldType.ENUM: 'TEXT',
memdam.common.field.FieldType.RAW: 'BLOB',
memdam.common.field.FieldType.BOOL: 'BOOL',
memdam.common.field.FieldType.TIME: 'INTEGER',
memdam.common.field.FieldType.ID: 'TEXT',
memdam.common.field.FieldType.LONG: 'INTEGER',
memdam.common.field.FieldType.FILE: 'TEXT',
memdam.common.field.FieldType.NAMESPACE: 'TEXT',
}
def __init__(self, column_name, table_name):
self.column_name = column_name
name = memdam.common.event.Event.raw_name(column_name)
assert SqliteColumn.SQL_NAME_REGEX.match(name), "Invalid name for column: %s" % (name)
self.name = name
self.data_type = memdam.common.event.Event.field_type(column_name)
assert SqliteColumn.SQL_NAME_REGEX.match(name), "Invalid name for table: %s" % (table_name)
self.table_name = table_name
@property
def is_text(self):
"""
:returns: True iff this is a text "column", which must be handled specially
"""
return self.data_type == memdam.common.field.FieldType.TEXT
def create(self, cur):
"""
Create the column and index.
Only call if the column and index don't already exist.
"""
if self.is_text:
execute_sql(cur, "CREATE VIRTUAL TABLE %s__%s__docs USING fts4(data,tokenize=porter);" % (self.table_name, self.column_name))
execute_sql(cur, "ALTER TABLE %s ADD COLUMN %s %s;" % (self.table_name, self.column_name, self.sql_type))
if self.sql_index != None:
index_name = self.table_name + "__" + self.column_name + "__" + self.sql_index
execute_sql(cur, "CREATE INDEX %s ON %s (%s %s);" % (index_name, self.table_name, self.column_name, self.sql_index))
def __repr__(self):
data_type_name = memdam.common.field.FieldType.names[self.data_type]
return "SqliteColumn(%s/%s/%s)" % (self.table_name, self.name, data_type_name)
def __str__(self):
return self.__repr__()
@property
def sql_type(self):
"""
:returns: the sqlite type corresponding to our data_type
:rtype: string
"""
return self.data_type_to_sql_type[self.data_type]
@property
def sql_index(self):
"""
Note: everything returns ASC because the only alternative is FTS, which is handled specially
and ends up making an ASC index on the column anyway.
:returns: the sqlite type corresponding to our index type
:rtype: string
"""
if self.data_type == memdam.common.field.FieldType.RAW:
return None
return 'ASC'
@staticmethod
def from_row(row, table_name):
"""
Alternative constructor from a sqlite row.
"""
column_name = row[1]
return SqliteColumn(column_name, table_name)
@memdam.vtrace()
def execute_with_retries(command, num_retries=3, retry_wait_time=0.1, retry_growth_rate=2.0):
"""
Try to accomplish the command a few times before giving up.
"""
retry = 0
last_exception = None
while retry < num_retries:
try:
return command()
except Exception, e:
# remember the failure, back off, and try again
last_exception = e
time.sleep(retry_wait_time)
retry_wait_time *= retry_growth_rate
retry += 1
raise last_exception
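# A minimal usage sketch (hypothetical folder and field names; assumes
# memdam.common.event.Event accepts keyword fields whose name suffixes encode
# the field type, as the column naming scheme above implies):
#
#   store = Eventstore('/tmp/events')
#   event = memdam.common.event.Event(type__namespace=u'com.example.notes',
#                                     note__text=u'hello world')
#   store.save([event])
#   print store.get(event.id__id)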
|
Reconstructive surgery is performed on abnormal structures of the body caused by congenital defects, developmental abnormalities, trauma, infection, tumors or disease. Generally, reconstruction is performed to improve or restore function, but it is also performed to restore form that will approximate a normal appearance.
Dr. Gonzalez will be happy to personally consult with you to determine a treatment plan for your specific situation. |